| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
"""
For the moment we're not using this, but it would be nice to do so
at some point. It isn't really relevant to the researcher RDF export; the
idea would be to export details of a web presence in order to compile a complete
directory of websites in the Medical Sciences Division.
"""
def addWebPresenceElement(rdf, deptElement, deptName):
"""
<foaf:Organization rdf:about="http://www.psy.ox.ac.uk/">
<foaf:homepage>
<res:WebPresence>
<dc:title>Department of Experimental Psychology Website</dc:title>
<dc:creator>
<foaf:Person>
<foaf:mbox>anne.bowtell@medsci.ox.ac.uk</foaf:mbox>
</foaf:Person>
</dc:creator>
</res:WebPresence>
</foaf:homepage>
"""
#TODO - this will be real data in the future
standardWebmaster = 'webmaster@medsci.ox.ac.uk'
homepageProp = rdf.createElement('foaf:homepage')
deptElement.appendChild(homepageProp)
wpElem = rdf.createElement('res:WebPresence')
homepageProp.appendChild(wpElem)
title = rdf.createElement('dc:title')
title.appendChild(rdf.createTextNode(deptName + ' Website'))
wpElem.appendChild(title)
creator = rdf.createElement('dc:creator')
wpElem.appendChild(creator)
person = rdf.createElement('foaf:Person')
creator.appendChild(person)
mbox = rdf.createElement('foaf:mbox')
person.appendChild(mbox)
mbox.appendChild(rdf.createTextNode(standardWebmaster))
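# Illustrative usage sketch (not part of the original module). It assumes `rdf`
# is an xml.dom.minidom Document and `deptElement` an already-attached
# foaf:Organization element:
#
#     from xml.dom.minidom import Document
#     rdf = Document()
#     dept = rdf.createElement('foaf:Organization')
#     dept.setAttribute('rdf:about', 'http://www.psy.ox.ac.uk/')
#     rdf.appendChild(dept)
#     addWebPresenceElement(rdf, dept, 'Department of Experimental Psychology')
#     print(rdf.toprettyxml())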
| envycontent/msd.rdfexport | msd/rdfexport/researcher/helper/webpresence.py | Python | gpl-2.0 | 1,575 |
#!/usr/bin/env python
'''
BlueBanana Rat Config Decoder
'''
__description__ = 'BlueBanana Rat Config Extractor'
__author__ = 'Kevin Breen http://techanarchy.net http://malwareconfig.com'
__version__ = '0.1'
__date__ = '2014/04/10'
#Standard Imports Go Here
import os
import sys
import string
from zipfile import ZipFile
from cStringIO import StringIO
from optparse import OptionParser
#Non Standard Imports
try:
from Crypto.Cipher import AES
except ImportError:
print "[+] Couldn't Import Cipher, try 'sudo pip install pycrypto'"
# Main Decode Function Goes Here
'''
data is the raw contents of the file.
Must return a Python dict of values.
'''
def run(data):
newZip = StringIO(data)
with ZipFile(newZip) as zip:
for name in zip.namelist(): # get all the file names
if name == "config.txt": # this file contains the encrypted config
conFile = zip.read(name)
if conFile: #
confRaw = decryptConf(conFile)
conf = configParse(confRaw)
return conf
#Helper Functions Go Here
def DecryptAES(enckey, data):
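    # Note: with legacy PyCrypto, AES.new(key) with no explicit mode defaults to ECB.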
cipher = AES.new(enckey) # set the cipher
    return cipher.decrypt(data) # decrypt the data
def decryptConf(conFile):
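    # The config is double-wrapped: the hex-encoded outer layer is AES-decrypted
    # with key1; the result, minus a trailing 16-byte block, is again a hex string
    # that is AES-decrypted with key2 to give the plaintext config.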
key1 = "15af8sd4s1c5s511"
key2 = "4e3f5a4c592b243f"
first = DecryptAES(key1, conFile.decode('hex'))
second = DecryptAES(key2, first[:-16].decode('hex'))
return second
def configParse(confRaw):
config = {}
clean = filter(lambda x: x in string.printable, confRaw)
list = clean.split("<separator>")
config["Domain"] = list[0]
config["Password"] = list[1]
config["Port1"] = list[2]
config["Port2"] = list[3]
if len(list) > 4:
config["Install Name"] = list[4]
config["Jar Name"] = list[5]
return config
#Recursive Function Goes Here
# Main
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__)
parser.add_option("-r", "--recursive", action='store_true', default=False, help="Recursive Mode")
(options, args) = parser.parse_args()
    # If we don't have args, quit with the help page
if len(args) > 0:
pass
else:
parser.print_help()
sys.exit()
    # If we want a recursive extraction, run this function
if options.recursive == True:
print "[+] Sorry Not Here Yet Come Back Soon"
    # If not recursive, try to open the file
try:
print "[+] Reading file"
fileData = open(args[0], 'rb').read()
except:
print "[+] Couldn't Open File {0}".format(args[0])
#Run the config extraction
print "[+] Searching for Config"
config = run(fileData)
    # If we have a config, figure out where to dump it.
if config == None:
print "[+] Config not found"
sys.exit()
    # If two args were given, assume the second arg is where to save the output file
if len(args) == 2:
print "[+] Writing Config to file {0}".format(args[1])
with open(args[1], 'a') as outFile:
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value))
    # If there is no second arg, assume the config should be printed to screen
else:
print "[+] Printing Config to screen"
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
print " [-] Key: {0}\t Value: {1}".format(key,clean_value)
print "[+] End of Config"
| 1ookup/RATDecoders | BlueBanana.py | Python | gpl-2.0 | 3,304 |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
ledpin = 18
GPIO.setup(ledpin, GPIO.OUT)
try:
while True:
GPIO.output(ledpin, 1)
time.sleep(0.5)
GPIO.output(ledpin, 0)
time.sleep(0.5)
except KeyboardInterrupt:
GPIO.cleanup()
| VA3SFA/rpi_hw_demo | led/blink.py | Python | gpl-2.0 | 312 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2013 Martijn Kaijser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xbmc
import xbmcaddon
### get addon info
__addon__ = xbmcaddon.Addon(id='script.artwork.downloader')
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__author__ = __addon__.getAddonInfo('author')
__version__ = __addon__.getAddonInfo('version')
__addonpath__ = __addon__.getAddonInfo('path')
__addonprofile__= xbmc.translatePath(__addon__.getAddonInfo('profile')).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
__localize__ = __addon__.getLocalizedString
| SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/script.artwork.downloader/lib/common.py | Python | gpl-2.0 | 1,305 |
# coding=utf-8
"""
Widgets Module
"""
__author__ = 'Matt Eland'
| IntegerMan/Pi-MFD | PiMFD/UI/Widgets/__init__.py | Python | gpl-2.0 | 66 |
#!/usr/bin/python3
# requires:
# - pysocks for socks5 proxy socket, required by requesocks
# - certifi for HTTPS certificate validation, also used in depths of requesocks
# - requesocks
import requesocks
import certifi
#SOCKS5_PROXY = '127.0.0.1:9050'
SOCKS5_PROXY = '192.168.20.1:9050'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/45.0.2454.85 Safari/537.36'
def main():
session = requesocks.session()
session.proxies = {
'http': 'socks5://{0}'.format(SOCKS5_PROXY),
'https': 'socks5://{0}'.format(SOCKS5_PROXY)
}
session.headers.update({'user-agent': USER_AGENT})
# url = 'http://yandex.ru/internet'
# url = 'https://www.whatismyip.com/my-ip-information/'
url = 'http://httpbin.org/ip'
print('Using proxy: {0}'.format(SOCKS5_PROXY))
print('Requesting URL: {0}'.format(url))
r = session.get(url)
if r.status_code == 200:
text = r.text
if text is None:
if type(r.content) == bytes:
text = r.content.decode('UTF-8')
if text:
print(text)
with open('res.html', 'wt', encoding=r.encoding) as f:
f.write(text)
else:
print('status code: {0}'.format(r.status_code))
if __name__ == '__main__':
main()
| minlexx/xnovacmd | tests/test_requesocks.py | Python | gpl-2.0 | 1,339 |
#!/usr/bin/python
# -*- coding: UTF-8, tab-width: 4 -*-
# Python Coding Style: http://docs.python.org/tutorial/controlflow.html#intermezzo-coding-style
# Command Line Arguments Parser: http://docs.python.org/library/argparse.html
from __future__ import division
from sys import argv, stdout, stderr
from codecs import open as cfopen
def main(invocation, *cli_args):
HEIDI_CHARSET = 'UTF-8'
INI_CHARSET = 'UTF-8-sig'
if len(cli_args) < 1:
raise ValueError('not enough parameters. required: ConfigFileName')
cfg_fn = cli_args[0]
cfg_realms = {
'app': {},
'srv': {},
}
for cfg_ln in cfopen(cfg_fn, 'r', HEIDI_CHARSET):
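        # Each exported line is expected to look like: <key><|||><format><|||><value>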
cfg_key, cfg_fmt, cfg_value = cfg_ln.rstrip().split('<|||>', 2)
cfg_realm, cfg_sect, cfg_key = split_cfg_key(cfg_key)
if (cfg_realm, cfg_key) == ('srv', 'Password'):
cfg_value = decode_heidi_password(cfg_value)
cfg_realm = cfg_realms[cfg_realm]
sect_dict = cfg_realm.get(cfg_sect)
if sect_dict is None:
sect_dict = cfg_realm[cfg_sect] = {}
sect_dict[cfg_key] = cfg_fmt + '|' + cfg_value
ini_fn = 'heidisql.ini'
write_ini(cfopen(ini_fn, 'w', INI_CHARSET), cfg_realms['app'])
for cfg_sect, sect_dict in cfg_realms['srv'].items():
ini_fn = sanitize_file_name(cfg_sect).lower() + '.ini'
write_ini(cfopen(ini_fn, 'w', INI_CHARSET), { cfg_sect: sect_dict })
def write_ini(dest, ini):
for sect_name in sorted(ini.keys()):
sect_dict = ini[sect_name]
dest.write('[' + sect_name + ']\n')
for opt_name in sorted(sect_dict.keys()):
opt_value = sect_dict[opt_name]
dest.write(opt_name + '=' + opt_value + '\n')
dest.write('\n')
def split_at_first_nonalpha(idstr, defaultPrefix=None):
for pos, chr in enumerate(idstr):
if not chr.isalpha():
pos += 1
return idstr[0:pos], idstr[pos:]
return defaultPrefix, idstr
def split_cfg_key(key):
if key.startswith('Servers\\'):
sect, key = key.split('\\', 2)[1:]
return 'srv', sect, key
form_part = key.split('.', 1)
if len(form_part) == 2:
# [u'ColPositions_connform', u'ListSessions']
if form_part[0].lower().endswith('form'):
form_prop, form_part = form_part
form_prop = form_prop.split('_')
if len(form_prop) == 2:
# [u'ColPositions', u'connform']
form_prop, form_name = form_prop
sect = form_name
key = form_part + '.' + form_prop
return 'app', sect, key
return 'app', 'HeidiSQL', key
def decode_heidi_password(obfus):
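    # The stored password is a hex string: the last hex digit is a Caesar key and
    # each preceding pair of hex digits is a character code shifted up by that key;
    # characters outside the printable range (and quotes/backslashes) are emitted
    # as \u00xx escapes.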
obfus, caesar_key = obfus[:-1], obfus[-1:]
caesar_key = -int(caesar_key, 16)
clean = ''
while obfus != '':
cnum, obfus = obfus[:2], obfus[2:]
cnum = int(cnum, 16)
cnum += caesar_key
char = None
if (31 < cnum) and (cnum < 127):
char = chr(cnum)
if char in ('\\', '"', "'"):
char = None
if char is None:
char = '\\u00' + hex(cnum).replace('0x', '00')[-2:]
# print cnum, hex(cnum), char
clean += char
return '"' + clean + '"'
def sanitize_file_name(wild):
sane = ''
for char in wild:
# print repr(char),
if char.isalnum() or (char in '@-'):
if repr(char)[2:-1] != char:
# this alnum might be too fancy for some file systems.
continue
sane += char
continue
# if char.isspace():
char = '_'
if not sane.endswith(char):
sane += char
# print repr(sane)
return sane
if __name__ == '__main__':
main(*argv)
| mk-pmb/heidisql-ubuntu-util | cfg.unpack.py | Python | gpl-2.0 | 3,798 |
#!/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eduardo'
import unittest
import os
import os.path
import cocar.tests
from ..session import Host, SnmpSession, NmapSession
from ..model import network
from .. import utils
class TestDiscover(unittest.TestCase):
"""
    Tests network asset discovery using SNMP
"""
def setUp(self):
"""
        Initial parameters
"""
self.activeip = '127.0.0.1'
self.inactiveip = '127.1.1.1'
self.localhost = '127.0.0.1'
self.data_dir = cocar.tests.cocar.cocar_data_dir
local_network = utils.get_local_network()
self.network = network.Network(
network_ip=str(local_network.cidr),
name='Rede de teste'
)
def test_active(self):
"""
        Test that checks that the network asset is active
"""
session = SnmpSession(DestHost=self.activeip)
result = session.query()
print(result.query[0])
self.assertIsNotNone(result.query[0])
def test_inactive(self):
"""
        Test that identifies that a network asset is inactive
"""
session = SnmpSession(DestHost=self.inactiveip)
result = session.query()
print(result.query[0])
self.assertIsNone(result.query[0])
def test_scan(self):
"""
        Test that scans all of the asset's information
"""
outfile = self.data_dir + "/" + self.localhost + ".xml"
session = NmapSession(
host=self.localhost,
outfile=outfile
)
result = session.scan()
assert result
        # Try to find the file
assert (os.path.isfile(outfile))
def test_scan_rede_full(self):
"""
        Scans every IP on the network and writes the results to a specific file
"""
outfile = self.data_dir + "/" + self.network.network_ip.cidr + ".xml"
session = NmapSession(
host=self.network.network_ip.cidr,
outfile=outfile
)
session.scan()
# List all IP's from directory
self.assertTrue(os.path.isfile(session.outfile))
        # Delete the file
os.unlink(session.outfile)
def test_scan_rede(self):
"""
        Performs a quick scan of every IP on the network and writes the results to a specific file
"""
outfile = self.data_dir + "/" + self.network.network_ip.cidr + ".xml"
session = NmapSession(
host=self.network.network_ip.cidr,
full=False,
outfile=outfile
)
session.scan()
# List all IP's from directory
self.assertTrue(os.path.isfile(session.outfile))
        # Delete the file
os.unlink(session.outfile)
def tearDown(self):
"""
        Delete initial data
"""
# shutil.rmtree(self.data_dir)
pass
| lightbase/cocar-agente | cocar/tests/test_discover.py | Python | gpl-2.0 | 2,936 |
# -*- coding: utf-8 -*-
__author__ = 'herald olivares'
from django.contrib import admin
from upc.sunat.models import Person, Concept, Debt
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'ruc', 'phone', 'type')
class ConceptAdmin(admin.ModelAdmin):
pass
class DebtAdmin(admin.ModelAdmin):
list_display = ('concept', 'person', 'period', 'tax_code', 'resolution_number', 'amount')
admin.site.register(Person, PersonAdmin)
admin.site.register(Concept, ConceptAdmin)
admin.site.register(Debt, DebtAdmin)
| heraldmatias/dew | django-sunat/src/upc/sunat/admin.py | Python | gpl-2.0 | 532 |
# -*- coding: utf-8 -*-
import codecs
import json
import unittest
from pkg_resources import resource_filename
from calmjs.parse import es5
class ExamplesTestCase(unittest.TestCase):
"""
    A test case that automatically loads the examples JS module into the
    data attribute of the test instance and provides a shortcut for
    doing assertions.
"""
data = None
test_module_name = 'nunja.stock.tests'
test_examples = NotImplemented
@classmethod
def setUpClass(cls):
if cls.test_examples is NotImplemented:
raise ValueError(
'the class must define the test_examples attribute for data')
with codecs.open(
resource_filename(cls.test_module_name, cls.test_examples),
encoding='utf8') as fd:
cls.data = json.loads(str(es5(fd.read()).children()[0].children(
)[0].initializer))
# TODO also sanity check the resulting object?
def assertDataEqual(self, key, result):
answer = self.data[key][0]
self.assertEqual(answer, result)
| calmjs/nunja.stock | src/nunja/stock/testing/case.py | Python | gpl-2.0 | 1,097 |
"""
Collection of tools for the IQTools library
Xaratustrah
2017
"""
import os
import logging as log
from scipy.signal import hilbert
from scipy.io import wavfile
import xml.etree.ElementTree as et
import numpy as np
import types
import uproot3
import uproot3_methods.classes.TH1
from iqtools.iqbase import IQBase
from iqtools.tcapdata import TCAPData
from iqtools.tdmsdata import TDMSData
from iqtools.bindata import BINData
from iqtools.iqtdata import IQTData
from iqtools.tiqdata import TIQData
from iqtools.csvdata import CSVData
from iqtools.wavdata import WAVData
from iqtools.xdatdata import XDATData
# ------------ TOOLS ----------------------------
def get_iq_object(filename, header_filename=None):
"""
    Return a suitable IQ data object according to the file extension.
    Parameters
    ----------
    filename
        Input file; the extension determines which reader class is used.
    header_filename
        Optional text header file, required for TCAP (.dat) and XDAT files.
    Returns
    -------
    An instance of the matching data class, or None if a required header file is missing.
"""
# Object generation
_, file_extension = os.path.splitext(filename)
iq_data = None
if file_extension.lower() == '.txt' or file_extension.lower() == '.csv':
log.info('This is an ASCII file.')
iq_data = CSVData(filename)
if file_extension.lower() == '.bin':
log.info('This is a raw binary file.')
iq_data = BINData(filename)
if file_extension.lower() == '.wav':
log.info('This is a wav file.')
iq_data = WAVData(filename)
if file_extension.lower() == '.iqt':
log.info('This is an iqt file.')
iq_data = IQTData(filename)
if file_extension.lower() == '.iq':
log.info('This is an iq file.')
iq_data = IQTData(filename)
if file_extension.lower() == '.tiq':
log.info('This is a tiq file.')
iq_data = TIQData(filename)
if file_extension.lower() == '.tdms':
log.info('This is a TDMS file.')
iq_data = TDMSData(filename)
if file_extension.lower() == '.dat':
log.info('This is a TCAP file.')
if not header_filename:
log.info('TCAP files need a text header file as well. Aborting....')
return None
else:
iq_data = TCAPData(filename, header_filename)
if file_extension.lower() == '.xdat':
log.info('This is a XDAT file.')
if not header_filename:
log.info('XDAT files need a text header file as well. Aborting....')
return None
else:
iq_data = XDATData(filename, header_filename)
return iq_data
def get_eng_notation(value, unit='', decimal_place=2):
"""
    Convert a number to engineering notation (SI prefixes in steps of three powers of ten)
    Parameters
    ----------
    value : input number, float or integer
    decimal_place : how many decimal places should be kept
    unit : if given, the SI prefix and unit are appended, otherwise a power of ten
Returns
-------
"""
ref = {24: 'Y', 21: 'Z', 18: 'E', 15: 'P',
12: 'T', 9: 'G', 6: 'M', 3: 'k', 0: '',
-3: 'm', -6: 'u', -9: 'n', -12: 'p',
-15: 'f', -18: 'a', -21: 'z', -24: 'y',
}
if value == 0:
return '{}{}'.format(0, unit)
flag = '-' if value < 0 else ''
num = max([key for key in ref.keys() if abs(value) >= 10 ** key])
if num == 0:
mult = ''
else:
mult = ref[num] if unit else 'e{}'.format(num)
return '{}{}{}{}'.format(flag, int(abs(value) / 10 ** num * 10 ** decimal_place) / 10 ** decimal_place, mult,
unit)
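# For example, get_eng_notation(2.5e6, unit='Hz') returns '2.5MHz'; the value is
# truncated (not rounded) to the requested number of decimal places.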
def get_cplx_spectrogram(x, nframes, lframes):
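    # Reshape the 1-D samples into nframes rows of lframes samples and FFT each row.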
sig = np.reshape(x, (nframes, lframes))
zz = np.fft.fft(sig, axis=1)
return zz
def get_inv_cplx_spectrogram(zz, nframes, lframes):
inv_zz = np.fft.ifft(zz, axis=1)
inv_zz = np.reshape(inv_zz, (1, nframes * lframes))[0]
return inv_zz
def get_root_th2d(xx, yy, zz, name='', title=''):
from ROOT import TH2D
h = TH2D(name, title, np.shape(xx)[
1], xx[0, 0], xx[0, -1], np.shape(yy)[0], yy[0, 0], yy[-1, 0])
for j in range(np.shape(yy)[0]):
for i in range(np.shape(xx)[1]):
h.SetBinContent(i, j, zz[j, i])
return h
def make_test_signal(f, fs, length=1, nharm=0, noise=False):
"""Make a sine signal with/without noise."""
t = np.arange(0, length, 1 / fs)
x = np.zeros(len(t))
for i in range(nharm + 2):
x += np.sin(2 * np.pi * i * f * t)
if noise:
x += np.random.normal(0, 1, len(t))
return t, x
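# Note: the i = 0 term of the loop contributes nothing, so the result contains the
# fundamental plus harmonics up to (nharm + 1) * f.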
def shift_phase(x, phase):
"""
Shift phase in frequency domain
x: complex or analytical signal
phase: amount in radians
returns: shifted complex signal
"""
XX = np.fft.fft(x)
angle = np.unwrap(np.angle(XX)) + phase
YY = np.abs(XX) * np.exp(1j * angle)
return np.fft.ifft(YY)
def write_signal_to_bin(cx, filename, fs=1, center=0, write_header=True):
"""
    filename: name of the output file (a '.bin' suffix is appended)
    cx: complex data vector to write to the file
    fs: sampling frequency
    center: center frequency
    write_header: if set to True, the first 4 bytes of the file are the 32-bit
    sampling frequency, followed by the center frequency, also as 32-bit. The
    data follows afterwards in I, Q format, each component 32-bit as well.
"""
# 32-bit little endian floats
# insert header
if write_header:
cx = np.insert(cx, 0, complex(fs, center))
cx = cx.astype(np.complex64)
cx.tofile(filename + '.bin')
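# Illustrative sketch (an assumption, not part of the original API): a file written
# as write_signal_to_bin(cx, 'out', fs, center) with write_header=True could be read
# back roughly like this:
#
#     raw = np.fromfile('out.bin', dtype=np.complex64)
#     fs, center = raw[0].real, raw[0].imag
#     cx = raw[1:]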
def write_signal_to_csv(filename, cx, fs=1, center=0):
# insert ascii header which looks like a complex number
cx = np.insert(cx, 0, complex(fs, center))
with open(filename + '.csv', 'w') as f:
for i in range(len(cx)):
f.write('{}|{}\n'.format(
np.real(cx[i]), np.imag(cx[i])))
def write_signal_to_wav(filename, cx, fs=1):
""" Save the singal as an audio wave """
wavfile.write(filename + '.wav', fs,
abs(cx) / max(abs(cx)))
def make_analytical(x):
"""Make an analytical signal from the real signal"""
yy = hilbert(x)
ii = np.real(yy)
qq = np.imag(yy)
x_bar = np.vectorize(complex)(ii, qq)
ins_ph = np.angle(x_bar) * 180 / np.pi
return x_bar, ins_ph
def read_result_csv(filename):
"""
Read special format CSV result file from RSA5000 series output
:param filename:
:return:
"""
p = np.genfromtxt(filename, skip_header=63)
with open(filename) as f:
cont = f.readlines()
for l in cont:
l = l.split(',')
if 'Frequency' in l and len(l) == 3:
center = float(l[1])
if 'XStart' in l and len(l) == 3:
start = float(l[1])
if 'XStop' in l and len(l) == 3:
stop = float(l[1])
f = np.linspace(start - center, stop - center, len(p))
return f, p
def read_specan_xml(filename):
"""
    Read the resulting Specan trace file saved from the Tektronix RSA5000 series;
    these files are produced while saving traces.
:param filename:
:return:
"""
with open(filename, 'rb') as f:
ba = f.read()
xml_tree_root = et.fromstring(ba)
for elem in xml_tree_root.iter(tag='Count'):
count = int(elem.text)
for elem in xml_tree_root.iter(tag='XStart'):
start = float(elem.text)
for elem in xml_tree_root.iter(tag='XStop'):
stop = float(elem.text)
for elem in xml_tree_root.iter(tag='XUnits'):
xunits = elem.text
for elem in xml_tree_root.iter(tag='YUnits'):
yunits = elem.text
for elem in xml_tree_root.iter(tag='y'):
pwr = float(elem.text)
p = np.zeros(count)
i = 0
for elem in xml_tree_root.iter(tag='y'):
p[i] = float(elem.text)
i += 1
f = np.linspace(start, stop, count)
return f, p, (xunits, yunits)
def read_data_csv(filename):
"""
Read special format CSV data file from RSA5100 series output.
Please note that 50 ohm power termination is already considered
for these data.
:param filename:
:return:
"""
data = np.genfromtxt(filename, skip_header=10, delimiter=",")
# has one dimension more, should use ravel
data = np.ravel(data).view(dtype='c16')
return data
def parse_filename(filename):
"""
Parses filenames of experimental data in the following format:
58Ni26+_374MeVu_250uA_pos_0_0.tiq
:param filename:
:return:
"""
filename = filename.split('_')
descr = filename[0]
energy = float(filename[1].replace('MeVu', 'e6'))
current = float(filename[2].replace('uA', 'e-6'))
return descr, energy, current
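# For example, parse_filename('58Ni26+_374MeVu_250uA_pos_0_0.tiq') returns
# ('58Ni26+', 374000000.0, 0.00025).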
def write_timedata_to_npy(iq_obj):
"""Saves the dictionary to a numpy file."""
np.save(iq_obj.filename_wo_ext + '.npy', vars(iq_obj))
def write_timedata_to_root(iq_obj):
with uproot3.recreate(iq_obj.filename_wo_ext + '.root') as f:
f['t_f_samp'] = uproot3.newtree(
{'f_samp': uproot3.newbranch(np.int32, title='Sampling frequency'),
})
f['t_f_center'] = uproot3.newtree(
{'f_center': uproot3.newbranch(np.int32, title='Center frequency'),
})
f['t_timedata'] = uproot3.newtree(
{'timedata': uproot3.newbranch(np.float64, title='Time domain signal power')})
f['t_f_samp'].extend({'f_samp': np.array([int(iq_obj.fs)])})
f['t_f_center'].extend({'f_center': np.array([int(iq_obj.center)])})
f['t_timedata'].extend({'timedata': np.abs(iq_obj.data_array)**2})
def write_spectrum_to_csv(ff, pp, filename, center=0):
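    # Writes three '|'-separated columns: frequency offset [Hz], power [W], power [dBm].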
a = np.concatenate(
(ff, pp, IQBase.get_dbm(pp)))
b = np.reshape(a, (3, -1)).T
np.savetxt(filename, b, header='Delta f [Hz] @ {:.2e} [Hz]|Power [W]|Power [dBm]'.format(
center), delimiter='|')
def write_spectrum_to_root(ff, pp, filename, center=0, title=''):
class MyTH1(uproot3_methods.classes.TH1.Methods, list):
def __init__(self, low, high, values, title=""):
self._fXaxis = types.SimpleNamespace()
self._fXaxis._fNbins = len(values)
self._fXaxis._fXmin = low
self._fXaxis._fXmax = high
values.insert(0, 0)
values.append(0)
for x in values:
self.append(float(x))
self._fTitle = title
self._classname = "TH1F"
th1f = MyTH1(center + ff[0], center + ff[-1], pp.tolist(), title=title)
file = uproot3.recreate(filename + '.root', compression=uproot3.ZLIB(4))
file["th1f"] = th1f
| xaratustrah/iq_suite | iqtools/tools.py | Python | gpl-2.0 | 10,337 |
#!/usr/bin/python
import os
import shutil
import zipfile
import zlib
import os.path
def _ignore(src, name ):
if src == './UPLOAD/':
print name
return ['RESTDIR', 'conf.inc.php', 'Nuked-Klan.zip', '.hg', 'make.py']
else:
return []
def _RecImport(src, dst, zip):
elements = os.listdir(src)
ignored = _ignore('.' + dst, elements)
for delete in ignored:
try:
elements.remove(delete)
except ValueError:
pass
for ele in elements:
if os.path.isfile(src + ele):
zip.write(src + ele, dst + ele)
print dst + ele
elif os.path.isdir(src + ele):
_RecImport(src + ele + '/', dst + ele + '/', zip)
try:
shutil.rmtree('tmp')
except OSError:
pass
file = 'Nuked-Klan.zip'
try:
os.remove(file)
except OSError:
pass
zip = zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED, False)
_RecImport('RESTDIR/', '/', zip)
_RecImport('./', '/UPLOAD/', zip)
| donaldinou/nuked-gamer | make.py | Python | gpl-2.0 | 988 |
# Calliope
# Copyright (C) 2017, 2018 Sam Thursfield <sam@afuera.me.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import testutils
@pytest.fixture()
def cli():
'''Fixture for testing through the `cpe` commandline interface.'''
return testutils.Cli()
| ssssam/calliope | tests/conftest.py | Python | gpl-2.0 | 872 |
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisidev import unittest
from .key_signature import KeySignature
class KeySignatureTest(unittest.TestCase):
def test_equal(self):
self.assertEqual(KeySignature(name='C major'),
KeySignature(name='C major'))
self.assertNotEqual(KeySignature(name='C major'),
KeySignature(name='G major'))
def test_compare_with_bad_class(self):
with self.assertRaises(TypeError):
# pylint: disable=expression-not-assigned
KeySignature() == 'foo'
def test_preset_names(self):
self.assertEqual(KeySignature(name='G major').accidentals, ['F#'])
self.assertEqual(KeySignature(name='G minor').accidentals, ['Bb', 'Eb'])
| odahoda/noisicaa | noisicaa/value_types/key_signature_test.py | Python | gpl-2.0 | 1,559 |
from functools import partial
from navmazing import NavigateToSibling, NavigateToAttribute
from cfme import Credential
from cfme.exceptions import CandidateNotFound, OptionNotAvailable
import cfme.fixtures.pytest_selenium as sel
import cfme.web_ui.toolbar as tb
from cfme.web_ui import (
AngularSelect, Form, Select, CheckboxTree, accordion, fill, flash,
form_buttons, Input, Table, UpDownSelect, CFMECheckbox, BootstrapTreeview)
from cfme.web_ui.form_buttons import change_stored_password
from utils import version
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from utils.log import logger
from utils.pretty import Pretty
from utils.update import Updateable
tb_select = partial(tb.select, "Configuration")
pol_btn = partial(tb.select, "Policy")
edit_tags_form = Form(
fields=[
("select_tag", Select("select#tag_cat")),
("select_value", Select("select#tag_add"))
])
tag_table = Table("//div[@id='assignments_div']//table")
users_table = Table("//div[@id='records_div']//table")
group_order_selector = UpDownSelect(
"select#seq_fields",
"//img[@alt='Move selected fields up']",
"//img[@alt='Move selected fields down']")
def simple_user(userid, password):
creds = Credential(principal=userid, secret=password)
return User(name=userid, credential=creds)
class User(Updateable, Pretty, Navigatable):
user_form = Form(
fields=[
('name_txt', Input('name')),
('userid_txt', Input('userid')),
('password_txt', Input('password')),
('password_verify_txt', Input('verify')),
('email_txt', Input('email')),
('user_group_select', AngularSelect('chosen_group')),
])
pretty_attrs = ['name', 'group']
def __init__(self, name=None, credential=None, email=None, group=None, cost_center=None,
value_assign=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.credential = credential
self.email = email
self.group = group
self.cost_center = cost_center
self.value_assign = value_assign
self._restore_user = None
def __enter__(self):
if self._restore_user != self.appliance.user:
from cfme.login import logout
logger.info('Switching to new user: %s', self.credential.principal)
self._restore_user = self.appliance.user
logout()
self.appliance.user = self
def __exit__(self, *args, **kwargs):
if self._restore_user != self.appliance.user:
from cfme.login import logout
logger.info('Restoring to old user: %s', self._restore_user.credential.principal)
logout()
self.appliance.user = self._restore_user
self._restore_user = None
def create(self):
navigate_to(self, 'Add')
fill(self.user_form, {'name_txt': self.name,
'userid_txt': self.credential.principal,
'password_txt': self.credential.secret,
'password_verify_txt': self.credential.verify_secret,
'email_txt': self.email,
'user_group_select': getattr(self.group,
'description', None)},
action=form_buttons.add)
flash.assert_success_message('User "{}" was saved'.format(self.name))
def update(self, updates):
navigate_to(self, 'Edit')
change_stored_password()
new_updates = {}
if 'credential' in updates:
new_updates.update({
'userid_txt': updates.get('credential').principal,
'password_txt': updates.get('credential').secret,
'password_verify_txt': updates.get('credential').verify_secret
})
if self.appliance.version >= '5.7':
self.name = updates.get('credential').principal
new_updates.update({
'name_txt': updates.get('name'),
'email_txt': updates.get('email'),
'user_group_select': getattr(
updates.get('group'),
'description', None)
})
fill(self.user_form, new_updates, action=form_buttons.save)
flash.assert_success_message(
'User "{}" was saved'.format(updates.get('name', self.name)))
def copy(self):
navigate_to(self, 'Details')
tb.select('Configuration', 'Copy this User to a new User')
new_user = User(name=self.name + "copy",
credential=Credential(principal='redhat', secret='redhat'))
change_stored_password()
fill(self.user_form, {'name_txt': new_user.name,
'userid_txt': new_user.credential.principal,
'password_txt': new_user.credential.secret,
'password_verify_txt': new_user.credential.verify_secret},
action=form_buttons.add)
flash.assert_success_message('User "{}" was saved'.format(new_user.name))
return new_user
def delete(self):
navigate_to(self, 'Details')
tb.select('Configuration', 'Delete this User', invokes_alert=True)
sel.handle_alert()
flash.assert_success_message('EVM User "{}": Delete successful'.format(self.name))
def edit_tags(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this User", invokes_alert=True)
fill(edit_tags_form, {'select_tag': tag,
'select_value': value},
action=form_buttons.save)
flash.assert_success_message('Tag edits were successfully saved')
def remove_tag(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this User", invokes_alert=True)
row = tag_table.find_row_by_cells({'category': tag, 'assigned_value': value},
partial_check=True)
sel.click(row[0])
form_buttons.save()
flash.assert_success_message('Tag edits were successfully saved')
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@property
def description(self):
return self.credential.principal
@navigator.register(User, 'All')
class UserAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree(
"Access Control",
self.obj.appliance.server.zone.region.settings_string, "Users")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(User, 'Add')
class UserAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select("Add a new User")
@navigator.register(User, 'Details')
class UserDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
accordion.tree(
"Access Control",
self.obj.appliance.server.zone.region.settings_string,
"Users",
self.obj.name
)
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(User, 'Edit')
class UserEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
tb_select('Edit this User')
class Group(Updateable, Pretty, Navigatable):
group_form = Form(
fields=[
('ldap_groups_for_user', AngularSelect("ldap_groups_user")),
('description_txt', Input('description')),
('lookup_ldap_groups_chk', Input('lookup')),
('role_select', AngularSelect("group_role")),
('group_tenant', AngularSelect("group_tenant"), {"appeared_in": "5.5"}),
('user_to_look_up', Input('user')),
('username', Input('user_id')),
('password', Input('password')),
])
pretty_attrs = ['description', 'role']
def __init__(self, description=None, role=None, tenant="My Company", user_to_lookup=None,
ldap_credentials=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.description = description
self.role = role
self.tenant = tenant
self.ldap_credentials = ldap_credentials
self.user_to_lookup = user_to_lookup
def create(self):
navigate_to(self, 'Add')
fill(self.group_form, {'description_txt': self.description,
'role_select': self.role,
'group_tenant': self.tenant},
action=form_buttons.add)
flash.assert_success_message('Group "{}" was saved'.format(self.description))
def _retrieve_ldap_user_groups(self):
navigate_to(self, 'Add')
fill(self.group_form, {'lookup_ldap_groups_chk': True,
'user_to_look_up': self.user_to_lookup,
'username': self.ldap_credentials.principal,
'password': self.ldap_credentials.secret,
},)
sel.wait_for_element(form_buttons.retrieve)
sel.click(form_buttons.retrieve)
def _retrieve_ext_auth_user_groups(self):
navigate_to(self, 'Add')
fill(self.group_form, {'lookup_ldap_groups_chk': True,
'user_to_look_up': self.user_to_lookup,
},)
sel.wait_for_element(form_buttons.retrieve)
sel.click(form_buttons.retrieve)
def add_group_from_ldap_lookup(self):
self._retrieve_ldap_user_groups()
fill(self.group_form, {'ldap_groups_for_user': self.description,
'description_txt': self.description,
'role_select': self.role,
'group_tenant': self.tenant,
},
action=form_buttons.add)
flash.assert_success_message('Group "{}" was saved'.format(self.description))
def add_group_from_ext_auth_lookup(self):
self._retrieve_ext_auth_user_groups()
fill(self.group_form, {'ldap_groups_for_user': self.description,
'description_txt': self.description,
'role_select': self.role,
'group_tenant': self.tenant,
},
action=form_buttons.add)
flash.assert_success_message('Group "{}" was saved'.format(self.description))
def update(self, updates):
navigate_to(self, 'Edit')
fill(self.group_form, {'description_txt': updates.get('description'),
'role_select': updates.get('role'),
'group_tenant': updates.get('tenant')},
action=form_buttons.save)
flash.assert_success_message(
'Group "{}" was saved'.format(updates.get('description', self.description)))
def delete(self):
navigate_to(self, 'Details')
tb_select('Delete this Group', invokes_alert=True)
sel.handle_alert()
flash.assert_success_message('EVM Group "{}": Delete successful'.format(self.description))
def edit_tags(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this Group", invokes_alert=True)
fill(edit_tags_form, {'select_tag': tag,
'select_value': value},
action=form_buttons.save)
flash.assert_success_message('Tag edits were successfully saved')
def remove_tag(self, tag, value):
navigate_to(self, 'Details')
pol_btn("Edit 'My Company' Tags for this Group", invokes_alert=True)
row = tag_table.find_row_by_cells({'category': tag, 'assigned_value': value},
partial_check=True)
sel.click(row[0])
form_buttons.save()
flash.assert_success_message('Tag edits were successfully saved')
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@navigator.register(Group, 'All')
class GroupAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree("Access Control", self.obj.appliance.server_region_string(), "Groups")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Group, 'Add')
class GroupAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select("Add a new Group")
@navigator.register(Group, 'EditGroupSequence')
class EditGroupSequence(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select('Edit Sequence of User Groups for LDAP Look Up')
@navigator.register(Group, 'Details')
class GroupDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
accordion.tree(
"Access Control", self.obj.appliance.server_region_string(),
"Groups", self.obj.description
)
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Group, 'Edit')
class GroupEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
tb_select('Edit this Group')
def get_group_order():
navigate_to(Group, 'EditGroupSequence')
return group_order_selector.get_items()
def set_group_order(items):
original_order = get_group_order()
    # Compare only against the same number of items
original_order = original_order[:len(items)]
if items == original_order:
return # Ignore that, would cause error on Save click
fill(group_order_selector, items)
sel.click(form_buttons.save)
class Role(Updateable, Pretty, Navigatable):
form = Form(
fields=[
('name_txt', Input('name')),
('vm_restriction_select', AngularSelect('vm_restriction')),
('product_features_tree', {
version.LOWEST: CheckboxTree("//div[@id='features_treebox']/ul"),
'5.7': BootstrapTreeview("features_treebox")}),
])
pretty_attrs = ['name', 'product_features']
def __init__(self, name=None, vm_restriction=None, product_features=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.vm_restriction = vm_restriction
self.product_features = product_features or []
def create(self):
navigate_to(self, 'Add')
fill(self.form, {'name_txt': self.name,
'vm_restriction_select': self.vm_restriction,
'product_features_tree': self.product_features},
action=form_buttons.add)
flash.assert_success_message('Role "{}" was saved'.format(self.name))
def update(self, updates):
navigate_to(self, 'Edit')
fill(self.form, {'name_txt': updates.get('name'),
'vm_restriction_select': updates.get('vm_restriction'),
'product_features_tree': updates.get('product_features')},
action=form_buttons.save)
flash.assert_success_message('Role "{}" was saved'.format(updates.get('name', self.name)))
def delete(self):
navigate_to(self, 'Details')
tb_select('Delete this Role', invokes_alert=True)
sel.handle_alert()
flash.assert_success_message('Role "{}": Delete successful'.format(self.name))
def copy(self, name=None):
if not name:
name = self.name + "copy"
navigate_to(self, 'Details')
tb.select('Configuration', 'Copy this Role to a new Role')
new_role = Role(name=name)
fill(self.form, {'name_txt': new_role.name},
action=form_buttons.add)
flash.assert_success_message('Role "{}" was saved'.format(new_role.name))
return new_role
@navigator.register(Role, 'All')
class RoleAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree("Access Control", self.obj.appliance.server_region_string(), "Roles")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Role, 'Add')
class RoleAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
tb_select("Add a new Role")
@navigator.register(Role, 'Details')
class RoleDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
accordion.tree(
"Access Control", self.obj.appliance.server_region_string(), "Roles", self.obj.name
)
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Role, 'Edit')
class RoleEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
tb_select('Edit this Role')
class Tenant(Updateable, Pretty, Navigatable):
""" Class representing CFME tenants in the UI.
* Kudos to mfalesni *
The behaviour is shared with Project, which is the same except it cannot create more nested
tenants/projects.
Args:
name: Name of the tenant
description: Description of the tenant
parent_tenant: Parent tenant, can be None, can be passed as string or object
"""
save_changes = form_buttons.FormButton("Save changes")
# TODO:
    # Temporarily defining elements with "//input" because Input() is not working.
    # Seems to be an issue with the HTML elements; looking into it.
quota_form = Form(
fields=[
('cpu_cb', CFMECheckbox('cpu_allocated')),
('cpu_txt', "//input[@id='id_cpu_allocated']"),
('memory_cb', CFMECheckbox('mem_allocated')),
('memory_txt', "//input[@id='id_mem_allocated']"),
('storage_cb', CFMECheckbox('storage_allocated')),
('storage_txt', "//input[@id='id_storage_allocated']"),
('vm_cb', CFMECheckbox('vms_allocated')),
('vm_txt', "//input[@id='id_vms_allocated']"),
('template_cb', CFMECheckbox('templates_allocated')),
('template_txt', "//input[@id='id_templates_allocated']")
])
tenant_form = Form(
fields=[
('name', Input('name')),
('description', Input('description'))
])
pretty_attrs = ["name", "description"]
@classmethod
def get_root_tenant(cls):
return cls(name="My Company", _default=True)
def __init__(self, name=None, description=None, parent_tenant=None, _default=False,
appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.description = description
self.parent_tenant = parent_tenant
self._default = _default
@property
def parent_tenant(self):
if self._default:
return None
if self._parent_tenant:
return self._parent_tenant
return self.get_root_tenant()
@parent_tenant.setter
def parent_tenant(self, tenant):
if tenant is not None and isinstance(tenant, Project):
            # Trying to set a Project as a parent tenant is not allowed
raise ValueError("Project cannot be a parent object.")
if isinstance(tenant, basestring):
# If parent tenant is passed as string,
# we assume that tenant name was passed instead of object
tenant = Tenant(tenant)
self._parent_tenant = tenant
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return self.name == other.name
@property
def exists(self):
try:
navigate_to(self, 'Details')
return True
except CandidateNotFound:
return False
@property
def tree_path(self):
if self._default:
return [self.name]
else:
return self.parent_tenant.tree_path + [self.name]
@property
def parent_path(self):
return self.tree_path[:-1]
def create(self, cancel=False):
if self._default:
raise ValueError("Cannot create the root tenant {}".format(self.name))
navigate_to(self, 'Add')
fill(self.tenant_form, self, action=form_buttons.add)
if type(self) is Tenant:
flash.assert_success_message('Tenant "{}" was saved'.format(self.name))
elif type(self) is Project:
flash.assert_success_message('Project "{}" was saved'.format(self.name))
else:
raise TypeError(
                'No Tenant or Project class passed to create method: {}'.format(
type(self).__name__))
def update(self, updates):
navigate_to(self, 'Edit')
        # Workaround - the form appears after a short delay
sel.wait_for_element(self.tenant_form.description)
fill(self.tenant_form, updates, action=self.save_changes)
flash.assert_success_message(
'Project "{}" was saved'.format(updates.get('name', self.name)))
def delete(self, cancel=False):
navigate_to(self, 'Details')
tb_select("Delete this item", invokes_alert=True)
sel.handle_alert(cancel=cancel)
flash.assert_success_message('Tenant "{}": Delete successful'.format(self.description))
def set_quota(self, **kwargs):
navigate_to(self, 'ManageQuotas')
        # Workaround - the form appears after a short delay
sel.wait_for_element(self.quota_form.cpu_txt)
fill(self.quota_form, {'cpu_cb': kwargs.get('cpu_cb'),
'cpu_txt': kwargs.get('cpu'),
'memory_cb': kwargs.get('memory_cb'),
'memory_txt': kwargs.get('memory'),
'storage_cb': kwargs.get('storage_cb'),
'storage_txt': kwargs.get('storage'),
'vm_cb': kwargs.get('vm_cb'),
'vm_txt': kwargs.get('vm'),
'template_cb': kwargs.get('template_cb'),
'template_txt': kwargs.get('template')},
action=self.save_changes)
flash.assert_success_message('Quotas for Tenant "{}" were saved'.format(self.name))
@navigator.register(Tenant, 'All')
class TenantAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Configuration')
def step(self):
accordion.tree("Access Control", self.obj.appliance.server_region_string(), "Tenants")
def resetter(self):
accordion.refresh("Access Control")
@navigator.register(Tenant, 'Details')
class TenantDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
accordion.tree(
"Access Control", self.obj.appliance.server_region_string(),
"Tenants", *self.obj.tree_path
)
@navigator.register(Tenant, 'Add')
class TenantAdd(CFMENavigateStep):
def prerequisite(self, *args, **kwargs):
navigate_to(self.obj.parent_tenant, 'Details')
def step(self, *args, **kwargs):
if isinstance(self.obj, Tenant):
add_selector = 'Add child Tenant to this Tenant'
elif isinstance(self.obj, Project):
add_selector = 'Add Project to this Tenant'
else:
raise OptionNotAvailable('Object type unsupported for Tenant Add: {}'
.format(type(self.obj).__name__))
tb.select('Configuration', add_selector)
@navigator.register(Tenant, 'Edit')
class TenantEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
tb.select('Configuration', 'Edit this item')
@navigator.register(Tenant, 'ManageQuotas')
class TenantManageQuotas(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
tb.select('Configuration', 'Manage Quotas')
class Project(Tenant):
""" Class representing CFME projects in the UI.
Project cannot create more child tenants/projects.
Args:
name: Name of the project
description: Description of the project
parent_tenant: Parent project, can be None, can be passed as string or object
"""
pass
| jdemon519/cfme_tests | cfme/configure/access_control.py | Python | gpl-2.0 | 24,877 |
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2019 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib.menu_utils import item_post_treatment
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_info
import inputstreamhelper
import json
import re
import urlquick
# TODO
# Find a way for mpd inputstream not protected by DRM to be downloadable by youtube-dl
# Add date info to catch-up tv video
URL_ROOT = "https://www.alsace20.tv"
URL_LIVE = URL_ROOT + "/emb/live1"
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
- ...
"""
resp = urlquick.get(URL_ROOT)
root = resp.parse("ul", attrs={"class": "menu-vod hidden-xs"})
for category_datas in root.iterfind(".//li"):
category_name = category_datas.find('.//a').text
if '#' in category_datas.find('.//a').get('href'):
category_url = URL_ROOT
else:
category_url = URL_ROOT + category_datas.find('.//a').get('href')
item = Listitem()
item.label = category_name
item.set_callback(
list_programs, item_id=item_id, category_url=category_url)
item_post_treatment(item)
yield item
@Route.register
def list_programs(plugin, item_id, category_url, **kwargs):
"""
Build programs listing
- ...
"""
resp = urlquick.get(category_url)
root = resp.parse("div", attrs={"class": "emissions hidden-xs"})
for program_datas in root.iterfind(".//a"):
if 'VOD/est' in category_url:
if 'Est' in program_datas.get('href').split('/')[2]:
program_name = program_datas.find(
".//div[@class='title']").text
program_image = URL_ROOT + re.compile(r'url\((.*?)\)').findall(
program_datas.find(".//div[@class='bg']").get('style'))[0]
program_url = URL_ROOT + program_datas.get('href')
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_videos, item_id=item_id, program_url=program_url)
item_post_treatment(item)
yield item
elif 'VOD' in category_url:
if program_datas.get('href').split('/')[2] in category_url:
program_name = program_datas.find(
".//div[@class='title']").text
program_image = URL_ROOT + re.compile(r'url\((.*?)\)').findall(
program_datas.find(".//div[@class='bg']").get('style'))[0]
program_url = URL_ROOT + program_datas.get('href')
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_videos, item_id=item_id, program_url=program_url)
item_post_treatment(item)
yield item
else:
program_name = program_datas.find(".//div[@class='title']").text
program_image = URL_ROOT + re.compile(r'url\((.*?)\)').findall(
program_datas.find(".//div[@class='bg']").get('style'))[0]
program_url = URL_ROOT + program_datas.get('href')
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_videos, item_id=item_id, program_url=program_url)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, program_url, **kwargs):
resp = urlquick.get(program_url)
root = resp.parse("ul", attrs={"class": "list-vids"})
for video_datas in root.iterfind(".//li"):
video_title = video_datas.find('.//h2').text
video_image = URL_ROOT + '/videoimages/' + video_datas.find(
".//div[@class='img']").get('data-img')
video_plot = ''
if video_datas.find(".//div[@class='resume']").text is not None:
video_plot = video_datas.find(
".//div[@class='resume']").text.strip()
video_url = URL_ROOT + video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.info['plot'] = video_plot
item.set_callback(
get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=False)
yield item
@Resolver.register
def get_video_url(plugin,
item_id,
video_url,
download_mode=False,
**kwargs):
is_helper = inputstreamhelper.Helper('mpd')
if not is_helper.check_inputstream():
return False
resp = urlquick.get(
video_url, headers={"User-Agent": web_utils.get_random_ua()}, max_age=-1)
root = resp.parse()
url_stream_datas = URL_ROOT + root.find(".//div[@class='HDR_VISIO']").get(
"data-url") + "&mode=html"
resp2 = urlquick.get(
url_stream_datas,
headers={"User-Agent": web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(resp2.text)
item = Listitem()
item.path = json_parser["files"]["auto"]
item.property["inputstreamaddon"] = "inputstream.adaptive"
item.property["inputstream.adaptive.manifest_type"] = "mpd"
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
return item
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
is_helper = inputstreamhelper.Helper('mpd')
if not is_helper.check_inputstream():
return False
resp = urlquick.get(
URL_LIVE, headers={"User-Agent": web_utils.get_random_ua()}, max_age=-1)
root = resp.parse()
url_live_datas = URL_ROOT + root.find(".//div[@class='HDR_VISIO']").get(
"data-url") + "&mode=html"
resp2 = urlquick.get(
url_live_datas,
headers={"User-Agent": web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(resp2.text)
item = Listitem()
item.path = json_parser["files"]["auto"]
item.property["inputstreamaddon"] = "inputstream.adaptive"
item.property["inputstream.adaptive.manifest_type"] = "mpd"
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
return item
| SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/channels/fr/alsace20.py | Python | gpl-2.0 | 7,749 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2013-2014 Red Hat
# Author: Cleber Rosa <cleber@redhat.com>
# pylint: disable=E0611
from distutils.core import setup
from sphinx.setup_command import BuildDoc
import arc.version
setup(name='arc',
version=arc.version.VERSION,
description='Autotest RPC Client',
author='Cleber Rosa',
author_email='cleber@redhat.com',
url='http://autotest.github.com',
requires=['pygments'],
packages=['arc',
'arc.cli',
'arc.cli.args',
'arc.cli.actions',
'arc.shared',
'arc.tko'],
data_files=[('/etc/', ['data/arc.conf'])],
cmdclass={'build_doc': BuildDoc},
command_options={'build_doc': {'source_dir':
('setup.py', 'docs/source')}},
scripts=['scripts/arcli'])
| autotest/arc | setup.py | Python | gpl-2.0 | 1,316 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='personalarticle',
unique_together=set([('sub', 'article')]),
),
migrations.AlterUniqueTogether(
name='subscription',
unique_together=set([('owner', 'feed')]),
),
]
| aaronkurtz/gourmand | gourmand/subscriptions/migrations/0002_make_unique_per_user.py | Python | gpl-2.0 | 516 |
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
from invenio.config import CFG_SITE_URL, CFG_SITE_RECORD
from invenio.websubmit_functions.Shared_Functions import ParamFromFile
## Description: function Print_Success_SRV
## This function displays a message telling the user the
## revised files have been correctly received
## Author: T.Baron
## PARAMETERS: -
def Print_Success_SRV(parameters, curdir, form, user_info=None):
"""
This function simply displays a text on the screen, telling the
user the revision went fine. To be used in the Submit New File
(SRV) action.
"""
global rn
sysno = ParamFromFile("%s/%s" % (curdir, 'SN')).strip()
t = "<b>Modification completed!</b><br /><br />"
if sysno:
# If we know the URL of the document, we display it for user's convenience (RQF0800417)
url = '%s/%s/%s' % (CFG_SITE_URL, CFG_SITE_RECORD, sysno)
t = "<br /><br /><b>Document %s (<b><a href='%s'>%s</a></b>) was successfully revised.</b>" % (rn, url, url)
else:
t = "<br /><br /><b>Document %s was successfully revised.</b>" % rn
return t
|
CERNDocumentServer/invenio
|
modules/websubmit/lib/functions/Print_Success_SRV.py
|
Python
|
gpl-2.0
| 1,933
|
import uuid
import time
from hashlib import md5
from ..json_store import LoginTokenStore
__author__ = 'bromix'
class AccessManager(object):
def __init__(self, context):
self._settings = context.get_settings()
self._jstore = LoginTokenStore()
self._json = self._jstore.get_data()
self._user = self._json['access_manager'].get('current_user', '0')
self._last_origin = self._json['access_manager'].get('last_origin', 'plugin.video.youtube')
def get_current_user_id(self):
"""
:return: uuid of the current user
"""
self._json = self._jstore.get_data()
return self._json['access_manager']['users'][self.get_user()]['id']
def get_new_user(self, user_name='', addon_id=''):
"""
:param user_name: string, users name
:param addon_id: string, addon id
:return: a new user dict
"""
uuids = list()
new_uuid = uuid.uuid4().hex
for k in list(self._json['access_manager']['users'].keys()):
user_uuid = self._json['access_manager']['users'][k].get('id')
if user_uuid:
uuids.append(user_uuid)
while new_uuid in uuids:
new_uuid = uuid.uuid4().hex
return {'access_token': '', 'refresh_token': '', 'token_expires': -1, 'last_key_hash': '',
'name': user_name, 'id': new_uuid, 'watch_later': ' WL', 'watch_history': 'HL'}
def get_users(self):
"""
Returns users
:return: users
"""
return self._json['access_manager'].get('users', {})
def set_users(self, users):
"""
Updates the users
:param users: dict, users
:return:
"""
self._json = self._jstore.get_data()
self._json['access_manager']['users'] = users
self._jstore.save(self._json)
def set_user(self, user, switch_to=False):
"""
Updates the user
:param user: string, username
:param switch_to: boolean, change current user
:return:
"""
self._user = user
if switch_to:
self._json = self._jstore.get_data()
self._json['access_manager']['current_user'] = user
self._jstore.save(self._json)
def get_user(self):
"""
Returns the current user
:return: user
"""
return self._user
def get_watch_later_id(self):
"""
Returns the current users watch later playlist id
:return: the current users watch later playlist id
"""
self._json = self._jstore.get_data()
current_playlist_id = self._json['access_manager']['users'].get(self._user, {}).get('watch_later', ' WL')
settings_playlist_id = self._settings.get_string('youtube.folder.watch_later.playlist', '').strip()
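        # a playlist id entered in the addon settings overrides the stored id
        # once; the setting is then cleared after being applied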
if settings_playlist_id and (current_playlist_id != settings_playlist_id):
self._json['access_manager']['users'][self._user]['watch_later'] = settings_playlist_id
self._jstore.save(self._json)
self._settings.set_string('youtube.folder.watch_later.playlist', '')
return self._json['access_manager']['users'].get(self._user, {}).get('watch_later', ' WL')
def set_watch_later_id(self, playlist_id):
"""
Sets the current users watch later playlist id
:param playlist_id: string, watch later playlist id
:return:
"""
self._json = self._jstore.get_data()
self._json['access_manager']['users'][self._user]['watch_later'] = playlist_id
self._settings.set_string('youtube.folder.watch_later.playlist', '')
self._jstore.save(self._json)
def get_watch_history_id(self):
"""
Returns the current users watch history playlist id
:return: the current users watch history playlist id
"""
self._json = self._jstore.get_data()
current_playlist_id = self._json['access_manager']['users'].get(self._user, {}).get('watch_history', 'HL')
settings_playlist_id = self._settings.get_string('youtube.folder.history.playlist', '').strip()
if settings_playlist_id and (current_playlist_id != settings_playlist_id):
self._json['access_manager']['users'][self._user]['watch_history'] = settings_playlist_id
self._jstore.save(self._json)
self._settings.set_string('youtube.folder.history.playlist', '')
return self._json['access_manager']['users'].get(self._user, {}).get('watch_history', 'HL')
def set_watch_history_id(self, playlist_id):
"""
Sets the current users watch history playlist id
:param playlist_id: string, watch history playlist id
:return:
"""
self._json = self._jstore.get_data()
self._json['access_manager']['users'][self._user]['watch_history'] = playlist_id
self._settings.set_string('youtube.folder.history.playlist', '')
self._jstore.save(self._json)
def set_last_origin(self, origin):
"""
        Updates the last origin
        :param origin: string, origin
:return:
"""
self._last_origin = origin
self._json = self._jstore.get_data()
self._json['access_manager']['last_origin'] = origin
self._jstore.save(self._json)
def get_last_origin(self):
"""
Returns the last origin
:return:
"""
return self._last_origin
def get_access_token(self):
"""
Returns the access token for some API
:return: access_token
"""
self._json = self._jstore.get_data()
return self._json['access_manager']['users'].get(self._user, {}).get('access_token', '')
def get_refresh_token(self):
"""
Returns the refresh token
:return: refresh token
"""
self._json = self._jstore.get_data()
return self._json['access_manager']['users'].get(self._user, {}).get('refresh_token', '')
def has_refresh_token(self):
return self.get_refresh_token() != ''
def is_access_token_expired(self):
"""
Returns True if the access_token is expired otherwise False.
        If an access_token exists but no expiration date was provided,
        the token is treated as not expired and this method returns False
:return:
"""
self._json = self._jstore.get_data()
access_token = self._json['access_manager']['users'].get(self._user, {}).get('access_token', '')
expires = int(self._json['access_manager']['users'].get(self._user, {}).get('token_expires', -1))
# with no access_token it must be expired
if not access_token:
return True
# in this case no expiration date was set
if expires == -1:
return False
now = int(time.time())
return expires <= now
def update_access_token(self, access_token, unix_timestamp=None, refresh_token=None):
"""
Updates the old access token with the new one.
:param access_token:
:param unix_timestamp:
:param refresh_token:
:return:
"""
self._json = self._jstore.get_data()
self._json['access_manager']['users'][self._user]['access_token'] = access_token
if unix_timestamp is not None:
self._json['access_manager']['users'][self._user]['token_expires'] = int(unix_timestamp)
if refresh_token is not None:
self._json['access_manager']['users'][self._user]['refresh_token'] = refresh_token
self._jstore.save(self._json)
def get_new_developer(self, addon_id):
"""
:param addon_id: string, addon id
:return: a new developer dict
"""
return {'access_token': '', 'refresh_token': '', 'token_expires': -1, 'last_key_hash': ''}
def get_developers(self):
"""
Returns developers
:return: dict, developers
"""
return self._json['access_manager'].get('developers', {})
def set_developers(self, developers):
"""
        Updates the developers
:param developers: dict, developers
:return:
"""
self._json = self._jstore.get_data()
self._json['access_manager']['developers'] = developers
self._jstore.save(self._json)
def get_dev_access_token(self, addon_id):
"""
Returns the access token for some API
:param addon_id: addon id
:return: access_token
"""
self._json = self._jstore.get_data()
return self._json['access_manager']['developers'].get(addon_id, {}).get('access_token', '')
def get_dev_refresh_token(self, addon_id):
"""
Returns the refresh token
:return: refresh token
"""
self._json = self._jstore.get_data()
return self._json['access_manager']['developers'].get(addon_id, {}).get('refresh_token', '')
def developer_has_refresh_token(self, addon_id):
return self.get_dev_refresh_token(addon_id) != ''
def is_dev_access_token_expired(self, addon_id):
"""
Returns True if the access_token is expired otherwise False.
        If an access_token exists but no expiration date was provided,
        the token is treated as not expired and this method returns False
:return:
"""
self._json = self._jstore.get_data()
access_token = self._json['access_manager']['developers'].get(addon_id, {}).get('access_token', '')
expires = int(self._json['access_manager']['developers'].get(addon_id, {}).get('token_expires', -1))
# with no access_token it must be expired
if not access_token:
return True
# in this case no expiration date was set
if expires == -1:
return False
now = int(time.time())
return expires <= now
def update_dev_access_token(self, addon_id, access_token, unix_timestamp=None, refresh_token=None):
"""
Updates the old access token with the new one.
:param addon_id:
:param access_token:
:param unix_timestamp:
:param refresh_token:
:return:
"""
self._json = self._jstore.get_data()
self._json['access_manager']['developers'][addon_id]['access_token'] = access_token
if unix_timestamp is not None:
self._json['access_manager']['developers'][addon_id]['token_expires'] = int(unix_timestamp)
if refresh_token is not None:
self._json['access_manager']['developers'][addon_id]['refresh_token'] = refresh_token
self._jstore.save(self._json)
def get_dev_last_key_hash(self, addon_id):
self._json = self._jstore.get_data()
return self._json['access_manager']['developers'][addon_id]['last_key_hash']
def set_dev_last_key_hash(self, addon_id, key_hash):
self._json = self._jstore.get_data()
self._json['access_manager']['developers'][addon_id]['last_key_hash'] = key_hash
self._jstore.save(self._json)
def dev_keys_changed(self, addon_id, api_key, client_id, client_secret):
self._json = self._jstore.get_data()
last_hash = self._json['access_manager']['developers'][addon_id]['last_key_hash']
current_hash = self.__calc_key_hash(api_key, client_id, client_secret)
if not last_hash and current_hash:
self.set_dev_last_key_hash(addon_id, current_hash)
return False
if last_hash != current_hash:
self.set_dev_last_key_hash(addon_id, current_hash)
return True
else:
return False
@staticmethod
def __calc_key_hash(api_key, client_id, client_secret):
m = md5()
try:
m.update(api_key.encode('utf-8'))
m.update(client_id.encode('utf-8'))
m.update(client_secret.encode('utf-8'))
except:
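            # fall back to hashing the raw values when utf-8 encoding fails
            # (e.g. already-encoded byte strings under Python 2)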
m.update(api_key)
m.update(client_id)
m.update(client_secret)
return m.hexdigest()
|
kreatorkodi/repository.torrentbr
|
plugin.video.youtube/resources/lib/youtube_plugin/kodion/utils/access_manager.py
|
Python
|
gpl-2.0
| 12,104
|
# This configuration was automatically generated by install.sh
from os.path import dirname, join as path_join
# This configuration file specifies the global setup of the brat
# server. It is recommended that you use the installation script
# instead of editing this file directly. To do this, run the following
# command in the brat directory:
#
# ./install.sh
#
# if you wish to configure the server manually, you will first need to
# make sure that this file appears as config.py in the brat server
# root directory. If this file is currently named config_template.py,
# you can do this as follows:
#
# cp config_template.py config.py
#
# you will then need to edit config.py, minimally replacing all
# instances of the string CHANGE_ME with their appropriate values.
# Please note that these values MUST appear in quotes, e.g. as in
#
# ADMIN_CONTACT_EMAIL = 'cbogart@cs.cmu.edu'
# Contact email for users to use if the software encounters errors
ADMIN_CONTACT_EMAIL = 'cbogart@cs.cmu.edu'
# Directories required by the brat server:
#
# BASE_DIR: directory in which the server is installed
# DATA_DIR: directory containing texts and annotations
# WORK_DIR: directory that the server uses for temporary files
#
BASE_DIR = dirname(__file__)
DATA_DIR = path_join(BASE_DIR, 'data')
WORK_DIR = path_join(BASE_DIR, 'work')
# If you have installed brat as suggested in the installation
# instructions, you can set up BASE_DIR, DATA_DIR and WORK_DIR by
# removing the three lines above and deleting the initial '#'
# character from the following four lines:
#from os.path import dirname, join
#BASE_DIR = dirname(__file__)
#DATA_DIR = path_join(BASE_DIR, 'data')
#WORK_DIR = path_join(BASE_DIR, 'work')
# To allow editing, include at least one USERNAME:PASSWORD pair below.
# The format is the following:
#
# 'USERNAME': 'PASSWORD',
#
# For example, user `editor` and password `annotate`:
#
# 'editor': 'annotate',
USER_PASSWORD = {
'discoursedb': 'discoursedb'
}
########## ADVANCED CONFIGURATION OPTIONS ##########
# The following options control advanced aspects of the brat server
# setup. It is not necessary to edit these in a basic brat server
# installation.
### MAX_SEARCH_RESULT_NUMBER
# It may be a good idea to limit the max number of results to a search
# as very high numbers can be demanding of both server and clients.
# (unlimited if not defined or <= 0)
MAX_SEARCH_RESULT_NUMBER = 1000
### DEBUG
# Set to True to enable additional debug output
DEBUG = False
### LOG_LEVEL
# If you are a developer you may want to turn on extensive server
# logging by enabling LOG_LEVEL = LL_DEBUG
LL_DEBUG, LL_INFO, LL_WARNING, LL_ERROR, LL_CRITICAL = range(5)
LOG_LEVEL = LL_WARNING
#LOG_LEVEL = LL_DEBUG
### BACKUP_DIR
# Define to enable backups
# from os.path import join
#BACKUP_DIR = join(WORK_DIR, 'backup')
try:
assert DATA_DIR != BACKUP_DIR, 'DATA_DIR cannot equal BACKUP_DIR'
except NameError:
pass # BACKUP_DIR most likely not defined
### SVG_CONVERSION_COMMANDS
# If export to formats other than SVG is needed, the server must have
# a software capable of conversion like inkscape set up, and the
# following must be defined.
# (SETUP NOTE: at least Inkscape 0.46 requires the directory
# ".gnome2/" in the apache home directory and will crash if it doesn't
# exist.)
#SVG_CONVERSION_COMMANDS = [
# ('png', 'inkscape --export-area-drawing --without-gui --file=%s --export-png=%s'),
# ('pdf', 'inkscape --export-area-drawing --without-gui --file=%s --export-pdf=%s'),
# ('eps', 'inkscape --export-area-drawing --without-gui --file=%s --export-eps=%s'),
#]
|
DiscourseDB/discoursedb-core
|
composeddb/brat/config.py
|
Python
|
gpl-2.0
| 3,622
|
#!/usr/bin/python
from body3 import *
function_decl(link='extern',srcp='eval.c:216',
body=bind_expr(
body=statement_list(
E0=decl_expr(
ftype=void_type(algn='8',name='126')),
E1=decl_expr(
ftype=void_type(algn='8',name='126')),
E2=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:53',used='1',
name=identifier_node(string='need_here_doc')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
E3=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=void_type(algn='8',name='126')),
E4=cond_expr(
OP0=truth_andif_expr(
OP0=ne_expr(
OP0=var_decl(algn='32',srcp='shell.h:94',used='1',
name=identifier_node(string='interactive')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=ne_expr(
OP0=nop_expr(
OP0=component_ref(
OP0=var_decl(algn='64',srcp='input.h:89',used='1',
name=identifier_node(string='bash_input')),
OP1=field_decl(algn='32',srcp='input.h:82',
name=identifier_node(string='type'))),
ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')),
OP1=integer_cst(low='3',
ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')))),
OP1=statement_list(
E0=modify_expr(
OP0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
OP1=call_expr(
E0=nop_expr(
OP0=addr_expr(
OP0=pointer_type(algn='64'),
ftype=string_cst(string='PROMPT_COMMAND',
ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))),
ftype=pointer_type(algn='64',ptd='906',size='22')),
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=pointer_type(algn='64',ptd='9',size='22')),
ftype=pointer_type(algn='64',ptd='9',size='22')),
E1=cond_expr(
OP0=ne_expr(
OP0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
OP1=integer_cst(low='0',
ftype=pointer_type(algn='64',ptd='9',size='22')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
OP1=call_expr(
E0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
E1=nop_expr(
OP0=addr_expr(
OP0=pointer_type(algn='64'),
ftype=string_cst(string='PROMPT_COMMAND',
ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))),
ftype=pointer_type(algn='64',ptd='9',size='22')),
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='10721',link='extern',name='10720',srcp='input.h:105')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126')),
E2=cond_expr(
OP0=eq_expr(
OP0=var_decl(algn='32',srcp='eval.c:51',used='1',
name=identifier_node(string='running_under_emacs')),
OP1=integer_cst(low='2',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126'))),
ftype=void_type(algn='8',name='126')),
E5=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:54',used='1',
name=identifier_node(string='current_command_line_count')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
E6=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r')),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='2560',link='extern',name='12695',srcp='externs.h:104')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
E7=cond_expr(
OP0=ne_expr(
OP0=var_decl(algn='32',srcp='eval.c:53',used='1',
name=identifier_node(string='need_here_doc')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='5191',link='extern',name='10700',srcp='input.h:104')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126')),
E8=return_expr(
expr=modify_expr(
OP0=result_decl(algn='32',note='art:artificial',srcp='eval.c:216'),
OP1=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
ftype=void_type(algn='8',name='126'))),
ftype=void_type(algn='8',name='126'),
vars=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r'))),
name=identifier_node(string='parse_command'))
|
h4ck3rm1k3/gcc_py_introspector
|
data/body4.py
|
Python
|
gpl-2.0
| 6,023
|
from ggrade import read_tab_file
import argparse
################################################################################
################################################################################
def main():
# Parse the input arguments
parser = argparse.ArgumentParser()
parser.add_argument('infile_name', type=str, default=None, help='Input file name',nargs='?')
parser.add_argument('--solutions-file', dest='outfile_name', type=str,\
default=None, help='Name of output file to write the solutions to.')
args = parser.parse_args()
# Open the file and pull out the information.
questions,solutions,student_answers = None,None,None
if args.infile_name is not None:
questions,solutions,student_answers = read_tab_file(args.infile_name)
solutions_string = "solutions = [ \n"
extra_feedback_string = "feedback_for_everyone = [ \n"
incorrect_feedback_string = "feedback_for_wrong_answers = [ \n"
points_per_question_string = "points_per_question = [ \n"
nsolutions = len(solutions)
# For now, assume the solutions are the first one.
for i,solution in enumerate(solutions):
solutions_string += "\t\"%s\"" % (solution)
extra_feedback_string += "\tNone"
incorrect_feedback_string += "\tNone"
        points_per_question_string += "\t10"
if i != nsolutions-1:
solutions_string += ", # Question %d\n" % (i+1)
extra_feedback_string += ", # Question %d\n" % (i+1)
incorrect_feedback_string += ", # Question %d\n" % (i+1)
points_per_question_string += ", # Question %d\n" % (i+1)
else:
solutions_string += " # Question %d \n" % (i+1)
extra_feedback_string += " # Question %d \n" % (i+1)
incorrect_feedback_string += " # Question %d \n" % (i+1)
points_per_question_string += " # Question %d \n" % (i+1)
solutions_string += "] \n"
extra_feedback_string += "] \n"
incorrect_feedback_string += "] \n"
points_per_question_string += "] \n"
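    # The generated solutions file then contains blocks of the form (illustrative):
    #   solutions = [
    #       "first answer", # Question 1
    #       "second answer" # Question 2
    #   ]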
# Write the output to a file.
outfile_name = "solutions.py"
if args.outfile_name is not None:
outfile_name = args.outfile_name
else:
outfile_name = args.infile_name.split('.tsv')[0]
outfile_name = "SOLUTIONS_%s.py" % (outfile_name)
outfile = open(outfile_name,'w+')
outfile.write("# -*- coding: utf-8 -*-")
outfile.write("\n")
outfile.write(solutions_string)
outfile.write("\n")
outfile.write(extra_feedback_string)
outfile.write("\n")
outfile.write(incorrect_feedback_string)
outfile.write("\n")
outfile.write(points_per_question_string)
outfile.close()
################################################################################
################################################################################
if __name__=="__main__":
main()
|
mattbellis/ggrade
|
scripts/parse_response_file.py
|
Python
|
gpl-2.0
| 2,923
|
#
# Copyright (c) 2004 Conectiva, Inc.
# Copyright (c) 2005--2013 Red Hat, Inc.
#
# From code written by Gustavo Niemeyer <niemeyer@conectiva.com>
# Modified by Joel Martin <jmartin@redhat.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.interfaces.up2date.progress import Up2dateProgress
from smart.interfaces.up2date import rhnoptions
from smart.interface import Interface, getScreenWidth
from smart.util.strtools import sizeToStr, printColumns
from smart.const import OPTIONAL, ALWAYS, DEBUG
from smart.fetcher import Fetcher
from smart.report import Report
from smart import *
from smart.transaction import PolicyInstall
from rhn.client.rhnPackages import ServerSettings
from rhn.client import rhnAuth
import getpass
import sys
import os
import commands
class Up2dateInterface(Interface):
def __init__(self, ctrl):
Interface.__init__(self, ctrl)
self._progress = Up2dateProgress()
self._activestatus = False
def getPackages(self, reload=True):
if reload: self._ctrl.reloadChannels()
cache = self._ctrl.getCache()
pkgs = cache.getPackages()
return pkgs
def getRHNPackages(self, reload=True, latest=False):
if reload: self._ctrl.reloadChannels()
cache = self._ctrl.getCache()
pkgs = cache.getPackages()
retpkgs = []
patchlist = []
status, output = commands.getstatusoutput("showrev -p")
if status == 0:
if type(output) == type(""):
output = output.splitlines()
for line in output:
# Patch: 190001-01 Obsoletes: Requires: Incompatibles: Packages: Zpkg2, Zpkg1
if not line.startswith("Patch:"):
continue
parts = line.split()
patchlist.append("patch-solaris-" + parts[1])
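                # e.g. the sample line above yields the entry "patch-solaris-190001-01"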
for pkg in pkgs:
if pkg.name.startswith("patch-solaris") and not pkg.installed:
matchname = pkg.name + "-" + pkg.version
for patchname in patchlist:
if matchname.startswith(patchname + "-"):
pkg.installed |= 1
for loader in pkg.loaders:
channel = loader.getChannel()
if channel.getType() == "solaris-rhn":
retpkgs.append(pkg)
if latest:
apkgs = []
for pkg in retpkgs:
found = False
for apkg in apkgs:
if pkg.name == apkg.name:
found = True
if pkg > apkg:
apkgs.remove(apkg)
apkgs.append(pkg)
break
if not found:
apkgs.append(pkg)
retpkgs = apkgs
return retpkgs
def run(self, command=None, argv=None):
# argv is the list of packages to install if any
#print "Up2date run() command: ", command, "pkgs: ", argv
action = command["action"]
if command.has_key("channel"):
rhnoptions.setOption("channel", command["channel"])
if command.has_key("global_zone"):
rhnoptions.setOption("global_zone", command["global_zone"])
if command.has_key("admin"):
rhnoptions.setOption("admin", command["admin"])
if command.has_key("response"):
rhnoptions.setOption("response", command["response"])
rhnoptions.setOption("action", action)
result = None
if action in ("", "installall"):
if action == "":
pkgs = argv
if action == "installall":
pkgs = self.getRHNPackages(latest=True)
pkgs = [str(x) for x in pkgs if not x.installed]
import smart.commands.install as install
opts = install.parse_options([])
opts.args = pkgs
opts.yes = True
# Use a custom policy for breaking ties with patches.
if command.has_key("act_native"):
result = install.main(self._ctrl, opts, RHNSolarisGreedyPolicyInstall)
else:
result = install.main(self._ctrl, opts, RHNSolarisPolicyInstall)
if action == "list":
pkgs = self.getRHNPackages()
print _("""
Name Version Rel
----------------------------------------------------------""")
for pkg in pkgs:
if pkg.installed: continue
found = False
for upgs in pkg.upgrades:
for prv in upgs.providedby:
for p in prv.packages:
if p.installed:
found = True
if found:
parts = pkg.version.split("-")
version = parts[0]
release = "-".join(parts[1:])
print "%-40s%-15s%-20s" % (pkg.name, version, release)
# bug 165383: run the packages command after an install
if action in ("", "installall", "packages"):
from rhn.client import rhnPackages
import string
pkglist = self.getPackages()
pkgs = []
#8/8/2005 wregglej 165046
#make sure patches get refreshed by checking to see if they're installed
#and placing them in the pkgs list.
patchlist = []
status, output = commands.getstatusoutput("showrev -p")
if status == 0:
if type(output) == type(""):
output = output.splitlines()
for line in output:
# Patch: 190001-01 Obsoletes: Requires: Incompatibles: Packages: Zpkg2, Zpkg1
if not line.startswith("Patch:"):
continue
parts = line.split()
patchlist.append("patch-solaris-" + parts[1] + "-1")
for pkg in pkglist:
if pkg.name.startswith("patch-solaris"):
matchname = pkg.name + "-" + pkg.version
for patchname in patchlist:
if string.find(matchname, patchname) > -1:
parts = string.split(pkg.version, "-")
version = parts[0]
revision = string.join(parts[1:]) or 1
arch = "sparc-solaris-patch"
pkgs.append((pkg.name, version, revision, "", arch))
elif pkg.installed:
# We won't be listing patch clusters: once installed
# they are just patches
if pkg.name.startswith("patch-solaris"):
arch = "sparc-solaris-patch"
else:
arch = "sparc-solaris"
parts = string.split(pkg.version, "-")
version = string.join(parts[0:-1], "-")
revision = parts[-1] or 1
# bug 164540: removed hard-coded '0' epoch
pkgs.append((pkg.name, version, revision, "", arch))
rhnPackages.refreshPackages(pkgs)
# FIXME (20050415): Proper output method
print "Package list refresh successful"
if action == "hardware":
from rhn.client import rhnHardware
rhnHardware.updateHardware()
# FIXME (20050415): Proper output method
print "Hardware profile refresh successful"
if action == "showall" or action == "show_available" \
or action == "showall_with_channels" or action == "show_available_with_channels":
# Show the latest of each package in RHN
pkgs = self.getRHNPackages(latest=True)
for pkg in pkgs:
if action.startswith("show_available") and pkg.installed: continue
if action.endswith("_with_channels"):
                    channelLabel = ""
for (ldr,info) in pkg.loaders.items():
channel = ldr.getChannel()
if channel.getType() == "solaris-rhn":
channelLabel = info['baseurl'][6:-1]
break
print "%-40s%-30s" % (str(pkg), channelLabel)
else:
print str(pkg)
if action == "show_orphans":
pkgs = self.getPackages()
rhn_pkgs = self.getRHNPackages(reload=False)
for pkg in pkgs:
if pkg not in rhn_pkgs:
print str(pkg)
if action == "get":
import smart.commands.download as download
opts = download.parse_options([])
opts.args = argv
opts.yes = True
result = download.main(self._ctrl, opts)
if action == "show_channels":
serverSettings = ServerSettings()
li = rhnAuth.getLoginInfo()
channels = li.get('X-RHN-Auth-Channels')
for channelInfo in channels:
print channelInfo[0]
return result
def getProgress(self, obj, hassub=False):
self._progress.setHasSub(hassub)
self._progress.setFetcherMode(isinstance(obj, Fetcher))
return self._progress
def getSubProgress(self, obj):
return self._progress
def showStatus(self, msg):
if self._activestatus:
pass
# print
else:
self._activestatus = True
#sys.stdout.write(msg)
#sys.stdout.flush()
def hideStatus(self):
if self._activestatus:
self._activestatus = False
print
def askYesNo(self, question, default=False):
self.hideStatus()
mask = default and _("%s (Y/n): ") or _("%s (y/N): ")
res = raw_input(mask % question).strip().lower()
print
if res:
return (_("yes").startswith(res) and not
_("no").startswith(res))
return default
def askContCancel(self, question, default=False):
self.hideStatus()
if default:
mask = _("%s (Continue/cancel): ")
else:
mask = _("%s (continue/Cancel): ")
res = raw_input(mask % question).strip().lower()
print
if res:
return (_("continue").startswith(res) and not
_("cancel").startswith(res))
return default
def askOkCancel(self, question, default=False):
self.hideStatus()
mask = default and _("%s (Ok/cancel): ") or _("%s (ok/Cancel): ")
res = raw_input(mask % question).strip().lower()
print
if res:
return (_("ok").startswith(res) and not
_("cancel").startswith(res))
return default
def confirmChangeSet(self, changeset):
return self.showChangeSet(changeset, confirm=True)
def askInput(self, prompt, message=None, widthchars=None, echo=True):
print
if message:
print message
prompt += ": "
try:
if echo:
res = raw_input(prompt)
else:
res = getpass.getpass(prompt)
except KeyboardInterrupt:
res = ""
print
return res
def askPassword(self, location, caching=OPTIONAL):
self._progress.lock()
passwd = Interface.askPassword(self, location, caching)
self._progress.unlock()
return passwd
def insertRemovableChannels(self, channels):
self.hideStatus()
print
print _("Insert one or more of the following removable channels:")
print
for channel in channels:
print " ", str(channel)
print
return self.askOkCancel(_("Continue?"), True)
# Non-standard interface methods:
def showChangeSet(self, changeset, keep=None, confirm=False):
self.hideStatus()
report = Report(changeset)
report.compute()
screenwidth = getScreenWidth()
hideversion = sysconf.get("text-hide-version", len(changeset) > 40)
if hideversion:
def cvt(lst):
return [x.name for x in lst]
else:
def cvt(lst):
return lst
print
if keep:
keep = cvt(keep)
keep.sort()
print _("Kept packages (%d):") % len(keep)
printColumns(keep, indent=2, width=screenwidth)
print
pkgs = report.upgrading.keys()
if pkgs:
pkgs = cvt(pkgs)
pkgs.sort()
print _("Upgrading packages (%d):") % len(pkgs)
printColumns(pkgs, indent=2, width=screenwidth)
print
pkgs = report.downgrading.keys()
if pkgs:
pkgs = cvt(pkgs)
pkgs.sort()
print _("Downgrading packages (%d):") % len(pkgs)
printColumns(pkgs, indent=2, width=screenwidth)
print
pkgs = report.installing.keys()
if pkgs:
pkgs = cvt(pkgs)
pkgs.sort()
print _("Installed packages (%d):") % len(pkgs)
printColumns(pkgs, indent=2, width=screenwidth)
print
pkgs = report.removed.keys()
if pkgs:
pkgs = cvt(pkgs)
pkgs.sort()
print _("Removed packages (%d):") % len(pkgs)
printColumns(pkgs, indent=2, width=screenwidth)
print
dsize = report.getDownloadSize()
size = report.getInstallSize() - report.getRemoveSize()
if dsize:
sys.stdout.write(_("%s of package files are needed. ") %
sizeToStr(dsize))
if size > 0:
sys.stdout.write(_("%s will be used.") % sizeToStr(size))
elif size < 0:
size *= -1
sys.stdout.write(_("%s will be freed.") % sizeToStr(size))
if dsize or size:
sys.stdout.write("\n\n")
if confirm:
return self.askYesNo(_("Confirm changes?"), True)
return True
class RHNSolarisPolicyInstall(PolicyInstall):
def getPriorityWeights(self, targetPkg, providingPkgs):
# We first need to determine whether we are dealing with a package
# or a patch. For packages, we'll defer to the standard installation
# policy; we only want special behavior for patches.
#
if not targetPkg.isPatch():
return \
PolicyInstall.getPriorityWeights(self, targetPkg, providingPkgs)
# At this point, we have a list of patches. We'll assign weights based
# on how qualified each providing package is. Here's how:
#
# Let T be the package we wish to find the best provider for.
# Let P be the set of patches which was determined to provide T.
        # For each P[i], let X be the set of patches that provides P[i].
#
# We determine qualification based on count(X) for each P[i]. The
        # lower the count(X), the more qualified P[i] is, and the more
        # strongly it will be preferred (i.e. the lower its numeric weight).
#
# In the SmartPM dep solver, a lower weight indicates a better match.
# Therefore, at the end of this algorithm, the P[i] with the lowest
# count(X) should be the lowest-weighted. In the event of a tie, where
# more than one P[i] is of equally low weight, we allow the "winner" to
# be arbitrarily picked by the calling code.
#
# If P[i] is not a patch, it must be a package. In this case, we
# automatically weight it with the highest value and exclude it from
# our search. We never want a package to override a patch. We
# shouldn't see this scenario, but we'll account for it just in case.
#
# This algorithm makes a number of assumptions based on extensive
# observations of the Solaris patch distribution web page at SunSolve
# (http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/patch-access).
# These are:
#
        #   - If a patch P2 obsoletes another patch, P1, then P2 will provide
# both P2 and P1.
#
# - If a patch P3 then obsoletes P2, P3 will provide both P3, P2,
# and P1.
#
# - In no case will two patches, P4 and P3, both obsolete another
# patch P2 without P4 also obsoleting P3 or vice-versa. In other
# words, patches must be accumulated in a hierarchical manner;
# two or more patches may not accumulate another at the same tree
# level.
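        #
        # A small worked example (illustrative, not from the original code):
        # suppose P = {P1, P2, P3}, where P3 obsoletes P2, which in turn
        # obsoletes P1.  Then P1 provides {P1}, P2 provides {P2, P1} and
        # P3 provides {P3, P2, P1}, and the loop below produces the weights
        # P1 = 30.0, P2 = 20.0, P3 = 10.0.  The dep solver therefore picks
        # P3, the most accumulated patch, as the best provider.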
result = {}
nameTable = {}
# First, populate the result set with the lowest possible weights.
# Then, create a mapping between package names and the actual package
# objects. Since pkg.provides is a collection of Provides objects,
# this will allow us to efficiently reference back to the original
# packages.
for providingPkg in providingPkgs:
result[providingPkg] = 0.0
nameTable[providingPkg.name] = providingPkg
# Now iterate again and adjust the weights according to the number of
# providers for each patch.
for providingPkg in providingPkgs:
# Non-patches just don't make sense in this context. Give them a
# very high weight.
if not providingPkg.isPatch():
result[providingPkg] = 9999999.0
else:
# Iterate over each patch that this patch provides and add
# to the weight each time it appears. This will allow the
# more qualified patches to rise to the top. A lower weight
# indicates a better qualification.
for providedPkg in providingPkg.provides:
# Only include it in the result if its in the set we're
# working with.
if nameTable.has_key(providedPkg.name):
result[nameTable[providedPkg.name]] += 10.0
return result
class RHNSolarisGreedyPolicyInstall(RHNSolarisPolicyInstall):
def getWeight(self, changeset):
        # Do not penalize for bringing in extra packages
# BZ: #428490
return 0
|
PaulWay/spacewalk
|
client/solaris/smartpm/smart/interfaces/up2date/interface.py
|
Python
|
gpl-2.0
| 19,239
|
# -*- coding: cp1254 -*-
# please visit http://www.iptvxtra.net
import xbmc,xbmcgui,xbmcplugin,sys
icondir = xbmc.translatePath("special://home/addons/plugin.audio.radio7ulm/icons/")
plugin_handle = int(sys.argv[1])
def add_video_item(url, infolabels, img=''):
listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img, thumbnailImage=img)
listitem.setInfo('video', infolabels)
listitem.setProperty('IsPlayable', 'true')
xbmcplugin.addDirectoryItem(plugin_handle, url, listitem, isFolder=False)
add_video_item('http://srv01.radio7.fmstreams.de/stream1/livestream.mp3',{ 'title': 'Radio 7 - Webradio'},img=icondir + 'radio-7_web.png')
add_video_item('http://srv02.radio7.fmstreams.de/radio7_upa',{ 'title': 'Radio 7 - 80er'},img=icondir + 'radio-7_80er.png')
add_video_item('http://srv02.radio7.fmstreams.de/radio7_downa',{ 'title': 'Radio 7 - Herz'},img=icondir + 'radio-7_herz.png')
add_video_item('http://str0.creacast.com/radio7_acta',{ 'title': 'Radio 7 - OnTour'},img=icondir + 'radio-7_ontour.png')
add_video_item('http://srv01.radio7.fmstreams.de/stream5/livestream.mp3',{ 'title': 'Radio 7 - Live'},img=icondir + 'radio-7_live.png')
xbmcplugin.endOfDirectory(plugin_handle)
xbmc.executebuiltin("Container.SetViewMode(500)")
|
noba3/KoTos
|
addons/plugin.audio.radio7ulm/default.py
|
Python
|
gpl-2.0
| 1,255
|
#
# Utility functions for the command line drivers
#
# Copyright 2006-2007, 2013, 2014 Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import argparse
import logging
import logging.handlers
import os
import shlex
import subprocess
import sys
import traceback
import libvirt
from virtcli import CLIConfig
from . import util
from .clock import Clock
from .cpu import CPU
from .deviceaudio import VirtualAudio
from .devicechar import (VirtualChannelDevice, VirtualConsoleDevice,
VirtualSerialDevice, VirtualParallelDevice)
from .devicecontroller import VirtualController
from .devicedisk import VirtualDisk
from .devicefilesystem import VirtualFilesystem
from .devicegraphics import VirtualGraphics
from .devicehostdev import VirtualHostDevice
from .deviceinput import VirtualInputDevice
from .deviceinterface import VirtualNetworkInterface
from .devicememballoon import VirtualMemballoon
from .devicepanic import VirtualPanicDevice
from .deviceredirdev import VirtualRedirDevice
from .devicerng import VirtualRNGDevice
from .devicesmartcard import VirtualSmartCardDevice
from .devicetpm import VirtualTPMDevice
from .devicevideo import VirtualVideoDevice
from .devicewatchdog import VirtualWatchdog
from .domainblkiotune import DomainBlkiotune
from .domainfeatures import DomainFeatures
from .domainmemorybacking import DomainMemorybacking
from .domainmemorytune import DomainMemorytune
from .domainnumatune import DomainNumatune
from .domainresource import DomainResource
from .idmap import IdMap
from .nodedev import NodeDevice
from .osxml import OSXML
from .pm import PM
from .seclabel import Seclabel
from .storage import StoragePool, StorageVolume
##########################
# Global option handling #
##########################
class _GlobalState(object):
def __init__(self):
self.quiet = False
self.all_checks = None
self._validation_checks = {}
def set_validation_check(self, checkname, val):
self._validation_checks[checkname] = val
def get_validation_check(self, checkname):
if self.all_checks is not None:
return self.all_checks
# Default to True for all checks
return self._validation_checks.get(checkname, True)
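    # Presumably populated from the "--check name=on|off" CLI option handled
    # elsewhere in this module: set_validation_check("path_in_use", False)
    # makes get_validation_check("path_in_use") return False, while setting
    # all_checks overrides every individual check.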
_globalstate = None
def get_global_state():
return _globalstate
def _reset_global_state():
global _globalstate
_globalstate = _GlobalState()
####################
# CLI init helpers #
####################
class VirtStreamHandler(logging.StreamHandler):
def emit(self, record):
"""
Based on the StreamHandler code from python 2.6: ripping out all
the unicode handling and just unconditionally logging seems to fix
logging backtraces with unicode locales (for me at least).
No doubt this is atrocious, but it WORKSFORME!
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s\n"
stream.write(fs % msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class VirtHelpFormatter(argparse.RawDescriptionHelpFormatter):
'''
Subclass the default help formatter to allow printing newline characters
in --help output. The way we do this is a huge hack :(
Inspiration: http://groups.google.com/group/comp.lang.python/browse_thread/thread/6df6e6b541a15bc2/09f28e26af0699b1
'''
oldwrap = None
def _split_lines(self, *args, **kwargs):
def return_default():
return argparse.RawDescriptionHelpFormatter._split_lines(
self, *args, **kwargs)
if len(kwargs) != 0 and len(args) != 2:
return return_default()
try:
text = args[0]
if "\n" in text:
return text.splitlines()
return return_default()
except:
return return_default()
def setupParser(usage, description, introspection_epilog=False):
epilog = _("See man page for examples and full option syntax.")
if introspection_epilog:
epilog = _("Use '--option=?' or '--option help' to see "
"available suboptions") + "\n" + epilog
parser = argparse.ArgumentParser(
usage=usage, description=description,
formatter_class=VirtHelpFormatter,
epilog=epilog)
parser.add_argument('--version', action='version',
version=CLIConfig.version)
return parser
def earlyLogging():
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
def setupLogging(appname, debug_stdout, do_quiet, cli_app=True):
_reset_global_state()
get_global_state().quiet = do_quiet
vi_dir = None
logfile = None
if "VIRTINST_TEST_SUITE" not in os.environ:
vi_dir = util.get_cache_dir()
logfile = os.path.join(vi_dir, appname + ".log")
try:
if vi_dir and not os.access(vi_dir, os.W_OK):
if os.path.exists(vi_dir):
raise RuntimeError("No write access to directory %s" % vi_dir)
try:
os.makedirs(vi_dir, 0751)
except IOError, e:
raise RuntimeError("Could not create directory %s: %s" %
(vi_dir, e))
if (logfile and
os.path.exists(logfile) and
not os.access(logfile, os.W_OK)):
raise RuntimeError("No write access to logfile %s" % logfile)
except Exception, e:
logging.warning("Error setting up logfile: %s", e)
logfile = None
dateFormat = "%a, %d %b %Y %H:%M:%S"
fileFormat = ("[%(asctime)s " + appname + " %(process)d] "
"%(levelname)s (%(module)s:%(lineno)d) %(message)s")
streamErrorFormat = "%(levelname)-8s %(message)s"
rootLogger = logging.getLogger()
# Undo early logging
for handler in rootLogger.handlers:
rootLogger.removeHandler(handler)
rootLogger.setLevel(logging.DEBUG)
if logfile:
fileHandler = logging.handlers.RotatingFileHandler(
logfile, "ae", 1024 * 1024, 5)
fileHandler.setFormatter(
logging.Formatter(fileFormat, dateFormat))
rootLogger.addHandler(fileHandler)
streamHandler = VirtStreamHandler(sys.stderr)
if debug_stdout:
streamHandler.setLevel(logging.DEBUG)
streamHandler.setFormatter(logging.Formatter(fileFormat,
dateFormat))
elif cli_app or not logfile:
if get_global_state().quiet:
level = logging.ERROR
else:
level = logging.WARN
streamHandler.setLevel(level)
streamHandler.setFormatter(logging.Formatter(streamErrorFormat))
else:
streamHandler = None
if streamHandler:
rootLogger.addHandler(streamHandler)
util.register_libvirt_error_handler()
# Log uncaught exceptions
def exception_log(typ, val, tb):
logging.debug("Uncaught exception:\n%s",
"".join(traceback.format_exception(typ, val, tb)))
sys.__excepthook__(typ, val, tb)
sys.excepthook = exception_log
logging.getLogger("requests").setLevel(logging.ERROR)
# Log the app command string
logging.debug("Launched with command line: %s", " ".join(sys.argv))
##############################
# Libvirt connection helpers #
##############################
def getConnection(uri):
from .connection import VirtualConnection
logging.debug("Requesting libvirt URI %s", (uri or "default"))
conn = VirtualConnection(uri)
conn.open(_do_creds_authname)
conn.cache_object_fetch = True
logging.debug("Received libvirt URI %s", conn.uri)
return conn
# SASL username/pass auth
def _do_creds_authname(creds):
retindex = 4
for cred in creds:
credtype, prompt, ignore, ignore, ignore = cred
prompt += ": "
res = cred[retindex]
if credtype == libvirt.VIR_CRED_AUTHNAME:
res = raw_input(prompt)
elif credtype == libvirt.VIR_CRED_PASSPHRASE:
import getpass
res = getpass.getpass(prompt)
else:
raise RuntimeError("Unknown auth type in creds callback: %d" %
credtype)
cred[retindex] = res
return 0
##############################
# Misc CLI utility functions #
##############################
def fail(msg, do_exit=True):
"""
Convenience function when failing in cli app
"""
logging.debug("".join(traceback.format_stack()))
logging.error(msg)
if traceback.format_exc().strip() != "None":
logging.debug("", exc_info=True)
if do_exit:
_fail_exit()
def print_stdout(msg, do_force=False):
if do_force or not get_global_state().quiet:
print msg
def print_stderr(msg):
logging.debug(msg)
print >> sys.stderr, msg
def _fail_exit():
sys.exit(1)
def nice_exit():
print_stdout(_("Exiting at user request."))
sys.exit(0)
def virsh_start_cmd(guest):
return ("virsh --connect %s start %s" % (guest.conn.uri, guest.name))
def install_fail(guest):
virshcmd = virsh_start_cmd(guest)
print_stderr(
_("Domain installation does not appear to have been successful.\n"
"If it was, you can restart your domain by running:\n"
" %s\n"
"otherwise, please restart your installation.") % virshcmd)
sys.exit(1)
def set_prompt(prompt):
# Set whether we allow prompts, or fail if a prompt pops up
if prompt:
logging.warning("--prompt mode is no longer supported.")
def validate_disk(dev, warn_overwrite=False):
def _optional_fail(msg, checkname):
do_check = get_global_state().get_validation_check(checkname)
if do_check:
fail(msg + (_(" (Use --check %s=off or "
"--check all=off to override)") % checkname))
logging.debug("Skipping --check %s error condition '%s'",
checkname, msg)
logging.warn(msg)
def check_path_exists(dev):
"""
Prompt if disk file already exists and preserve mode is not used
"""
if not warn_overwrite:
return
if not VirtualDisk.path_definitely_exists(dev.conn, dev.path):
return
_optional_fail(
_("This will overwrite the existing path '%s'") % dev.path,
"path_exists")
def check_inuse_conflict(dev):
"""
Check if disk is inuse by another guest
"""
names = dev.is_conflict_disk()
if not names:
return
_optional_fail(_("Disk %s is already in use by other guests %s." %
(dev.path, names)),
"path_in_use")
def check_size_conflict(dev):
"""
Check if specified size exceeds available storage
"""
isfatal, errmsg = dev.is_size_conflict()
# The isfatal case should have already caused us to fail
if not isfatal and errmsg:
_optional_fail(errmsg, "disk_size")
def check_path_search(dev):
user, broken_paths = dev.check_path_search(dev.conn, dev.path)
if not broken_paths:
return
logging.warning(_("%s may not be accessible by the hypervisor. "
"You will need to grant the '%s' user search permissions for "
"the following directories: %s"), dev.path, user, broken_paths)
check_path_exists(dev)
check_inuse_conflict(dev)
check_size_conflict(dev)
check_path_search(dev)
def _run_console(args):
logging.debug("Running: %s", " ".join(args))
child = os.fork()
if child:
return child
os.execvp(args[0], args)
os._exit(1) # pylint: disable=protected-access
def _gfx_console(guest):
args = ["virt-viewer",
"--connect", guest.conn.uri,
"--wait", guest.name]
logging.debug("Launching virt-viewer for graphics type '%s'",
guest.get_devices("graphics")[0].type)
return _run_console(args)
def _txt_console(guest):
args = ["virsh",
"--connect", guest.conn.uri,
"console", guest.name]
logging.debug("Connecting to text console")
return _run_console(args)
def connect_console(guest, consolecb, wait):
"""
Launched the passed console callback for the already defined
domain. If domain isn't running, return an error.
"""
child = None
if consolecb:
child = consolecb(guest)
if not child or not wait:
return
# If we connected the console, wait for it to finish
try:
os.waitpid(child, 0)
except OSError, e:
logging.debug("waitpid: %s: %s", e.errno, e.message)
def get_console_cb(guest):
gdevs = guest.get_devices("graphics")
if not gdevs:
return _txt_console
gtype = gdevs[0].type
if gtype not in ["default",
VirtualGraphics.TYPE_VNC,
VirtualGraphics.TYPE_SPICE]:
logging.debug("No viewer to launch for graphics type '%s'", gtype)
return
try:
subprocess.check_output(["virt-viewer", "--version"])
except OSError:
logging.warn(_("Unable to connect to graphical console: "
"virt-viewer not installed. Please install "
"the 'virt-viewer' package."))
return None
if not os.environ.get("DISPLAY", ""):
logging.warn(_("Graphics requested but DISPLAY is not set. "
"Not running virt-viewer."))
return None
return _gfx_console
def get_meter():
quiet = (get_global_state().quiet or "VIRTINST_TEST_SUITE" in os.environ)
return util.make_meter(quiet=quiet)
###########################
# Common CLI option/group #
###########################
def add_connect_option(parser, invoker=None):
if invoker == "virt-xml":
parser.add_argument("-c", "--connect", metavar="URI",
help=_("Connect to hypervisor with libvirt URI"))
else:
parser.add_argument("--connect", metavar="URI",
help=_("Connect to hypervisor with libvirt URI"))
def add_misc_options(grp, prompt=False, replace=False,
printxml=False, printstep=False,
noreboot=False, dryrun=False,
noautoconsole=False):
if prompt:
grp.add_argument("--prompt", action="store_true",
default=False, help=argparse.SUPPRESS)
grp.add_argument("--force", action="store_true",
default=False, help=argparse.SUPPRESS)
if noautoconsole:
grp.add_argument("--noautoconsole", action="store_false",
dest="autoconsole", default=True,
help=_("Don't automatically try to connect to the guest console"))
if noreboot:
grp.add_argument("--noreboot", action="store_true",
help=_("Don't boot guest after completing install."))
if replace:
grp.add_argument("--replace", action="store_true",
help=_("Don't check name collision, overwrite any guest "
"with the same name."))
if printxml:
print_kwargs = {
"dest": "xmlonly",
"default": False,
"help": _("Print the generated domain XML rather than create "
"the guest."),
}
if printstep:
print_kwargs["nargs"] = "?"
print_kwargs["const"] = "all"
else:
print_kwargs["action"] = "store_true"
grp.add_argument("--print-xml", **print_kwargs)
if printstep:
# Back compat, argparse allows us to use --print-xml
# for everything.
grp.add_argument("--print-step", dest="xmlstep",
help=argparse.SUPPRESS)
if dryrun:
grp.add_argument("--dry-run", action="store_true", dest="dry",
help=_("Run through install process, but do not "
"create devices or define the guest."))
if prompt:
grp.add_argument("--check",
help=_("Enable or disable validation checks. Example:\n"
"--check path_in_use=off\n"
"--check all=off"))
grp.add_argument("-q", "--quiet", action="store_true",
help=_("Suppress non-error output"))
grp.add_argument("-d", "--debug", action="store_true",
help=_("Print debugging information"))
def add_metadata_option(grp):
grp.add_argument("--metadata",
help=_("Configure guest metadata. Ex:\n"
"--metadata name=foo,title=\"My pretty title\",uuid=...\n"
"--metadata description=\"My nice long description\""))
def add_memory_option(grp, backcompat=False):
grp.add_argument("--memory",
help=_("Configure guest memory allocation. Ex:\n"
"--memory 1024 (in MiB)\n"
"--memory 512,maxmemory=1024"))
if backcompat:
grp.add_argument("-r", "--ram", type=int, dest="oldmemory",
help=argparse.SUPPRESS)
def vcpu_cli_options(grp, backcompat=True, editexample=False):
grp.add_argument("--vcpus",
help=_("Number of vcpus to configure for your guest. Ex:\n"
"--vcpus 5\n"
"--vcpus 5,maxcpus=10,cpuset=1-4,6,8\n"
"--vcpus sockets=2,cores=4,threads=2,"))
extramsg = "--cpu host"
if editexample:
extramsg = "--cpu host-model,clearxml=yes"
grp.add_argument("--cpu",
help=_("CPU model and features. Ex:\n"
"--cpu coreduo,+x2apic\n") + extramsg)
if backcompat:
grp.add_argument("--check-cpu", action="store_true",
help=argparse.SUPPRESS)
grp.add_argument("--cpuset", help=argparse.SUPPRESS)
def add_gfx_option(devg):
devg.add_argument("--graphics", action="append",
help=_("Configure guest display settings. Ex:\n"
"--graphics vnc\n"
"--graphics spice,port=5901,tlsport=5902\n"
"--graphics none\n"
"--graphics vnc,password=foobar,port=5910,keymap=ja"))
def add_net_option(devg):
devg.add_argument("-w", "--network", action="append",
help=_("Configure a guest network interface. Ex:\n"
"--network bridge=mybr0\n"
"--network network=my_libvirt_virtual_net\n"
"--network network=mynet,model=virtio,mac=00:11...\n"
"--network none\n"
"--network help"))
def add_device_options(devg, sound_back_compat=False):
devg.add_argument("--controller", action="append",
help=_("Configure a guest controller device. Ex:\n"
"--controller type=usb,model=ich9-ehci1"))
devg.add_argument("--input", action="append",
help=_("Configure a guest input device. Ex:\n"
"--input tablet\n"
"--input keyboard,bus=usb"))
devg.add_argument("--serial", action="append",
help=_("Configure a guest serial device"))
devg.add_argument("--parallel", action="append",
help=_("Configure a guest parallel device"))
devg.add_argument("--channel", action="append",
help=_("Configure a guest communication channel"))
devg.add_argument("--console", action="append",
help=_("Configure a text console connection between "
"the guest and host"))
devg.add_argument("--hostdev", action="append",
help=_("Configure physical USB/PCI/etc host devices "
"to be shared with the guest"))
devg.add_argument("--filesystem", action="append",
help=_("Pass host directory to the guest. Ex: \n"
"--filesystem /my/source/dir,/dir/in/guest\n"
"--filesystem template_name,/,type=template"))
# Back compat name
devg.add_argument("--host-device", action="append", dest="hostdev",
help=argparse.SUPPRESS)
# --sound used to be a boolean option, hence the nargs handling
sound_kwargs = {
"action": "append",
"help": _("Configure guest sound device emulation"),
}
if sound_back_compat:
sound_kwargs["nargs"] = '?'
devg.add_argument("--sound", **sound_kwargs)
if sound_back_compat:
devg.add_argument("--soundhw", action="append", dest="sound",
help=argparse.SUPPRESS)
devg.add_argument("--watchdog", action="append",
help=_("Configure a guest watchdog device"))
devg.add_argument("--video", action="append",
help=_("Configure guest video hardware."))
devg.add_argument("--smartcard", action="append",
help=_("Configure a guest smartcard device. Ex:\n"
"--smartcard mode=passthrough"))
devg.add_argument("--redirdev", action="append",
help=_("Configure a guest redirection device. Ex:\n"
"--redirdev usb,type=tcp,server=192.168.1.1:4000"))
devg.add_argument("--memballoon", action="append",
help=_("Configure a guest memballoon device. Ex:\n"
"--memballoon model=virtio"))
devg.add_argument("--tpm", action="append",
help=_("Configure a guest TPM device. Ex:\n"
"--tpm /dev/tpm"))
devg.add_argument("--rng", action="append",
help=_("Configure a guest RNG device. Ex:\n"
"--rng /dev/random"))
devg.add_argument("--panic", action="append",
help=_("Configure a guest panic device. Ex:\n"
"--panic default"))
def add_guest_xml_options(geng):
geng.add_argument("--security", action="append",
help=_("Set domain security driver configuration."))
geng.add_argument("--numatune",
help=_("Tune NUMA policy for the domain process."))
geng.add_argument("--memtune", action="append",
help=_("Tune memory policy for the domain process."))
geng.add_argument("--blkiotune", action="append",
help=_("Tune blkio policy for the domain process."))
geng.add_argument("--memorybacking", action="append",
help=_("Set memory backing policy for the domain process. Ex:\n"
"--memorybacking hugepages=on"))
geng.add_argument("--features",
help=_("Set domain <features> XML. Ex:\n"
"--features acpi=off\n"
"--features apic=on,eoi=on"))
geng.add_argument("--clock",
help=_("Set domain <clock> XML. Ex:\n"
"--clock offset=localtime,rtc_tickpolicy=catchup"))
geng.add_argument("--pm",
help=_("Configure VM power management features"))
geng.add_argument("--events",
help=_("Configure VM lifecycle management policy"))
geng.add_argument("--resource", action="append",
help=_("Configure VM resource partitioning (cgroups)"))
def add_boot_options(insg):
insg.add_argument("--boot",
help=_("Configure guest boot settings. Ex:\n"
"--boot hd,cdrom,menu=on\n"
"--boot init=/sbin/init (for containers)"))
insg.add_argument("--idmap",
help=_("Enable user namespace for LXC container. Ex:\n"
"--idmap uid_start=0,uid_target=1000,uid_count=10"))
def add_disk_option(stog, editexample=False):
editmsg = ""
if editexample:
editmsg += "\n--disk cache= (unset cache)"
stog.add_argument("--disk", action="append",
help=_("Specify storage with various options. Ex.\n"
"--disk size=10 (new 10GiB image in default location)\n"
"--disk /my/existing/disk,cache=none\n"
"--disk device=cdrom,bus=scsi\n"
"--disk=?") + editmsg)
#############################################
# CLI complex parsing helpers #
# (for options like --disk, --network, etc. #
#############################################
def _on_off_convert(key, val):
if val is None:
return None
def _yes_no_convert(s):
tvalues = ["y", "yes", "1", "true", "t", "on"]
fvalues = ["n", "no", "0", "false", "f", "off"]
s = (s or "").lower()
if s in tvalues:
return True
elif s in fvalues:
return False
return None
val = _yes_no_convert(val)
if val is not None:
return val
raise fail(_("%(key)s must be 'yes' or 'no'") % {"key": key})
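# For illustration (comments only, based on the conversion table above):
#   _on_off_convert("acpi", None)    -> None   (sub-option not supplied)
#   _on_off_convert("acpi", "yes")   -> True
#   _on_off_convert("acpi", "off")   -> False
#   _on_off_convert("acpi", "maybe") -> error via fail("acpi must be 'yes' or 'no'")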
class _VirtCLIArgument(object):
def __init__(self, attrname, cliname,
setter_cb=None, ignore_default=False,
can_comma=False, aliases=None,
is_list=False, is_onoff=False,
lookup_cb=None, is_novalue=False):
"""
A single subargument passed to compound command lines like --disk,
--network, etc.
@attrname: The virtinst API attribute name the cliargument maps to.
If this is a virtinst object method, it will be called.
@cliname: The command line option name, 'path' for path=FOO
        @setter_cb: Rather than set an attribute directly on the virtinst
            object, pass (opts, inst, cliname, val) to this callback to handle it.
@ignore_default: If the value passed on the cli is 'default', don't
do anything.
@can_comma: If True, this option is expected to have embedded commas.
After the parser sees this option, it will iterate over the
option string until it finds another known argument name:
everything prior to that argument name is considered part of
the value of this option, '=' included. Should be used sparingly.
@aliases: List of cli aliases. Useful if we want to change a property
name on the cli but maintain back compat.
@is_list: This value should be stored as a list, so multiple instances
are appended.
@is_onoff: The value expected on the cli is on/off or yes/no, convert
it to true/false.
@lookup_cb: If specified, use this function for performing match
lookups.
@is_novalue: If specified, the parameter is not expected in the
form FOO=BAR, but just FOO.
"""
self.attrname = attrname
self.cliname = cliname
self.setter_cb = setter_cb
self.can_comma = can_comma
self.ignore_default = ignore_default
self.aliases = util.listify(aliases)
self.is_list = is_list
self.is_onoff = is_onoff
self.lookup_cb = lookup_cb
self.is_novalue = is_novalue
def parse(self, opts, inst, support_cb=None, lookup=False):
val = None
for cliname in self.aliases + [self.cliname]:
# We iterate over all values unconditionally, so they are
# removed from opts
foundval = opts.get_opt_param(cliname, self.is_novalue)
if foundval is not None:
val = foundval
if val is None:
return
if val == "":
val = None
if support_cb:
support_cb(inst, self.attrname, self.cliname)
if self.is_onoff:
val = _on_off_convert(self.cliname, val)
if val == "default" and self.ignore_default and not lookup:
return
if lookup and not self.attrname and not self.lookup_cb:
raise RuntimeError(
_("Don't know how to match device type '%(device_type)s' "
"property '%(property_name)s'") %
{"device_type": getattr(inst, "virtual_device_type", ""),
"property_name": self.cliname})
try:
if self.attrname:
eval("inst." + self.attrname) # pylint: disable=eval-used
except AttributeError:
raise RuntimeError("programming error: obj=%s does not have "
"member=%s" % (inst, self.attrname))
if lookup:
if self.lookup_cb:
return self.lookup_cb(opts, inst, self.cliname, val)
else:
return eval( # pylint: disable=eval-used
"inst." + self.attrname) == val
elif self.setter_cb:
self.setter_cb(opts, inst, self.cliname, val)
else:
exec( # pylint: disable=exec-used
"inst." + self.attrname + " = val")
class VirtOptionString(object):
def __init__(self, optstr, virtargs, remove_first):
"""
Helper class for parsing opt strings of the form
opt1=val1,opt2=val2,...
@optstr: The full option string
@virtargs: A list of VirtCLIArguments
        @remove_first: List of parameters to peel off the front of the
            option string, and store in the returned dict.
            remove_first=["char_type"] for --serial pty,foo=bar
            maps to {"char_type": "pty", "foo": "bar"}
"""
self.fullopts = optstr
virtargmap = {}
for arg in virtargs:
virtargmap[arg.cliname] = arg
for alias in arg.aliases:
virtargmap[alias] = arg
# @opts: A dictionary of the mapping {cliname: val}
# @orderedopts: A list of tuples (cliname: val), in the order
# they appeared on the CLI.
self.opts, self.orderedopts = self._parse_optstr(
virtargmap, remove_first)
def get_opt_param(self, key, is_novalue=False):
if key not in self.opts:
return None
ret = self.opts.pop(key)
if ret is None:
if not is_novalue:
raise RuntimeError("Option '%s' had no value set." % key)
ret = ""
return ret
def check_leftover_opts(self):
if not self.opts:
return
raise fail(_("Unknown options %s") % self.opts.keys())
###########################
# Actual parsing routines #
###########################
def _parse_optstr_tuples(self, virtargmap, remove_first):
"""
Parse the command string into an ordered list of tuples (see
docs for orderedopts
"""
optstr = str(self.fullopts or "")
optlist = []
argsplitter = shlex.shlex(optstr, posix=True)
argsplitter.commenters = ""
argsplitter.whitespace = ","
argsplitter.whitespace_split = True
remove_first = util.listify(remove_first)[:]
commaopt = None
for opt in list(argsplitter):
if not opt:
continue
cliname = opt
val = None
if opt.count("="):
cliname, val = opt.split("=", 1)
remove_first = []
elif remove_first:
val = cliname
cliname = remove_first.pop(0)
if commaopt:
if cliname in virtargmap:
optlist.append(tuple(commaopt))
commaopt = None
else:
commaopt[1] += "," + cliname
if val:
commaopt[1] += "=" + val
continue
if (cliname in virtargmap and virtargmap[cliname].can_comma):
commaopt = [cliname, val]
continue
optlist.append((cliname, val))
if commaopt:
optlist.append(tuple(commaopt))
return optlist
def _parse_optstr(self, virtargmap, remove_first):
orderedopts = self._parse_optstr_tuples(virtargmap, remove_first)
optdict = {}
for cliname, val in orderedopts:
if (cliname not in optdict and
cliname in virtargmap and
virtargmap[cliname].is_list):
optdict[cliname] = []
if type(optdict.get(cliname)) is list:
optdict[cliname].append(val)
else:
optdict[cliname] = val
return optdict, orderedopts
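# A small sketch (comments only, mirroring the docstring example above):
#   VirtOptionString("pty,foo=bar", virtargs, remove_first=["char_type"])
# produces opts        == {"char_type": "pty", "foo": "bar"}
# and      orderedopts == [("char_type", "pty"), ("foo", "bar")]
# assuming neither argument is registered with is_list or can_comma.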
class VirtCLIParser(object):
"""
    Parse a compound arg string like --option foo=bar,baz=12. This is
    the desired interface to _VirtCLIArgument and VirtOptionString.
    A command line argument just extends this interface, implements
    _init_params, and calls set_param in the order it wants the options
    parsed on the command line. See the existing impls for examples of
    how to do all sorts of crazy stuff.
    set_param must be called unconditionally (e.g. from _init_params and
    not from an overridden _parse), so that we can show all options when
    the user requests command line introspection like --disk=?
"""
objclass = None
def __init__(self, cli_arg_name):
"""
These values should be set by subclasses in _init_params
@cli_arg_name: The command line argument this maps to, so
"hostdev" for --hostdev
        @guest: Will be set by parse(), the toplevel Guest object
@remove_first: Passed to VirtOptionString
@check_none: If the parsed option string is just 'none', return None
@support_cb: An extra support check function for further validation.
Called before the virtinst object is altered. Take arguments
(inst, attrname, cliname)
@clear_attr: If the user requests to clear the XML (--disk clearxml),
this is the property name we grab from inst to actually clear
(so 'security' to get guest.security). If it's True, then
clear inst (in the case of devices)
"""
self.cli_arg_name = cli_arg_name
# This is the name of the variable that argparse will set in
# the result of parse_args()
self.option_variable_name = cli_arg_name.replace("-", "_")
self.guest = None
self.remove_first = None
self.check_none = False
self.support_cb = None
self.clear_attr = None
self._params = []
self._inparse = False
self.__init_global_params()
self._init_params()
def __init_global_params(self):
def set_clearxml_cb(opts, inst, cliname, val):
ignore = opts = cliname
if not self.objclass and not self.clear_attr:
raise RuntimeError("Don't know how to clearxml --%s" %
self.cli_arg_name)
if val is not True:
return
if self.clear_attr:
getattr(inst, self.clear_attr).clear()
else:
inst.clear()
self.set_param(None, "clearxml",
setter_cb=set_clearxml_cb, is_onoff=True)
def check_introspection(self, option):
for optstr in util.listify(option):
if optstr == "?" or optstr == "help":
print "--%s options:" % self.cli_arg_name
for arg in sorted(self._params, key=lambda p: p.cliname):
print " %s" % arg.cliname
print
return True
return False
def set_param(self, *args, **kwargs):
if self._inparse:
# Otherwise we might break command line introspection
raise RuntimeError("programming error: Can not call set_param "
"from parse handler.")
self._params.append(_VirtCLIArgument(*args, **kwargs))
def parse(self, guest, optlist, inst, validate=True):
optlist = util.listify(optlist)
        editing = bool(inst)
        if editing and optlist:
# If an object is passed in, we are updating it in place, and
# only use the last command line occurrence, eg. from virt-xml
optlist = [optlist[-1]]
ret = []
for optstr in optlist:
new_object = False
optinst = inst
if self.objclass and not inst:
if guest.child_class_is_singleton(self.objclass):
optinst = guest.list_children_for_class(
self.objclass)[0]
else:
new_object = True
optinst = self.objclass(guest.conn) # pylint: disable=not-callable
try:
objs = self._parse_single_optstr(guest, optstr, optinst)
for obj in util.listify(objs):
if not new_object:
break
if validate:
obj.validate()
guest.add_child(obj)
ret += util.listify(objs)
except Exception, e:
logging.debug("Exception parsing inst=%s optstr=%s",
inst, optstr, exc_info=True)
fail(_("Error: --%(cli_arg_name)s %(options)s: %(err)s") %
{"cli_arg_name": self.cli_arg_name,
"options": optstr, "err": str(e)})
if not ret:
return None
if len(ret) == 1:
return ret[0]
return ret
def lookup_child_from_option_string(self, guest, optstr):
"""
Given a passed option string, search the guests' child list
for all objects which match the passed options.
Used only by virt-xml --edit lookups
"""
ret = []
objlist = guest.list_children_for_class(self.objclass)
for inst in objlist:
try:
opts = VirtOptionString(optstr, self._params,
self.remove_first)
valid = True
for param in self._params:
if param.parse(opts, inst,
support_cb=None, lookup=True) is False:
valid = False
break
if valid:
ret.append(inst)
except Exception, e:
logging.debug("Exception parsing inst=%s optstr=%s",
inst, optstr, exc_info=True)
fail(_("Error: --%(cli_arg_name)s %(options)s: %(err)s") %
{"cli_arg_name": self.cli_arg_name,
"options": optstr, "err": str(e)})
return ret
def _parse_single_optstr(self, guest, optstr, inst):
if not optstr:
return None
if self.check_none and optstr == "none":
return None
if not inst:
inst = guest
try:
self.guest = guest
self._inparse = True
opts = VirtOptionString(optstr, self._params, self.remove_first)
return self._parse(opts, inst)
finally:
self.guest = None
self._inparse = False
def _parse(self, opts, inst):
for param in self._params:
param.parse(opts, inst, self.support_cb)
opts.check_leftover_opts()
return inst
def _init_params(self):
raise NotImplementedError()
###################
# --check parsing #
###################
def convert_old_force(options):
if options.force:
if not options.check:
options.check = "all=off"
del(options.force)
class ParseCLICheck(VirtCLIParser):
# This sets properties on the _GlobalState objects
def _init_params(self):
def _set_check(opts, inst, cliname, val):
ignore = opts
inst.set_validation_check(cliname, val)
self.set_param(None, "path_in_use",
is_onoff=True, setter_cb=_set_check)
self.set_param(None, "disk_size",
is_onoff=True, setter_cb=_set_check)
self.set_param(None, "path_exists",
is_onoff=True, setter_cb=_set_check)
self.set_param("all_checks", "all", is_onoff=True)
def parse_check(checkstr):
    # A fresh parser is built for each call; it overwrites the global state checks
parser = ParseCLICheck("check")
parser.parse(None, checkstr, get_global_state())
######################
# --metadata parsing #
######################
class ParserMetadata(VirtCLIParser):
def _init_params(self):
self.set_param("name", "name", can_comma=True)
self.set_param("title", "title", can_comma=True)
self.set_param("uuid", "uuid")
self.set_param("description", "description", can_comma=True)
####################
# --events parsing #
####################
class ParserEvents(VirtCLIParser):
def _init_params(self):
self.set_param("on_poweroff", "on_poweroff")
self.set_param("on_reboot", "on_reboot")
self.set_param("on_crash", "on_crash")
######################
# --resource parsing #
######################
class ParserResource(VirtCLIParser):
def _init_params(self):
self.objclass = DomainResource
self.remove_first = "partition"
self.set_param("partition", "partition")
######################
# --numatune parsing #
######################
class ParserNumatune(VirtCLIParser):
def _init_params(self):
self.objclass = DomainNumatune
self.remove_first = "nodeset"
self.set_param("memory_nodeset", "nodeset", can_comma=True)
self.set_param("memory_mode", "mode")
####################
# --memory parsing #
####################
class ParserMemory(VirtCLIParser):
def _init_params(self):
self.remove_first = "memory"
def set_memory_cb(opts, inst, cliname, val):
ignore = opts
setattr(inst, cliname, int(val) * 1024)
self.set_param("memory", "memory", setter_cb=set_memory_cb)
self.set_param("maxmemory", "maxmemory", setter_cb=set_memory_cb)
self.set_param("memoryBacking.hugepages", "hugepages", is_onoff=True)
#####################
# --memtune parsing #
#####################
class ParserMemorytune(VirtCLIParser):
def _init_params(self):
self.objclass = DomainMemorytune
self.remove_first = "soft_limit"
self.set_param("hard_limit", "hard_limit")
self.set_param("soft_limit", "soft_limit")
self.set_param("swap_hard_limit", "swap_hard_limit")
self.set_param("min_guarantee", "min_guarantee")
###################
# --vcpus parsing #
###################
class ParserVCPU(VirtCLIParser):
def _init_params(self):
self.remove_first = "vcpus"
self.set_param("cpu.sockets", "sockets")
self.set_param("cpu.cores", "cores")
self.set_param("cpu.threads", "threads")
def set_vcpus_cb(opts, inst, cliname, val):
ignore = cliname
attrname = ("maxvcpus" in opts.opts) and "curvcpus" or "vcpus"
setattr(inst, attrname, val)
self.set_param(None, "vcpus", setter_cb=set_vcpus_cb)
self.set_param("vcpus", "maxvcpus")
def set_cpuset_cb(opts, inst, cliname, val):
if val == "auto":
try:
val = DomainNumatune.generate_cpuset(
inst.conn, inst.memory)
logging.debug("Auto cpuset is: %s", val)
except Exception, e:
logging.error("Not setting cpuset: %s", str(e))
val = None
if val:
inst.cpuset = val
self.set_param(None, "cpuset", can_comma=True,
setter_cb=set_cpuset_cb)
def _parse(self, opts, inst):
set_from_top = ("maxvcpus" not in opts.opts and
"vcpus" not in opts.opts)
ret = VirtCLIParser._parse(self, opts, inst)
if set_from_top:
inst.vcpus = inst.cpu.vcpus_from_topology()
return ret
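# Worked example (comments only) of the precedence handled in _parse above:
#   --vcpus 4,maxvcpus=8      -> curvcpus=4, vcpus=8 (the bare value maps to
#                                curvcpus because 'maxvcpus' is also present)
#   --vcpus sockets=2,cores=4 -> neither vcpus nor maxvcpus given, so vcpus is
#                                filled from cpu.vcpus_from_topology(),
#                                presumably sockets * cores * threads.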
#################
# --cpu parsing #
#################
class ParserCPU(VirtCLIParser):
def _init_params(self):
self.objclass = CPU
self.remove_first = "model"
def set_model_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
if val == "host":
val = inst.SPECIAL_MODE_HOST_MODEL
if val == "none":
val = inst.SPECIAL_MODE_CLEAR
if val in inst.SPECIAL_MODES:
inst.set_special_mode(val)
else:
inst.model = val
def set_feature_cb(opts, inst, cliname, val):
ignore = opts
policy = cliname
for feature_name in util.listify(val):
featureobj = None
for f in inst.features:
if f.name == feature_name:
featureobj = f
break
if featureobj:
featureobj.policy = policy
else:
inst.add_feature(feature_name, policy)
self.set_param(None, "model", setter_cb=set_model_cb)
self.set_param("mode", "mode")
self.set_param("match", "match")
self.set_param("vendor", "vendor")
self.set_param(None, "force", is_list=True, setter_cb=set_feature_cb)
self.set_param(None, "require", is_list=True, setter_cb=set_feature_cb)
self.set_param(None, "optional", is_list=True, setter_cb=set_feature_cb)
self.set_param(None, "disable", is_list=True, setter_cb=set_feature_cb)
self.set_param(None, "forbid", is_list=True, setter_cb=set_feature_cb)
def _parse(self, optsobj, inst):
opts = optsobj.opts
# Convert +feature, -feature into expected format
for key, value in opts.items():
policy = None
if value or len(key) == 1:
continue
if key.startswith("+"):
policy = "force"
elif key.startswith("-"):
policy = "disable"
if policy:
del(opts[key])
if opts.get(policy) is None:
opts[policy] = []
opts[policy].append(key[1:])
return VirtCLIParser._parse(self, optsobj, inst)
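# For illustration (comments only), the +/- shorthand converted above:
#   --cpu host,+x2apic,-vmx
# becomes model=host (turned into SPECIAL_MODE_HOST_MODEL by set_model_cb),
# force=["x2apic"] and disable=["vmx"]; set_feature_cb then records each
# feature with the matching policy on the CPU object.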
##################
# --boot parsing #
##################
class ParserBoot(VirtCLIParser):
def _init_params(self):
self.clear_attr = "os"
# UEFI depends on these bits, so set them first
self.set_param("os.arch", "arch")
self.set_param("type", "domain_type")
self.set_param("os.os_type", "os_type")
self.set_param("emulator", "emulator")
def set_uefi(opts, inst, cliname, val):
ignore = opts
ignore = cliname
ignore = val
inst.set_uefi_default()
self.set_param(None, "uefi", setter_cb=set_uefi, is_novalue=True)
self.set_param("os.useserial", "useserial", is_onoff=True)
self.set_param("os.enable_bootmenu", "menu", is_onoff=True)
self.set_param("os.kernel", "kernel")
self.set_param("os.initrd", "initrd")
self.set_param("os.dtb", "dtb")
self.set_param("os.loader", "loader")
self.set_param("os.loader_ro", "loader_ro", is_onoff=True)
self.set_param("os.loader_type", "loader_type")
self.set_param("os.nvram", "nvram")
self.set_param("os.nvram_template", "nvram_template")
self.set_param("os.kernel_args", "kernel_args",
aliases=["extra_args"], can_comma=True)
self.set_param("os.init", "init")
self.set_param("os.machine", "machine")
def set_initargs_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
inst.os.set_initargs_string(val)
self.set_param("os.initargs", "initargs", setter_cb=set_initargs_cb)
        # Order matters for boot devices; we handle them specially in _parse
        def noset_cb(opts, inst, cliname, val):
            ignore = opts, inst, cliname, val
        for b in OSXML.BOOT_DEVICES:
            self.set_param(None, b, setter_cb=noset_cb)
def _parse(self, opts, inst):
# Build boot order
boot_order = []
for cliname, ignore in opts.orderedopts:
if cliname not in inst.os.BOOT_DEVICES:
continue
del(opts.opts[cliname])
if cliname not in boot_order:
boot_order.append(cliname)
if boot_order:
inst.os.bootorder = boot_order
VirtCLIParser._parse(self, opts, inst)
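# Example (comments only): '--boot hd,cdrom,menu=on' (the help-text example)
# yields inst.os.bootorder = ["hd", "cdrom"] from the ordered options, while
# menu=on is parsed normally into os.enable_bootmenu = True.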
###################
# --idmap parsing #
###################
class ParserIdmap(VirtCLIParser):
def _init_params(self):
self.objclass = IdMap
self.set_param("uid_start", "uid_start")
self.set_param("uid_target", "uid_target")
self.set_param("uid_count", "uid_count")
self.set_param("gid_start", "gid_start")
self.set_param("gid_target", "gid_target")
self.set_param("gid_count", "gid_count")
######################
# --security parsing #
######################
class ParserSecurity(VirtCLIParser):
def _init_params(self):
self.objclass = Seclabel
self.set_param("type", "type")
self.set_param("model", "model")
self.set_param("relabel", "relabel", is_onoff=True)
self.set_param("label", "label", can_comma=True)
self.set_param("baselabel", "label", can_comma=True)
######################
# --features parsing #
######################
class ParserFeatures(VirtCLIParser):
def _init_params(self):
self.objclass = DomainFeatures
self.set_param("acpi", "acpi", is_onoff=True)
self.set_param("apic", "apic", is_onoff=True)
self.set_param("pae", "pae", is_onoff=True)
self.set_param("privnet", "privnet",
is_onoff=True)
self.set_param("hap", "hap",
is_onoff=True)
self.set_param("viridian", "viridian",
is_onoff=True)
self.set_param("eoi", "eoi", is_onoff=True)
self.set_param("pmu", "pmu", is_onoff=True)
self.set_param("hyperv_vapic", "hyperv_vapic",
is_onoff=True)
self.set_param("hyperv_relaxed", "hyperv_relaxed",
is_onoff=True)
self.set_param("hyperv_spinlocks", "hyperv_spinlocks",
is_onoff=True)
self.set_param("hyperv_spinlocks_retries",
"hyperv_spinlocks_retries")
self.set_param("vmport", "vmport", is_onoff=True)
self.set_param("kvm_hidden", "kvm_hidden", is_onoff=True)
self.set_param("pvspinlock", "pvspinlock", is_onoff=True)
###################
# --clock parsing #
###################
class ParserClock(VirtCLIParser):
def _init_params(self):
self.objclass = Clock
self.set_param("offset", "offset")
def set_timer(opts, inst, cliname, val):
ignore = opts
tname, attrname = cliname.split("_")
timerobj = None
for t in inst.timers:
if t.name == tname:
timerobj = t
break
if not timerobj:
timerobj = inst.add_timer()
timerobj.name = tname
setattr(timerobj, attrname, val)
for tname in Clock.TIMER_NAMES:
self.set_param(None, tname + "_present",
is_onoff=True,
setter_cb=set_timer)
self.set_param(None, tname + "_tickpolicy", setter_cb=set_timer)
################
# --pm parsing #
################
class ParserPM(VirtCLIParser):
def _init_params(self):
self.objclass = PM
self.set_param("suspend_to_mem", "suspend_to_mem", is_onoff=True)
self.set_param("suspend_to_disk", "suspend_to_disk", is_onoff=True)
##########################
# Guest <device> parsing #
##########################
##################
# --disk parsing #
##################
def _default_image_file_format(conn):
if conn.check_support(conn.SUPPORT_CONN_DEFAULT_QCOW2):
return "qcow2"
return "raw"
def _get_default_image_format(conn, poolobj):
tmpvol = StorageVolume(conn)
tmpvol.pool = poolobj
if tmpvol.file_type != StorageVolume.TYPE_FILE:
return None
return _default_image_file_format(conn)
def _generate_new_volume_name(guest, poolobj, fmt):
collidelist = []
for disk in guest.get_devices("disk"):
if (disk.get_vol_install() and
disk.get_vol_install().pool.name() == poolobj.name()):
collidelist.append(os.path.basename(disk.path))
ext = StorageVolume.get_file_extension_for_format(fmt)
return StorageVolume.find_free_name(
poolobj, guest.name, suffix=ext, collidelist=collidelist)
class ParserDisk(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualDisk
self.remove_first = "path"
def noset_cb(opts, inst, cliname, val):
ignore = opts, inst, cliname, val
# These are all handled specially in _parse
self.set_param(None, "backing_store", setter_cb=noset_cb)
self.set_param(None, "pool", setter_cb=noset_cb)
self.set_param(None, "vol", setter_cb=noset_cb)
self.set_param(None, "size", setter_cb=noset_cb)
self.set_param(None, "format", setter_cb=noset_cb)
self.set_param(None, "sparse", setter_cb=noset_cb)
self.set_param("source_pool", "source_pool")
self.set_param("source_volume", "source_volume")
self.set_param("source_name", "source_name")
self.set_param("source_protocol", "source_protocol")
self.set_param("source_host_name", "source_host_name")
self.set_param("source_host_port", "source_host_port")
self.set_param("source_host_socket", "source_host_socket")
self.set_param("source_host_transport", "source_host_transport")
self.set_param("path", "path")
self.set_param("device", "device")
self.set_param("bus", "bus")
self.set_param("removable", "removable", is_onoff=True)
self.set_param("driver_cache", "cache")
self.set_param("driver_discard", "discard")
self.set_param("driver_name", "driver_name")
self.set_param("driver_type", "driver_type")
self.set_param("driver_io", "io")
self.set_param("error_policy", "error_policy")
self.set_param("serial", "serial")
self.set_param("target", "target")
self.set_param("startup_policy", "startup_policy")
self.set_param("read_only", "readonly", is_onoff=True)
self.set_param("shareable", "shareable", is_onoff=True)
self.set_param("boot.order", "boot_order")
self.set_param("iotune_rbs", "read_bytes_sec")
self.set_param("iotune_wbs", "write_bytes_sec")
self.set_param("iotune_tbs", "total_bytes_sec")
self.set_param("iotune_ris", "read_iops_sec")
self.set_param("iotune_wis", "write_iops_sec")
self.set_param("iotune_tis", "total_iops_sec")
self.set_param("sgio", "sgio")
def _parse(self, opts, inst):
if opts.fullopts == "none":
return
def parse_size(val):
if val is None:
return None
try:
return float(val)
except Exception, e:
fail(_("Improper value for 'size': %s") % str(e))
def convert_perms(val):
if val is None:
return
if val == "ro":
opts.opts["readonly"] = "on"
elif val == "sh":
opts.opts["shareable"] = "on"
elif val == "rw":
                # 'rw' is the default. Nothing to do.
pass
else:
fail(_("Unknown '%s' value '%s'") % ("perms", val))
has_path = "path" in opts.opts
backing_store = opts.get_opt_param("backing_store")
poolname = opts.get_opt_param("pool")
volname = opts.get_opt_param("vol")
size = parse_size(opts.get_opt_param("size"))
fmt = opts.get_opt_param("format")
sparse = _on_off_convert("sparse", opts.get_opt_param("sparse"))
convert_perms(opts.get_opt_param("perms"))
has_type_volume = ("source_pool" in opts.opts or
"source_volume" in opts.opts)
has_type_network = ("source_protocol" in opts.opts)
optcount = sum([bool(p) for p in [has_path, poolname, volname,
has_type_volume, has_type_network]])
if optcount > 1:
fail(_("Cannot specify more than 1 storage path"))
if optcount == 0 and size:
# Saw something like --disk size=X, have it imply pool=default
poolname = "default"
if volname:
if volname.count("/") != 1:
raise ValueError(_("Storage volume must be specified as "
"vol=poolname/volname"))
poolname, volname = volname.split("/")
logging.debug("Parsed --disk volume as: pool=%s vol=%s",
poolname, volname)
VirtCLIParser._parse(self, opts, inst)
# Generate and fill in the disk source info
newvolname = None
poolobj = None
if poolname:
if poolname == "default":
StoragePool.build_default_pool(self.guest.conn)
poolobj = self.guest.conn.storagePoolLookupByName(poolname)
if volname:
vol_object = poolobj.storageVolLookupByName(volname)
inst.set_vol_object(vol_object, poolobj)
poolobj = None
if ((poolobj or inst.wants_storage_creation()) and
(fmt or size or sparse or backing_store)):
if not poolobj:
poolobj = inst.get_parent_pool()
newvolname = os.path.basename(inst.path)
if poolobj and not fmt:
fmt = _get_default_image_format(self.guest.conn, poolobj)
if newvolname is None:
newvolname = _generate_new_volume_name(self.guest, poolobj,
fmt)
vol_install = VirtualDisk.build_vol_install(
self.guest.conn, newvolname, poolobj, size, sparse,
fmt=fmt, backing_store=backing_store)
inst.set_vol_install(vol_install)
if not inst.target:
skip_targets = [d.target for d in self.guest.get_devices("disk")]
inst.generate_target(skip_targets)
inst.cli_generated_target = True
return inst
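# Sketch of the storage fallback above (comments only): a bare '--disk size=10'
# has no path/pool/vol, so poolname falls back to "default", the default pool
# is built if missing, the format comes from _get_default_image_format()
# (qcow2 when the connection supports it, else raw), a free volume name is
# generated, and a 10 GiB vol_install is attached to the new VirtualDisk.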
#####################
# --network parsing #
#####################
class ParserNetwork(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualNetworkInterface
self.remove_first = "type"
def set_mac_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
if val == "RANDOM":
return None
inst.macaddr = val
return val
def set_type_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
if val == "default":
inst.set_default_source()
else:
inst.type = val
self.set_param("type", "type", setter_cb=set_type_cb)
self.set_param("source", "source")
self.set_param("source_mode", "source_mode")
self.set_param("portgroup", "portgroup")
self.set_param("target_dev", "target")
self.set_param("model", "model")
self.set_param("macaddr", "mac", setter_cb=set_mac_cb)
self.set_param("filterref", "filterref")
self.set_param("boot.order", "boot_order")
self.set_param("driver_name", "driver_name")
self.set_param("driver_queues", "driver_queues")
self.set_param("virtualport.type", "virtualport_type")
# For 802.1Qbg
self.set_param("virtualport.managerid", "virtualport_managerid")
self.set_param("virtualport.typeid", "virtualport_typeid")
self.set_param("virtualport.typeidversion",
"virtualport_typeidversion")
self.set_param("virtualport.instanceid", "virtualport_instanceid")
# For openvswitch & 802.1Qbh
self.set_param("virtualport.profileid", "virtualport_profileid")
# For openvswitch & midonet
self.set_param("virtualport.interfaceid", "virtualport_interfaceid")
def _parse(self, optsobj, inst):
if optsobj.fullopts == "none":
return
opts = optsobj.opts
if "type" not in opts:
if "network" in opts:
opts["type"] = VirtualNetworkInterface.TYPE_VIRTUAL
opts["source"] = opts.pop("network")
elif "bridge" in opts:
opts["type"] = VirtualNetworkInterface.TYPE_BRIDGE
opts["source"] = opts.pop("bridge")
return VirtCLIParser._parse(self, optsobj, inst)
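# Back-compat conversion above, for illustration (comments only):
#   --network bridge=br0  -> type=bridge, source=br0
#   --network network=foo -> type=VirtualNetworkInterface.TYPE_VIRTUAL, source=foo
#   --network default     -> set_type_cb calls inst.set_default_source()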
######################
# --graphics parsing #
######################
class ParserGraphics(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualGraphics
self.remove_first = "type"
def set_keymap_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
from . import hostkeymap
if not val:
val = None
elif val.lower() == "local":
val = VirtualGraphics.KEYMAP_LOCAL
elif val.lower() == "none":
val = None
else:
use_keymap = hostkeymap.sanitize_keymap(val)
if not use_keymap:
raise ValueError(
_("Didn't match keymap '%s' in keytable!") % val)
val = use_keymap
inst.keymap = val
def set_type_cb(opts, inst, cliname, val):
ignore = opts
if val == "default":
return
inst.type = val
self.set_param(None, "type", setter_cb=set_type_cb)
self.set_param("port", "port")
self.set_param("tlsPort", "tlsport")
self.set_param("listen", "listen")
self.set_param(None, "keymap", setter_cb=set_keymap_cb)
self.set_param("passwd", "password")
self.set_param("passwdValidTo", "passwordvalidto")
self.set_param("connected", "connected")
self.set_param("defaultMode", "defaultMode")
self.set_param("image_compression", "image_compression")
self.set_param("streaming_mode", "streaming_mode")
self.set_param("clipboard_copypaste", "clipboard_copypaste",
is_onoff=True)
self.set_param("mouse_mode", "mouse_mode")
self.set_param("filetransfer_enable", "filetransfer_enable",
is_onoff=True)
def _parse(self, opts, inst):
if opts.fullopts == "none":
self.guest.skip_default_graphics = True
return
return VirtCLIParser._parse(self, opts, inst)
########################
# --controller parsing #
########################
class ParserController(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualController
self.remove_first = "type"
self.set_param("type", "type")
self.set_param("model", "model")
self.set_param("index", "index")
self.set_param("master_startport", "master")
def set_server_cb(opts, inst, cliname, val):
ignore = opts = cliname
inst.address.set_addrstr(val)
self.set_param(None, "address", setter_cb=set_server_cb)
def _parse(self, opts, inst):
if opts.fullopts == "usb2":
return VirtualController.get_usb2_controllers(inst.conn)
elif opts.fullopts == "usb3":
inst.type = "usb"
inst.model = "nec-xhci"
return inst
return VirtCLIParser._parse(self, opts, inst)
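# For illustration (comments only): '--controller usb2' expands to the full set
# of USB2 companion controllers via get_usb2_controllers(), while
# '--controller usb3' is shorthand for a single type=usb, model=nec-xhci
# controller; anything else is parsed field by field as usual.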
###################
# --input parsing #
###################
class ParserInput(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualInputDevice
self.remove_first = "type"
self.set_param("type", "type")
self.set_param("bus", "bus")
#######################
# --smartcard parsing #
#######################
class ParserSmartcard(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualSmartCardDevice
self.remove_first = "mode"
self.check_none = True
self.set_param("mode", "mode")
self.set_param("type", "type")
######################
# --redirdev parsing #
######################
class ParserRedir(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualRedirDevice
self.remove_first = "bus"
self.set_param("bus", "bus")
self.set_param("type", "type")
self.set_param("boot.order", "boot_order")
def set_server_cb(opts, inst, cliname, val):
ignore = opts = cliname
inst.parse_friendly_server(val)
self.set_param(None, "server", setter_cb=set_server_cb)
def _parse(self, opts, inst):
if opts.fullopts == "none":
self.guest.skip_default_usbredir = True
return
return VirtCLIParser._parse(self, opts, inst)
#################
# --tpm parsing #
#################
class ParserTPM(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualTPMDevice
self.remove_first = "type"
self.check_none = True
self.set_param("type", "type")
self.set_param("model", "model")
self.set_param("device_path", "path")
def _parse(self, opts, inst):
if (opts.opts.get("type", "").startswith("/")):
opts.opts["path"] = opts.opts.pop("type")
return VirtCLIParser._parse(self, opts, inst)
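# Example (comments only): '--tpm /dev/tpm' (the help-text example) starts
# with '/', so the bare value is re-keyed from 'type' to 'path' and lands in
# inst.device_path; '--tpm passthrough,path=/dev/tpm' spells out the same
# thing without the shorthand.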
#################
# --rng parsing #
#################
class ParserRNG(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualRNGDevice
self.remove_first = "type"
self.check_none = True
def set_hosts_cb(opts, inst, cliname, val):
namemap = {}
inst.backend_type = self._cli_backend_type
if self._cli_backend_mode == "connect":
namemap["backend_host"] = "connect_host"
namemap["backend_service"] = "connect_service"
if self._cli_backend_mode == "bind":
namemap["backend_host"] = "bind_host"
namemap["backend_service"] = "bind_service"
if self._cli_backend_type == "udp":
namemap["backend_connect_host"] = "connect_host"
namemap["backend_connect_service"] = "connect_service"
if cliname in namemap:
setattr(inst, namemap[cliname], val)
def set_backend_cb(opts, inst, cliname, val):
ignore = opts
ignore = inst
# pylint: disable=attribute-defined-outside-init
if cliname == "backend_mode":
self._cli_backend_mode = val
elif cliname == "backend_type":
self._cli_backend_type = val
self.set_param("type", "type")
self.set_param(None, "backend_mode", setter_cb=set_backend_cb)
self.set_param(None, "backend_type", setter_cb=set_backend_cb)
self.set_param(None, "backend_host", setter_cb=set_hosts_cb)
self.set_param(None, "backend_service", setter_cb=set_hosts_cb)
self.set_param(None, "backend_connect_host", setter_cb=set_hosts_cb)
self.set_param(None, "backend_connect_service", setter_cb=set_hosts_cb)
self.set_param("device", "device")
self.set_param("model", "model")
self.set_param("rate_bytes", "rate_bytes")
self.set_param("rate_period", "rate_period")
def _parse(self, optsobj, inst):
opts = optsobj.opts
# pylint: disable=attribute-defined-outside-init
        # Defined outside init, but it's easier this way
self._cli_backend_mode = "connect"
self._cli_backend_type = "udp"
# pylint: enable=attribute-defined-outside-init
if opts.get("type", "").startswith("/"):
# Allow --rng /dev/random
opts["device"] = opts.pop("type")
opts["type"] = "random"
return VirtCLIParser._parse(self, optsobj, inst)
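# Example (comments only): '--rng /dev/random' becomes type=random,
# device=/dev/random via the shorthand above.  For a backend given with
# backend_mode=connect, set_hosts_cb routes backend_host/backend_service into
# inst.connect_host / inst.connect_service.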
######################
# --watchdog parsing #
######################
class ParserWatchdog(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualWatchdog
self.remove_first = "model"
self.set_param("model", "model")
self.set_param("action", "action")
########################
# --memballoon parsing #
########################
class ParserMemballoon(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualMemballoon
self.remove_first = "model"
self.set_param("model", "model")
###################
# --panic parsing #
###################
class ParserPanic(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualPanicDevice
self.remove_first = "iobase"
def set_iobase_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
if val == "default":
return
inst.iobase = val
self.set_param(None, "iobase", setter_cb=set_iobase_cb)
#######################
# --blkiotune parsing #
#######################
class ParserBlkiotune(VirtCLIParser):
def _init_params(self):
self.objclass = DomainBlkiotune
self.remove_first = "weight"
self.set_param("weight", "weight")
self.set_param("device_path", "device_path")
self.set_param("device_weight", "device_weight")
###########################
# --memorybacking parsing #
###########################
class ParserMemorybacking(VirtCLIParser):
def _init_params(self):
self.objclass = DomainMemorybacking
self.set_param("hugepages", "hugepages", is_onoff=True)
self.set_param("page_size", "size")
self.set_param("page_unit", "unit")
self.set_param("page_nodeset", "nodeset", can_comma=True)
self.set_param("nosharepages", "nosharepages", is_onoff=True)
self.set_param("locked", "locked", is_onoff=True)
######################################################
# --serial, --parallel, --channel, --console parsing #
######################################################
class _ParserChar(VirtCLIParser):
def _init_params(self):
self.remove_first = "char_type"
def support_check(inst, attrname, cliname):
if type(attrname) is not str:
return
if not inst.supports_property(attrname):
raise ValueError(_("%(devtype)s type '%(chartype)s' does not "
"support '%(optname)s' option.") %
{"devtype" : inst.virtual_device_type,
"chartype": inst.type,
"optname" : cliname})
self.support_cb = support_check
self.set_param("type", "char_type")
self.set_param("source_path", "path")
self.set_param("protocol", "protocol")
self.set_param("target_type", "target_type")
self.set_param("target_name", "name")
def set_host_cb(opts, inst, cliname, val):
ignore = cliname
if ("bind_host" not in opts.opts and
opts.opts.get("mode", None) == "bind"):
inst.set_friendly_bind(val)
else:
inst.set_friendly_source(val)
self.set_param(None, "host", setter_cb=set_host_cb)
def set_bind_cb(opts, inst, cliname, val):
ignore = opts = cliname
inst.set_friendly_bind(val)
self.set_param(None, "bind_host", setter_cb=set_bind_cb)
def set_target_cb(opts, inst, cliname, val):
ignore = opts = cliname
inst.set_friendly_target(val)
self.set_param(None, "target_address", setter_cb=set_target_cb)
self.set_param("source_mode", "mode")
def _parse(self, opts, inst):
if opts.fullopts == "none" and inst.virtual_device_type == "console":
self.guest.skip_default_console = True
return
if opts.fullopts == "none" and inst.virtual_device_type == "channel":
self.guest.skip_default_channel = True
return
return VirtCLIParser._parse(self, opts, inst)
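# Sketch (comments only): for
#   --serial tcp,host=127.0.0.1:2222,mode=bind
# set_host_cb sees mode=bind and no bind_host, so the host= value goes through
# inst.set_friendly_bind(); with mode=connect (or no mode at all) it would go
# through inst.set_friendly_source() instead.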
class ParserSerial(_ParserChar):
objclass = VirtualSerialDevice
class ParserParallel(_ParserChar):
objclass = VirtualParallelDevice
class ParserChannel(_ParserChar):
objclass = VirtualChannelDevice
class ParserConsole(_ParserChar):
objclass = VirtualConsoleDevice
########################
# --filesystem parsing #
########################
class ParserFilesystem(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualFilesystem
self.remove_first = ["source", "target"]
self.set_param("type", "type")
self.set_param("accessmode", "accessmode", aliases=["mode"])
self.set_param("source", "source")
self.set_param("target", "target")
###################
# --video parsing #
###################
class ParserVideo(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualVideoDevice
self.remove_first = "model"
self.set_param("model", "model", ignore_default=True)
###################
# --sound parsing #
###################
class ParserSound(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualAudio
self.remove_first = "model"
self.set_param("model", "model", ignore_default=True)
def _parse(self, opts, inst):
if opts.fullopts == "none":
self.guest.skip_default_sound = True
return
return VirtCLIParser._parse(self, opts, inst)
#####################
# --hostdev parsing #
#####################
class ParserHostdev(VirtCLIParser):
def _init_params(self):
self.objclass = VirtualHostDevice
self.remove_first = "name"
        # If using the name_lookup_cb, this saves us repeatedly trying to
        # look up the nodedev
_nodedev_lookup_cache = {}
def set_name_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
val = NodeDevice.lookupNodedevFromString(inst.conn, val)
inst.set_from_nodedev(val)
def name_lookup_cb(opts, inst, cliname, val):
ignore = opts
ignore = cliname
if val not in _nodedev_lookup_cache:
_nodedev_lookup_cache[val] = \
NodeDevice.lookupNodedevFromString(inst.conn, val)
nodedev = _nodedev_lookup_cache[val]
return nodedev.compare_to_hostdev(inst)
self.set_param(None, "name",
setter_cb=set_name_cb, lookup_cb=name_lookup_cb)
self.set_param("driver_name", "driver_name")
self.set_param("boot.order", "boot_order")
self.set_param("rom_bar", "rom_bar", is_onoff=True)
###########################
# Register parser classes #
###########################
def build_parser_map(options, skip=None, only=None):
"""
    Build a dictionary mapping cli-name -> parser instance, so
    --vcpus -> ParserVCPU object.
"""
parsermap = {}
def register_parser(cli_arg_name, parserclass):
if cli_arg_name in util.listify(skip):
return
if only and cli_arg_name not in util.listify(only):
return
parserobj = parserclass(cli_arg_name)
if not hasattr(options, parserobj.option_variable_name):
raise RuntimeError("programming error: unknown option=%s "
"cliname=%s class=%s" %
(parserobj.option_variable_name,
parserobj.cli_arg_name, parserclass))
parsermap[parserobj.option_variable_name] = parserobj
register_parser("metadata", ParserMetadata)
register_parser("events", ParserEvents)
register_parser("resource", ParserResource)
register_parser("memory", ParserMemory)
register_parser("memtune", ParserMemorytune)
register_parser("vcpus", ParserVCPU)
register_parser("cpu", ParserCPU)
register_parser("numatune", ParserNumatune)
register_parser("blkiotune", ParserBlkiotune)
register_parser("memorybacking", ParserMemorybacking)
register_parser("idmap", ParserIdmap)
register_parser("boot", ParserBoot)
register_parser("security", ParserSecurity)
register_parser("features", ParserFeatures)
register_parser("clock", ParserClock)
register_parser("pm", ParserPM)
register_parser("features", ParserFeatures)
register_parser("disk", ParserDisk)
register_parser("network", ParserNetwork)
register_parser("graphics", ParserGraphics)
register_parser("controller", ParserController)
register_parser("input", ParserInput)
register_parser("smartcard", ParserSmartcard)
register_parser("redirdev", ParserRedir)
register_parser("tpm", ParserTPM)
register_parser("rng", ParserRNG)
register_parser("watchdog", ParserWatchdog)
register_parser("memballoon", ParserMemballoon)
register_parser("serial", ParserSerial)
register_parser("parallel", ParserParallel)
register_parser("channel", ParserChannel)
register_parser("console", ParserConsole)
register_parser("filesystem", ParserFilesystem)
register_parser("video", ParserVideo)
register_parser("sound", ParserSound)
register_parser("hostdev", ParserHostdev)
register_parser("panic", ParserPanic)
return parsermap
def parse_option_strings(parsermap, options, guest, instlist, update=False):
"""
Iterate over the parsermap, and launch the associated parser
function for every value that was filled in on 'options', which
came from argparse/the command line.
@update: If we are updating an existing guest, like from virt-xml
"""
instlist = util.listify(instlist)
if not instlist:
instlist = [None]
ret = []
for option_variable_name in dir(options):
if option_variable_name not in parsermap:
continue
for inst in util.listify(instlist):
parseret = parsermap[option_variable_name].parse(
guest, getattr(options, option_variable_name), inst,
validate=not update)
ret += util.listify(parseret)
return ret
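# Usage sketch (comments only; 'options' is the argparse Namespace produced by
# the option definitions above):
#   parsermap = build_parser_map(options)
#   parse_option_strings(parsermap, options, guest, None)
# walks every populated option (options.disk, options.network, ...) and hands
# its string value to the matching parser, e.g.
#   parsermap["vcpus"].parse(guest, options.vcpus, None)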
def check_option_introspection(options, parsermap):
"""
    Check if the user requested option introspection, e.g. '--disk=?'
"""
ret = False
for option_variable_name in dir(options):
if option_variable_name not in parsermap:
continue
if parsermap[option_variable_name].check_introspection(
getattr(options, option_variable_name)):
ret = True
return ret
|
Akasurde/virt-manager
|
virtinst/cli.py
|
Python
|
gpl-2.0
| 79,481
|
"""
WSGI config for elmolino project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "elmolino.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
palichis/elmolino
|
elmolino/wsgi.py
|
Python
|
gpl-2.0
| 391
|
from django.core.urlresolvers import resolve
from django.template.loader import render_to_string
from django.test import TestCase
from django.http import HttpRequest
from django.utils.html import escape
from unittest import skip
from lists.views import home_page
from lists.models import Item, List
from lists.forms import (
ItemForm, ExistingListItemForm,
EMPTY_ITEM_ERROR, DUPLICATE_ITEM_ERROR
)
class HomePageTest(TestCase):
def test_home_page_renders_home_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'home.html')
def test_home_page_uses_item_form(self):
response = self.client.get('/')
self.assertIsInstance(response.context['form'], ItemForm)
class ListViewTest(TestCase):
def post_invalid_input(self):
list_ = List.objects.create()
return self.client.post(
'/lists/%d/' % (list_.id),
data={'text': ''}
)
def test_for_invalid_input_nothing_saved_to_db(self):
self.post_invalid_input()
self.assertEqual(Item.objects.count(), 0)
def test_for_invalid_input_renders_list_template(self):
response = self.post_invalid_input()
self.assertEqual(response.status_code, 200)
def test_for_invalid_input_passes_form_to_template(self):
response = self.post_invalid_input()
self.assertIsInstance(response.context['form'], ExistingListItemForm)
def test_for_invalid_input_shows_error_on_page(self):
response = self.post_invalid_input()
self.assertContains(response, escape(EMPTY_ITEM_ERROR))
def test_duplicate_item_validation_errors_end_up_on_lists_page(self):
list1 = List.objects.create()
item1 = Item.objects.create(list=list1, text='textey')
response = self.client.post(
'/lists/%d/' % (list1.id),
data={'text':'textey'}
)
expected_error = escape(DUPLICATE_ITEM_ERROR)
self.assertContains(response, expected_error)
self.assertTemplateUsed(response, 'list.html')
self.assertEqual(Item.objects.all().count(), 1)
def test_uses_list_template(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertTemplateUsed(response, 'list.html')
def test_display_only_items_for_that_list(self):
correct_list = List.objects.create()
Item.objects.create(text='i1', list=correct_list)
Item.objects.create(text='i2', list=correct_list)
other_list = List.objects.create()
Item.objects.create(text='i1o', list=other_list)
Item.objects.create(text='i2o', list=other_list)
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertContains(response, 'i1')
self.assertContains(response, 'i2')
self.assertNotContains(response, 'i1o')
self.assertNotContains(response, 'i2o')
def test_passes_correct_list_to_template(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertEqual(response.context['list'], correct_list)
def test_can_save_a_POST_request_to_an_existing_list(self):
other_list = List.objects.create()
correct_list = List.objects.create()
self.client.post(
'/lists/%d/' % (correct_list.id,),
data={'text': 'A new item for an existing list'}
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new item for an existing list')
self.assertEqual(new_item.list, correct_list)
def test_POST_redirects_to_list_view(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.post (
'/lists/%d/' % (correct_list.id,),
data={'text': 'A new item for an existing list'}
)
self.assertRedirects(response, '/lists/%d/' % (correct_list.id,))
def test_displays_item_form(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id))
self.assertIsInstance(response.context['form'], ExistingListItemForm)
self.assertContains(response, 'name="text"')
class NewListTest(TestCase):
def test_saving_a_POST_request(self):
self.client.post(
'/lists/new',
data={'text': 'A new list item'},
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new list item')
def test_redirects_after_POST(self):
response = self.client.post(
'/lists/new',
data={'text':'A new list item'}
)
new_list = List.objects.first()
self.assertRedirects(response, '/lists/%d/' % (new_list.id,))
def test_for_invalid_input_renders_home_template(self):
response = self.client.post('/lists/new', data={'text': ''})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
def test_validation_errors_are_shown_on_home_page(self):
response = self.client.post('/lists/new', data={'text': ''})
self.assertContains(response, escape(EMPTY_ITEM_ERROR))
def test_for_invalid_input_passes_form_to_template(self):
response = self.client.post('/lists/new', data={'text': ''})
self.assertIsInstance(response.context['form'], ItemForm)
def test_invalid_list_items_arent_saved(self):
self.client.post('/lists/new', data={"text": ''})
self.assertEqual(List.objects.count(), 0)
self.assertEqual(Item.objects.count(), 0)
|
joshsmith2/superlists
|
lists/tests/test_views.py
|
Python
|
gpl-2.0
| 5,826
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-10 22:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dns', '0002_auto_20151228_0134'),
]
operations = [
migrations.CreateModel(
name='Redirection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('abbr', models.CharField(max_length=100, unique=True)),
('target', models.URLField()),
],
),
]
|
garncarz/dns-server
|
dns/migrations/0003_redirection.py
|
Python
|
gpl-2.0
| 640
|
import os,sys,cv2,pdb
from sklearn.decomposition import TruncatedSVD
from pylab import *
def get_feature(imgpath):
img = cv2.imread(imgpath,0)
img = cv2.resize(img,(32,64))
img = np.float32(img)
img = img / 255
img = np.reshape(img, (1,32*64))
return img
def extract_sample_from_image(imgdir):
feats = []
for rdir, pdir, names in os.walk(imgdir+'pos'):
for name in names:
sname,ext = os.path.splitext(name)
if 0 == cmp(ext, '.jpg'):
fname = os.path.join(rdir, name)
feats.append(get_feature(fname))
for rdir, pdir, names in os.walk(imgdir+'neg'):
for name in names:
sname,ext = os.path.splitext(name)
if 0 == cmp(ext, '.jpg'):
fname = os.path.join(rdir, name)
feats.append(get_feature(fname))
sample_num = len(feats)
sample_size = feats[0].shape[1]
samples = np.zeros((sample_num, sample_size))
for k in range(sample_num):
samples[k,:] = feats[k]
print 'samples ', samples.shape[0], samples.shape[1]
return samples
def run_svd(samples):
svd = TruncatedSVD(2)
svd.fit(samples)
newsamples = svd.transform(samples)
return (svd, newsamples)
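# Note (illustrative): TruncatedSVD(2) projects each 32*64 = 2048-dimensional
# image vector onto its two leading singular components, so 'newsamples' has
# shape (sample_num, 2); show_svd() below scatter-plots those 2-D points.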
def show_svd(transformed):
sample_num = transformed.shape[0]
for k in range(sample_num):
if k*2<sample_num:
mark = 'rx'
else:
mark = 'bo'
x,y = (transformed[k,0], transformed[k,1])
plot(x,y,mark)
show()
if __name__=="__main__":
samples = extract_sample_from_image('img/')
svd, transformed = run_svd(samples)
show_svd(transformed)
|
z01nl1o02/tests
|
learn_svd.py
|
Python
|
gpl-2.0
| 1,675
|
import fsui
class TextArea(fsui.TextArea):
pass
|
FrodeSolheim/fs-uae-launcher
|
workspace/ui/text_area.py
|
Python
|
gpl-2.0
| 54
|
from ctypes import *
import ctypes
from ctypes.wintypes import *
from _ssl import SSLError
import sys
CRYPT32 = windll.Crypt32
SCHANNEL = windll.SChannel
# Lots of "Magic" constants, mainly from schannel.h
SCH_CRED_NO_SYSTEM_MAPPER = 0x00000002
SCH_CRED_NO_DEFAULT_CREDS = 0x00000010
SCH_CRED_REVOCATION_CHECK_CHAIN = 0x00000200
SCH_CRED_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT = 0x00000400
SCH_CRED_IGNORE_NO_REVOCATION_CHECK = 0x00000800
SECPKG_ATTR_REMOTE_CERT_CONTEXT = 0x53
SECPKG_ATTR_STREAM_SIZES = 4
SP_PROT_SSL3_CLIENT = 0x00000020
SP_PROT_SSL2_CLIENT = 0x00000008
SP_PROT_TLS1_1_CLIENT = 0x00000200
SCHANNEL_CRED_VERSION = 0x00000004
UNISP_NAME = "Microsoft Unified Security Protocol Provider"
SECPKG_CRED_OUTBOUND = 0x00000002
SECURITY_NATIVE_DREP = 0x00000010
SECBUFFER_VERSION = 0
SECBUFFER_EMPTY = 0
SECBUFFER_DATA = 1
SECBUFFER_TOKEN = 2
SECBUFFER_EXTRA = 5
SECBUFFER_STREAM_TRAILER = 6
SECBUFFER_STREAM_HEADER = 7
ISC_REQ_SEQUENCE_DETECT = 0x00000008
ISC_REQ_REPLAY_DETECT = 0x00000004
ISC_REQ_CONFIDENTIALITY = 0x00000010
ISC_REQ_EXTENDED_ERROR = 0x00008000
ISC_REQ_ALLOCATE_MEMORY = 0x00000100
ISC_REQ_STREAM = 0x00010000
SEC_I_CONTINUE_NEEDED = 0x00090312
SEC_I_INCOMPLETE_CREDENTIALS = 0x00090320
SEC_I_RENEGOTIATE = 0x00090321
SEC_E_INCOMPLETE_MESSAGE = 0x80090318
SEC_E_INTERNAL_ERROR = 0x80090304
SEC_E_OK = 0x00000000
class SecPkgContext_StreamSizes(Structure):
_fields_ = [("cbHeader", ULONG),
("cbTrailer", ULONG),
("cbMaximumMessage", ULONG),
("cBuffers", ULONG),
("cbBlockSize", ULONG)]
class CERT_CONTEXT(Structure):
_fields_ = [("dwCertEncodingType", DWORD),
("pbCertEncoded", c_char_p),
("cbCertEncoded", DWORD),
("pCertInfo", c_void_p),
("hCertStore", c_void_p)]
class SecBuffer(Structure):
_fields_ = [("cbBuffer", ULONG),
("BufferType", ULONG),
("pvBuffer", c_void_p)]
class SecBufferDesc(Structure):
_fields_ = [("ulVersion", ULONG),
("cBuffers", ULONG),
("pBuffers", POINTER(SecBuffer))]
class _SecHandle(Structure):
_fields_ = [("dwLower", ULONG ),
("dwUpper", ULONG )]
class SCHANNEL_CRED(Structure):
_fields_ = [("dwVersion", DWORD),
("cCreds", DWORD),
("paCred", POINTER(HANDLE)),
("hRootStore", HANDLE),
("cMappers", DWORD),
("aphMappers", POINTER(HANDLE)),
("cSupportedAlgs", DWORD),
("palgSupportedAlgs", POINTER(HANDLE)),
("grbitEnabledProtocols", DWORD),
("dwMinimumCipherStrength", DWORD),
("dwMaximumCipherStrength", DWORD),
("dwSessionLifespan", DWORD),
("dwFlags", DWORD),
("dwCredFormat", DWORD),
]
class SecurityFunctionTable(Structure):
_fields_ = [("dwVersion", ULONG),
("EnumerateSecurityPackages", WINFUNCTYPE(LONG)),
("QueryCredentialsAttributes", WINFUNCTYPE(LONG)),
("AcquireCredentialsHandle", WINFUNCTYPE(ULONG, c_void_p, c_wchar_p, ULONG, HANDLE, c_void_p, c_void_p, c_void_p, HANDLE, PULONG)),
("FreeCredentialsHandle", WINFUNCTYPE(LONG)),
("Reserved2", c_void_p),
("InitializeSecurityContext", WINFUNCTYPE(ULONG, c_void_p, c_void_p, c_wchar_p, ULONG, ULONG, ULONG, c_void_p, ULONG, c_void_p, c_void_p, POINTER(ULONG), POINTER(ULONG))),
("AcceptSecurityContext", WINFUNCTYPE(ULONG)),
("CompleteAuthToken", WINFUNCTYPE(LONG)),
("DeleteSecurityContext", WINFUNCTYPE(LONG, c_void_p)),
("ApplyControlToken", WINFUNCTYPE(LONG)),
("QueryContextAttributes", WINFUNCTYPE(LONG, c_void_p, ULONG, c_void_p)),
("ImpersonateSecurityContext", WINFUNCTYPE(LONG)),
("RevertSecurityContext", WINFUNCTYPE(LONG)),
("MakeSignature", WINFUNCTYPE(LONG)),
("VerifySignature", WINFUNCTYPE(LONG)),
("FreeContextBuffer", WINFUNCTYPE(LONG, c_void_p)),
("QuerySecurityPackageInfo", WINFUNCTYPE(LONG)),
("Reserved3", c_void_p),
("Reserved4", c_void_p),
("ExportSecurityContext", WINFUNCTYPE(LONG)),
("ImportSecurityContext", WINFUNCTYPE(LONG)),
("AddCredentials", WINFUNCTYPE(LONG)),
("Reserved8", c_void_p),
("QuerySecurityContextToken", WINFUNCTYPE(LONG)),
("EncryptMessage", WINFUNCTYPE(ULONG, HANDLE, ULONG, HANDLE, ULONG)),
("DecryptMessage", WINFUNCTYPE(ULONG, HANDLE, HANDLE, ULONG, PULONG)),
("SetContextAttributes", WINFUNCTYPE(LONG)),]
class SSLContext(object):
def __init__(self):
self._InitSecurityInterface()
self._creds = None
self._context = _SecHandle()
self._SchannelCred = None
self.reset()
def reset(self):
if self._creds is not None:
windll.Secur32.FreeCredentialsHandle(byref(self._creds))
self._creds = _SecHandle()
        self._creds.dwUpper = 0
        self._creds.dwLower = 0
        self._context.dwUpper = 0
        self._context.dwLower = 0
self._SchannelCred = SCHANNEL_CRED()
self._intialized = False
self._recv_buffer = b'' # Raw socket data
self._recv_buffer_decrypted = b'' # socket data that is decrypted
def do_handshake(self):
self.reset()
self._ClientCreateCredentials()
self._ClientHandshake()
#TODO: validate remote certificate
self._intialized = True #all communications should now be encrypted
def _ClientHandshake(self):
buffer = SecBuffer()
buffer.pvBuffer = None
buffer.BufferType = SECBUFFER_TOKEN
buffer.cbBuffer = 0
bufferGroup = SecBufferDesc()
bufferGroup.cBuffers = 1
bufferGroup.pBuffers = pointer(buffer)
bufferGroup.ulVersion = SECBUFFER_VERSION
dwSSPIFlags = ISC_REQ_SEQUENCE_DETECT | ISC_REQ_REPLAY_DETECT | ISC_REQ_CONFIDENTIALITY | ISC_REQ_EXTENDED_ERROR | ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_STREAM
dwSSPIOutFlags = DWORD()
Status = self._securityFunc.InitializeSecurityContext(byref(self._creds),
None,
c_wchar_p(self._server_hostname),
dwSSPIFlags,
0,
SECURITY_NATIVE_DREP,
None,
0,
byref(self._context),
byref(bufferGroup),
byref(dwSSPIOutFlags),
POINTER(ULONG)() )
if Status != SEC_I_CONTINUE_NEEDED and Status != SEC_E_OK:
raise SSLError(WinError(c_long(Status).value))
if Status == SEC_I_CONTINUE_NEEDED:
if buffer.cbBuffer != 0 and buffer.pvBuffer is not None:
data = string_at(buffer.pvBuffer, buffer.cbBuffer)
if self.send(data, plaintext = True) == 0:
self._securityFunc.FreeContextBuffer(buffer.pvBuffer)
self._securityFunc.DeleteSecurityContext(byref(self._context))
else:
self._securityFunc.FreeContextBuffer(buffer.pvBuffer)
(Status,extraData) = self._ClientHandshakeLoop(True)
if Status != SEC_E_OK:
raise SSLError(WinError(c_long(Status).value))
def _ClientHandshakeLoop(self, doRead):
Status = SEC_I_CONTINUE_NEEDED
dwSSPIFlags = ISC_REQ_SEQUENCE_DETECT | ISC_REQ_REPLAY_DETECT | ISC_REQ_CONFIDENTIALITY | ISC_REQ_EXTENDED_ERROR | ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_STREAM
dwSSPIOutFlags = DWORD()
recv_data = b''
while Status == SEC_I_CONTINUE_NEEDED or Status == SEC_E_INCOMPLETE_MESSAGE or Status == SEC_I_INCOMPLETE_CREDENTIALS:
if len(recv_data) == 0 or Status == SEC_E_INCOMPLETE_MESSAGE:
if doRead:
data = self._sock.recv(2048, raw = True)
recv_data += data
else:
doRead = True
inBufferGroup = SecBufferDesc()
inBufferGroup.cBuffers = 2
inBufferGroup.ulVersion = SECBUFFER_VERSION
buffers = (SecBuffer * 2)()
buffers[0].pvBuffer = cast(c_char_p(recv_data), c_void_p)
buffers[0].cbBuffer = len(recv_data)
buffers[0].BufferType = SECBUFFER_TOKEN
buffers[1].pvBuffer = None
            buffers[1].cbBuffer = 0
buffers[1].BufferType = SECBUFFER_EMPTY
inBufferGroup.pBuffers = buffers
outBufferGroup = SecBufferDesc()
outBufferGroup.cBuffers = 1
outBufferGroup.ulVersion = SECBUFFER_VERSION
buffers = (SecBuffer * 1)()
buffers[0].pvBuffer = None
buffers[0].BufferType = SECBUFFER_TOKEN
buffers[0].cbBuffer = 0
outBufferGroup.pBuffers = buffers
Status = self._securityFunc.InitializeSecurityContext(byref(self._creds),
byref(self._context),
c_wchar_p(self._server_hostname),
dwSSPIFlags,
0,
SECURITY_NATIVE_DREP,
byref(inBufferGroup),
0,
None,
byref(outBufferGroup),
byref(dwSSPIOutFlags),
POINTER(ULONG)()
)
if Status == SEC_E_OK or Status == SEC_I_CONTINUE_NEEDED:
if outBufferGroup.pBuffers[0].cbBuffer != 0 and outBufferGroup.pBuffers[0].pvBuffer is not None:
data = string_at(outBufferGroup.pBuffers[0].pvBuffer, outBufferGroup.pBuffers[0].cbBuffer)
if self._sock.sendall(data, raw = True) == 0:
self._securityFunc.FreeContextBuffer(outBufferGroup.pBuffers[0].pvBuffer)
self._securityFunc.DeleteSecurityContext(byref(self._context))
return (SEC_E_INTERNAL_ERROR, None)
else:
self._securityFunc.FreeContextBuffer(outBufferGroup.pBuffers[0].pvBuffer)
outBufferGroup.pBuffers[0].pvBuffer = None
if Status == SEC_E_INCOMPLETE_MESSAGE:
continue
if Status == SEC_E_OK:
if inBufferGroup.pBuffers[1].BufferType == SECBUFFER_EXTRA:
return (Status, recv_data[-inBufferGroup.pBuffers[1].cbBuffer:])
else:
return (Status, None)
if inBufferGroup.pBuffers[1].BufferType == SECBUFFER_EXTRA:
recv_data = recv_data[-inBufferGroup.pBuffers[1].cbBuffer:]
else:
recv_data = b""
if Status == SEC_I_INCOMPLETE_CREDENTIALS:
#return (Status, None)
doRead = False
continue
return (Status, None)
def _InitSecurityInterface(self):
func = SCHANNEL.InitSecurityInterfaceW
func.restype = POINTER(SecurityFunctionTable)
self._securityFunc = func().contents
def _wrap_socket(self, sock, server_side, server_hostname, client_certificate = None):
self._sock = sock
self._server_hostname = server_hostname
self._client_certificate = client_certificate
return self
def _ClientCreateCredentials(self):
if self._client_certificate is not None:
self._SchannelCred.cCreds = 1
self._SchannelCred.paCred = pointer(self._client_certificate)
self._SchannelCred.grbitEnabledProtocols = SP_PROT_TLS1_1_CLIENT #| SP_PROT_TLS1_1_CLIENT | SP_PROT_SSL2_CLIENT
self._SchannelCred.dwVersion = SCHANNEL_CRED_VERSION
self._SchannelCred.dwFlags |= SCH_CRED_NO_DEFAULT_CREDS | \
SCH_CRED_NO_SYSTEM_MAPPER | \
SCH_CRED_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT | \
SCH_CRED_IGNORE_NO_REVOCATION_CHECK
Status = self._securityFunc.AcquireCredentialsHandle(None,
c_wchar_p(UNISP_NAME),
SECPKG_CRED_OUTBOUND,
None,
byref(self._SchannelCred),
None,
None,
byref(self._creds),
POINTER(ULONG)())
if Status != SEC_E_OK:
raise SSLError(WinError(Status))
def send(self, data, flags = 0, plaintext = False):
if self._intialized is False and plaintext is True:
return self._sock.sendall(data, flags, raw = True)
else:
Sizes = SecPkgContext_StreamSizes()
Status = self._securityFunc.QueryContextAttributes(byref(self._context), SECPKG_ATTR_STREAM_SIZES, byref(Sizes))
if Status != SEC_E_OK:
raise SSLError(WinError(c_long(Status).value))
bufferValue = b'\x00' * Sizes.cbHeader + data + b'\x00' * Sizes.cbTrailer + (b'\x00' *(Sizes.cbMaximumMessage - len(data)))
allocatedBuffer = create_string_buffer(bufferValue)
messageBuffers = SecBufferDesc()
messageBuffers.cBuffers = 4
messageBuffers.ulVersion = SECBUFFER_VERSION
buffers = (SecBuffer * 4)()
buffers[0].BufferType = SECBUFFER_STREAM_HEADER
buffers[0].cbBuffer = Sizes.cbHeader
buffers[0].pvBuffer = cast(byref(allocatedBuffer), c_void_p)
buffers[1].BufferType = SECBUFFER_DATA
buffers[1].cbBuffer = len(data)
buffers[1].pvBuffer = cast(byref(allocatedBuffer, Sizes.cbHeader), c_void_p)
buffers[2].BufferType = SECBUFFER_STREAM_TRAILER
buffers[2].cbBuffer = Sizes.cbTrailer
buffers[2].pvBuffer = cast(byref(allocatedBuffer, Sizes.cbHeader + len(data)), c_void_p)
buffers[3].BufferType = SECBUFFER_EMPTY
messageBuffers.pBuffers = buffers
Status = self._securityFunc.EncryptMessage(byref(self._context),0, byref(messageBuffers), 0)
if Status != SEC_E_OK:
raise SSLError(WinError(c_long(Status).value))
encrypted_data = string_at(buffers[0].pvBuffer, buffers[0].cbBuffer + buffers[1].cbBuffer + buffers[2].cbBuffer)
return self._sock.sendall(encrypted_data, flags, raw=True)
def recv(self, buffersize, flags=0, plaintext=False):
if self._intialized is False and plaintext is True:
return self._sock.recv(buffersize, flags, raw=True)
else:
if len(self._recv_buffer_decrypted) > 0:
decrypted_data = self._recv_buffer_decrypted[:buffersize]
self._recv_buffer_decrypted = self._recv_buffer_decrypted[buffersize:]
return decrypted_data
decrypted_data = self._recv_buffer_decrypted
shouldContinue = True
while shouldContinue:
self._recv_buffer += self._sock.recv(buffersize, flags, raw = True)
messageBuffers = SecBufferDesc()
messageBuffers.cBuffers = 4
messageBuffers.ulVersion = SECBUFFER_VERSION
buffers = (SecBuffer * 4)()
buffers[0].pvBuffer = cast(c_char_p(self._recv_buffer), c_void_p)
buffers[0].cbBuffer = len(self._recv_buffer)
buffers[0].BufferType = SECBUFFER_DATA
buffers[1].BufferType = SECBUFFER_EMPTY
buffers[2].BufferType = SECBUFFER_EMPTY
buffers[3].BufferType = SECBUFFER_EMPTY
messageBuffers.pBuffers = buffers
Status = self._securityFunc.DecryptMessage(byref(self._context), byref(messageBuffers), 0, None)
if Status == SEC_E_INCOMPLETE_MESSAGE:
continue
if Status != SEC_E_OK and Status != SEC_I_RENEGOTIATE:
raise SSLError(WinError(c_long(Status).value))
for idx in range(1,4):
if messageBuffers.pBuffers[idx].BufferType == SECBUFFER_DATA:
decrypted_data += string_at(messageBuffers.pBuffers[idx].pvBuffer, messageBuffers.pBuffers[idx].cbBuffer)
break
extra_data = b''
for idx in range(1,4):
if messageBuffers.pBuffers[idx].BufferType == SECBUFFER_EXTRA:
extra_data = string_at(messageBuffers.pBuffers[idx].pvBuffer, messageBuffers.pBuffers[idx].cbBuffer)
break
if len(extra_data) > 0:
self._recv_buffer = extra_data
continue
else:
self._recv_buffer = b''
shouldContinue = False
if Status == SEC_I_RENEGOTIATE:
(Status, _) = self._ClientHandshakeLoop(doRead = False)
shouldContinue = True
if Status != SEC_E_OK:
raise SSLError(WinError(c_long(Status).value))
elif Status != SEC_E_OK:
raise SSLError(WinError(c_long(Status).value))
self._recv_buffer_decrypted = decrypted_data[buffersize:]
return decrypted_data[:buffersize]
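# --- Illustrative usage sketch (added; not part of the original module) ---
# Hedged example only: it assumes `wrapped_sock` is this library's own socket
# wrapper, whose recv()/sendall() accept the `raw` keyword used above; a plain
# socket.socket will not work, and the hostname below is hypothetical.
#
#   ctx = SSLContext()
#   ctx._wrap_socket(wrapped_sock, server_side=False, server_hostname='example.com')
#   ctx.do_handshake()                      # Schannel TLS handshake via InitializeSecurityContext
#   ctx.send(b'GET / HTTP/1.0\r\n\r\n')     # EncryptMessage, then raw send
#   response = ctx.recv(4096)               # raw recv, then DecryptMessage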
|
lsowen/pySchannelSSL
|
sslcontext.py
|
Python
|
gpl-2.0
| 19,919
|
#! /usr/bin/env python
"""
##############################################################################
##
##
## @Name : graphicsRequestBroker.py
##
##
## @author : Nicholas Lemay
##
## @since : 2007-06-28, last updated on 2008-04-23
##
##
## @license : MetPX Copyright (C) 2004-2006 Environment Canada
## MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
## named COPYING in the root of the source directory tree.
##
##   @summary : This file is to be used as a bridge between the graphics
##              request web page and the different plotting methods.
##
##
##
##  @requires: graphicsRequest, which sends all the queries.
##
## The different graphic plotters.
##
##
##############################################################################
"""
import cgi, gettext, os, sys
import cgitb; cgitb.enable()
sys.path.insert(2, sys.path[0] + '/../../..')
from pxStats.lib.StatsPaths import StatsPaths
from pxStats.lib.GnuQueryBroker import GnuQueryBroker
from pxStats.lib.RRDQueryBroker import RRDQueryBroker
from pxStats.lib.LanguageTools import LanguageTools
from cgi import escape
LOCAL_MACHINE = os.uname()[1]
EXPECTED_PARAMETERS = [ 'lang', 'querier','endTime','groupName','span','fileType','machines','statsTypes','preDeterminedSpan','sourlients','combineSourlients','products']
CURRENT_MODULE_ABS_PATH = os.path.abspath(__file__).replace( ".pyc", ".py" )
def returnToQueriersLocationWithReply( querier, reply ):
"""
@summary : Changes location back to the querier + returns the
reply of the query to the querier.
@param querier: String containing the location of the querier.
    @param reply   : Series of parameters to send as a reply to the querier.
"""
print """
HTTP/1.0 200 OK
Server: NCSA/1.0a6
Content-type: text/plain
"""
print """
%s
""" %( escape(reply) )
def getQuerierLocation( form ):
"""
    @param form : Form with which this program was called.
    @return     : Returns the querier's location.
"""
try:
querier = form["querier"]
except:
querier = ""
return querier
def handlePlotRequest( form, language ):
"""
    @param form: form which contains
the parameters to use
for the query.
@param language : language of the querier.
@precondition: global _ translator
must have been set
prior to calling
this method.
"""
global _
querier = getQuerierLocation( form )
plotter = getPlotterType(form)
#validate for known plotter
if plotter == "gnuplot":
queryBroker = GnuQueryBroker( querierLanguage = language )
elif plotter == "rrd":
queryBroker = RRDQueryBroker( querierLanguage = language )
else:
queryBroker = None
#---------------------------------------------------------------------- try:
if queryBroker != None :#if valid plotter
queryBroker.getParametersFromForm( form )
error = queryBroker.searchForParameterErrors()
if error == "" :
queryBroker.prepareQuery( )
queryBroker.executeQuery( )
reply = queryBroker.getReplyToSendToquerier()
returnToQueriersLocationWithReply( querier , reply )
else: #An error was located within the call.
queryBroker.replyParameters.error = error
reply = queryBroker.getReplyToSendToquerier()
returnToQueriersLocationWithReply( querier , reply )
else:#other
reply = "images=;error=" + _("Cannot execute query.Unknown plotter.Plotter was %s") %plotter
returnToQueriersLocationWithReply( querier , reply )
#---------------------------------------------------- except Exception,inst:
#---------------- reply = "images=;error=Unexpected error : %s." %(inst)
#------------------ returnToQueriersLocationWithReply( querier , reply )
def getPlotterType( form ):
"""
    @param form : Form with which this program was called.
@return : Returns the plotter type.
"""
#---------------------------------------------------------------------- try:
if ( form["preDeterminedSpan"] == _("daily") ) :
plotter = "gnuplot"
else:
try:
if int( form["span"] )<= 36 :
plotter = "gnuplot"
else:
plotter = "rrd"
except:
plotter = "rrd"
#------------------------------------------------------------------- except:
#---------------------------------------------------------- plotter = ""
return plotter
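# Hedged worked example (added; values hypothetical): with the rule above, a form
# whose preDeterminedSpan equals the translated "daily" selects "gnuplot";
# otherwise a numeric span value of 36 or less selects "gnuplot" (e.g. span="24"),
# while a larger or non-numeric span falls back to "rrd" (e.g. span="168").
# Calling getPlotterType() for real requires the global _ translator to be set
# via setGlobalLanguageParameters() first.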
def getForm():
"""
    @summary: Returns the form with which this page was called.
    @note: The input form is expected to be contained within the field storage.
    Thus this program is expected to be called from requests like
    xmlhttp.send()
    @return: Returns the form with which this page was called.
"""
newForm = {}
form = cgi.FieldStorage()
#print form
for key in form.keys():
# print key
value = form.getvalue(key, "")
if isinstance(value, list):
# Multiple username fields specified
newvalue = ",".join(value)
else:
newvalue = value
newForm[key.replace("?","")]= newvalue
for param in EXPECTED_PARAMETERS:
if param not in newForm.keys():
newForm[param] = ''
form = newForm
#print form
return form
def getLanguage( form ):
"""
@summary : Returns the language in which
the page should be generated.
    @param form: Form containing the parameters
    with which this program was
called.
"""
language = ""
try :
language = form["lang"]
except:
pass
return language
def setGlobalLanguageParameters( language ):
"""
@summary : Sets up all the needed global language
variables so that they can be used
everywhere in this program.
    @param language: language with which this
script was called.
@return: None
"""
global _
_ = LanguageTools.getTranslatorForModule( CURRENT_MODULE_ABS_PATH, language)
def main():
"""
@summary: Based on the plotter specified in the received form,
executes query using a broker that's specific to
the said plotter.
"""
try:
form = getForm()
language = getLanguage( form )
if language == "" : # unspecified language.
querier = getQuerierLocation( form )
reply = "images=;error=" + "Error in query broker. Cannot proceed with query. No language was specified." #No language, do not translate.
returnToQueriersLocationWithReply( querier, reply )
elif language not in LanguageTools.getSupportedLanguages(): # unsupported language
querier = getQuerierLocation( form )
reply = "images=;error=" + "Error in query broker. Cannot proceed with query. %s is not a supported language." %language #Unsupported language, do not translate.
returnToQueriersLocationWithReply( querier, reply )
else: #params seem ok
setGlobalLanguageParameters( language )
handlePlotRequest( form, language )
    except Exception, instance : #temp file helpful for debugging!
fileHandle= open('graphicsRequestBrokerDebuggingOutput','w')
fileHandle.write( str(instance) )
fileHandle.close()
if __name__ == '__main__':
main()
|
khosrow/metpx
|
pxStats/bin/webPages/graphicsRequestBroker.py
|
Python
|
gpl-2.0
| 8,456
|
from django.db import models
# Create your models here.
class Tipo_Vehiculo(models.Model):
"""docstring for Tipo_Vehiculo"""
def __init__(self, *args, **kwargs):
super(Tipo_Vehiculo, self).__init__(*args, **kwargs)
tipo_vehiculo = models.CharField(max_length=100, unique=True)
adicional1 = models.CharField(max_length=250, blank=True)
adicional2 = models.CharField(max_length=250, blank=True)
adicional3 = models.CharField(max_length=250, blank=True)
adicional4 = models.CharField(max_length=250, blank=True)
activo = models.BooleanField(default=True)
def __unicode__(self):
return self.tipo_vehiculo
class Meta:
verbose_name_plural = "Tipos de Vehiculos"
class Modelo_Vehiculo(models.Model):
"""docstring for Modelo_Vehiculo"""
def __init__(self, *args, **kwargs):
super(Modelo_Vehiculo, self).__init__(*args, **kwargs)
modelo_vehiculo = models.CharField(max_length=100)
capacidad_peso = models.IntegerField()
capacidad_x = models.DecimalField(max_digits=6, decimal_places=2)
capacidad_y = models.DecimalField(max_digits=6, decimal_places=2)
capacidad_z = models.DecimalField(max_digits=6, decimal_places=2)
capacidad_m3 = models.DecimalField(max_digits=6, decimal_places=2)
adicional1 = models.CharField(max_length=250, blank=True)
adicional2 = models.CharField(max_length=250, blank=True)
adicional3 = models.CharField(max_length=250, blank=True)
adicional4 = models.CharField(max_length=250, blank=True)
activo = models.BooleanField(default=True)
def __unicode__(self):
return self.modelo_vehiculo
class Meta:
verbose_name_plural = "Modelos de Vehiculos"
class Vehiculo(models.Model):
"""docstring for Vehiculo"""
def __init__(self, *args, **kwargs):
super(Vehiculo, self).__init__(*args, **kwargs)
numero_vehiculo = models.CharField(max_length=10)
#mantenimiento_vehiculo = models.ForeignKey()
vehiculo = models.CharField(max_length=100)
patente = models.CharField(max_length=100)
tipo_vehiculo = models.ForeignKey(Tipo_Vehiculo)
modelo_vehiculo = models.ForeignKey(Modelo_Vehiculo)
adicional1 = models.CharField(max_length=250, blank=True)
adicional2 = models.CharField(max_length=250, blank=True)
adicional3 = models.CharField(max_length=250, blank=True)
adicional4 = models.CharField(max_length=250, blank=True)
activo = models.BooleanField(default=True)
def __unicode__(self):
return self.vehiculo
class Meta:
verbose_name_plural = "Vehiculos"
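# --- Hedged usage sketch (added; not part of the original app) ---
# The helper below is illustrative only and is never called; all field values
# are hypothetical, and it assumes migrations for these models are applied.
def _example_usage():
    tipo = Tipo_Vehiculo.objects.create(tipo_vehiculo='Camion')
    modelo = Modelo_Vehiculo.objects.create(
        modelo_vehiculo='Furgon 3.5t',
        capacidad_peso=3500,
        capacidad_x=2.10, capacidad_y=2.20, capacidad_z=4.50,
        capacidad_m3=20.79,
    )
    # A Vehiculo ties a unit number and plate to its type and model.
    return Vehiculo.objects.create(
        numero_vehiculo='V-001',
        vehiculo='Camion 001',
        patente='AB123CD',
        tipo_vehiculo=tipo,
        modelo_vehiculo=modelo,
    )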
|
jrmendozat/mtvm
|
Vehiculo/models.py
|
Python
|
gpl-2.0
| 2,597
|
#! /usr/bin/env python
import unittest
import time
from Communication import Communication
class CommunicationTest(unittest.TestCase):
def setUp(self):
'''
Verify environment is setup properly.
'''
self.controller = Communication()
self.b_list = self.controller.get_bluetooth_list()
def tearDown(self):
'''
        Verify environment is torn down properly.
'''
pass
def test_get_bluetooth_list(self):
'''
        Verify that the bluetooth list was retrieved without problems.
'''
value = False
if "Empty" not in self.b_list[0]:
value = True
self.assertTrue(value)
def test_send(self):
'''
        Verify that the instruction was sent without problems.
'''
for b_name in self.b_list:
if "CURIOSITY"in b_name:
break
self.controller.connect(b_name)
value = self.controller.send("Hello")
time.sleep(5)
self.controller.disconnect()
self.assertTrue(value)
if __name__ == '__main__':
unittest.main()
|
mparra-mpz/CURIOSITY
|
CURIOSITY/test/CommunicationTest.py
|
Python
|
gpl-2.0
| 1,149
|
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import sys
import urllib
import shows
import settings
def menu(handle):
total_items = 4
addDirectory(handle, 'All', 14, True, settings.addon_path + '/manage.png', total_items)
addDirectory(handle, 'Continuing', 14, True, settings.addon_path + '/manage.png', total_items)
addDirectory(handle, 'Ended', 14, True, settings.addon_path + '/manage.png', total_items)
addDirectory(handle, 'Paused', 14, True, settings.addon_path + '/manage.png', total_items)
xbmcplugin.endOfDirectory(int(handle))
def addDirectory(handle, menu_item_name, menu_number, folder, icon, total_items):
return_url = 'plugin://{}/?mode={}&name={}'.format(settings.pluginID, menu_number, urllib.quote_plus(menu_item_name))
list_item = xbmcgui.ListItem(menu_item_name)
list_item.setArt({'icon': icon, 'thumb': icon, 'poster': icon, 'fanart': '', 'banner': '', 'clearart': '', 'clearlogo': '', 'landscape': ''})
xbmcplugin.addDirectoryItem(handle=int(handle), url=return_url, listitem=list_item, isFolder=folder, totalItems=total_items)
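# Hedged illustration (added; not from the original add-on): with the menu()
# wiring above, clicking the 'Continuing' entry re-invokes the plugin with a URL
# of roughly the following shape, where <pluginID> stands for settings.pluginID:
#   plugin://<pluginID>/?mode=14&name=Continuing
# The mode/name pair is what the add-on's router is expected to dispatch on.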
|
Hiltronix/plugin.video.sickrage
|
resources/lib/show_filter.py
|
Python
|
gpl-2.0
| 1,116
|
import time
import numpy
from golib.config.golib_conf import gsize, B, W, E
import camkifu.stone
from camkifu.core import imgutil
class StonesFinderTuto(camkifu.stone.StonesFinder):
""" This class has been used to write a tutorial on how to create a new StonesFinder.
Run Camkifu with this class as the default StonesFinder in order to replay one step of the tuto.
In order to select the step, rename the desired method below to '_find(...)' .
"""
def __init__(self, vmanager):
super().__init__(vmanager)
self.canvas = None
def _learn(self):
pass
# ------------------------------------------------------
#
# TUTORIAL STEPS
#
# ------------------------------------------------------
def _find_minimal(self, goban_img):
""" Implementation 1 of _find() from the tutorial.
"""
imgutil.draw_str(goban_img, "Hello stones finding tutorial !")
self._show(goban_img)
def _find_suggest(self, _):
""" Implementation 2 of _find() from the tutorial.
"""
# check emptiness to avoid complaints since this method will be called in a loop
if self.is_empty(2, 12):
# using "numpy" coordinates frame for x and y
self.suggest(B, 2, 12)
def _find_bulk(self, _):
""" Implementation 3 of _find() from the tutorial.
"""
# using "numpy" coordinates frame for x and y
black = ((W, 8, 8), (W, 8, 10), (W, 10, 8), (W, 10, 10))
white = ((B, 7, 7), (B, 7, 11), (B, 11, 7), (B, 11, 11), (B, 9, 9))
add = black if self.total_f_processed % 2 else white
rem = white if self.total_f_processed % 2 else black
moves = []
for color, r, c in add:
moves.append((color, r, c))
for _, r, c in rem:
if not self.is_empty(r, c):
moves.append((E, r, c))
time.sleep(0.7)
self.bulk_update(moves)
def _find_getrect(self, goban_img):
""" Implementation 4 of _find() from the tutorial.
"""
canvas = numpy.zeros_like(goban_img)
for r in range(gsize): # row index
for c in range(gsize): # column index
if r == c or r == gsize - c - 1:
x0, y0, x1, y1 = self.getrect(r, c)
canvas[x0:x1, y0:y1] = goban_img[x0:x1, y0:y1]
self._show(canvas)
def _find_border(self, goban_img):
""" Implementation 5 of _find() from the tutorial.
"""
canvas = numpy.zeros_like(goban_img)
for r, c in self._empties_border(2): # 2 is the line height as in go vocabulary (0-based)
x0, y0, x1, y1 = self.getrect(r, c)
canvas[x0:x1, y0:y1] = goban_img[x0:x1, y0:y1]
self._show(canvas)
def _find_spiral(self, goban_img):
""" Implementation 6 of _find() from the tutorial.
"""
count = 0
if self.canvas is None:
self.canvas = numpy.zeros_like(goban_img)
for r, c in self._empties_spiral():
if count == self.total_f_processed % gsize ** 2:
x0, y0, x1, y1 = self.getrect(r, c)
self.canvas[x0:x1, y0:y1] = goban_img[x0:x1, y0:y1]
break
count += 1
self.last_shown = 0 # force display of all images
self._show(self.canvas)
|
ArnaudPel/CamKifu
|
src/camkifu/stone/sf_tuto.py
|
Python
|
gpl-2.0
| 3,397
|
from Adafruit_ADS1x15 import ADS1x15 as A2DObject
from functools import partial
# create AI channel objects
class aiChannel:
def __init__(self,confDict):
#open connection on physicalChannel
self.name = confDict['labelText']
self.i2cAddress = confDict['i2cAddress']
self.connectionType = confDict['connectionType']
self.physChan = confDict['physicalChannel']
self.gain = confDict['gainFactor']
self.rate = confDict['sampleRate']
self.LCD = None
self.mapStyle = confDict['mappingStyle']
self.mapParams = confDict['mapParams']
self.units = confDict['mappedUnits']
self.connection = A2DObject(address=self.i2cAddress)
self.readOrder = confDict['readOrder']
# gets the latest raw, measured voltage off the ADC
def getLatestVoltage(self):
if self.connectionType == 'RSE':
return self.connection.readADCSingleEnded(
channel=self.physChan,
pga=self.gain,
sps=self.rate
)
elif self.connectionType == 'diff':
return self.connection.readADCDifferential(
chP=self.physChan[0], chN=self.physChan[1],
pga=self.gain,
sps=self.rate
)
else:
print 'UNKNOWN CONNECTION TYPE SPECIFIED!!!'
return 0
# maps the raw voltage to a reading (e.g. volts -> pressure)
def _map(self,voltage):
if self.mapStyle == 'poly':
reading = self.mapParams[0]
reading += self.mapParams[1]*voltage
reading += self.mapParams[2]*voltage**2
reading += self.mapParams[3]*voltage**3
reading += self.mapParams[4]*voltage**4
elif self.mapStyle == 'exp':
reading = self.mapParams[0]*(self.mapParams[1]**voltage)
else:
reading = 0
print 'no mapping style was defined!'
return reading
# gets the latest reading off the ADC
def getLastReading(self):
newVoltage = self.getLatestVoltage()
newReading = self._map(newVoltage)
if self.LCD is not None:
self.LCD.display(newReading)
return newReading
# gets N readings and returns the average
def getNReadings(self,nSamp):
if self.connectionType == 'RSE':
self.connection.startContinuousConversion(
channel = self.physChan,
pga = self.gain,
sps = self.rate
)
total = 0.
for i in range(nSamp):
total += self.connection.getLastConversionResults()
self.connection.stopContinuousConversion()
result = self._map(total/nSamp)
return result
elif self.connectionType == 'diff':
self.connection.startContinuousDifferentialConversion(
chP=self.physChan[0], chN=self.physChan[1],
pga=self.gain,
sps=self.rate
)
total = 0.
for i in range(nSamp):
total += self.connection.getLastConversionResults()
self.connection.stopContinuousConversion()
result = self._map(total/nSamp)
return result
else:
print 'UNKNOWN CONNECTION TYPE SPECIFIED!!!'
return 0
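# --- Hedged example (added; not used anywhere in this module) ---
# Illustrates the 'poly' mapping performed by aiChannel._map() with hypothetical
# calibration parameters: reading = p0 + p1*v + p2*v**2 + p3*v**3 + p4*v**4.
def _example_poly_map(voltage, mapParams=(0.0, 2.5, 0.0, 0.0, 0.0)):
    reading = mapParams[0]
    reading += mapParams[1]*voltage
    reading += mapParams[2]*voltage**2
    reading += mapParams[3]*voltage**3
    reading += mapParams[4]*voltage**4
    return reading
# _example_poly_map(1.2) -> 3.0 for the hypothetical parameters above.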
from config import roverLogPath
import ConfigParser
import os
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # use pin numberings printed on cobbler
GPIO.setwarnings(False) # silence overuse warnings in case you have two DO's on same pin
# create DO channel objects
class doChannel:
def __init__(self,confDict,aiChanDict,clockFunct=None):
# read in static class variables
self.name = confDict['name']
self.physChanNum = confDict['physicalChannel']
self.labelText = confDict['labelText']
self.aiChanDict = aiChanDict
self.clockFunction = clockFunct
self.currentState = False
GPIO.setup(self.physChanNum,GPIO.OUT)
initState = confDict['initState'] in ['True']
self.setState(initState)
self.interlockState = False
initInterlockState = confDict['initInterlockState'] in ['True']
self.setInterlockState(initInterlockState)
self.interlocks = {}
self.confDict = confDict
# initialize interlock configparser object, read in
self.interlockConfigParser = ConfigParser.RawConfigParser()
self.interlockConfigFilename = os.path.join(roverLogPath, 'interlockConfigs', 'interlockConfig_'+self.name+'.txt')
self.interlockConfigParser.read(self.interlockConfigFilename)
# parse the interlocks config dicts and create each
for interlockKey in self.interlockConfigParser.sections():
thisInterlockConfDict = {}
thisInterlockConfDict['senseChan'] = self.interlockConfigParser.get(interlockKey, 'senseChan')
thisInterlockConfDict['logFun'] = self.interlockConfigParser.get(interlockKey, 'logFun')
thisInterlockConfDict['limVal'] = float(self.interlockConfigParser.get(interlockKey, 'limVal'))
thisAIChanObj = self.aiChanDict[thisInterlockConfDict['senseChan']]
thisInterlock = self.createInterlock(thisInterlockConfDict,thisAIChanObj,key=int(interlockKey))
def setState(self, newState):
GPIO.output(self.physChanNum, newState)
if self.clockFunction is not None:
self.clockFunction()
self.currentState = newState
if newState == True: stateStr = 'ON'
if newState == False: stateStr = 'OFF'
print self.name+' has been turned '+stateStr
def getState(self):
state = GPIO.input(self.physChanNum)
self.currentState = state
return state
def createInterlock(self,confDict,aiObj,key=None):
newInterlock = interlock(confDict,aiObj)
if key is None:
interlockIndex = len(self.interlocks.keys())
else:
interlockIndex = key
self.interlocks[interlockIndex] = newInterlock
def addInterlock(self,interlock):
interlockIndex = len(self.interlocks.keys())
self.interlocks[interlockIndex] = interlock
def deleteInterlock(self,interlockKey):
del self.interlocks[interlockKey]
def getInterlocks(self):
return self.interlocks
def setInterlockState(self,newState):
self.interlockState = newState
def testInterlocks(self):
if not self.interlockState: return False
for interlock in self.interlocks.values():
if interlock.testInterlock():
print 'INTERLOCK TRIPPED ON '+self.name+'!!!'
print str(interlock.aiChannelObj.name)+' was measured above setpoint of '+str(interlock.limitValue)+' at '+str(interlock.aiChannelObj.LCD.value())
return True
return False
def configUpdate(self):
for interlockKey, interlock in self.interlocks.items():
confDict = interlock.getConfDict()
interlockKey = str(interlockKey)
if interlockKey not in self.interlockConfigParser.sections():
self.interlockConfigParser.add_section(interlockKey)
self.interlockConfigParser.set(interlockKey, 'senseChan', confDict['senseChan'])
self.interlockConfigParser.set(interlockKey, 'logFun', confDict['logFun'])
self.interlockConfigParser.set(interlockKey, 'limVal', str(confDict['limVal']))
configSectionList = self.interlockConfigParser.sections()
for configSection in configSectionList:
if int(configSection) not in self.interlocks.keys():
self.interlockConfigParser.remove_section(configSection)
with open(self.interlockConfigFilename, 'wb') as configfile:
self.interlockConfigParser.write(configfile)
# create interlock object
# Upon initialization it takes an analog input channel (e.g. water temp)
# to monitor and a limit value (e.g. 30 degrees). These will not be directly
# initialized but rather created/disabled/destroyed by a digital channel
# object, which the interlock can toggle when the limit is crossed as per
# logicalFunction.
# Interlocks will be tests of the logical form:
# if [AI_CHANNEL] is [LOGICAL_FUNCTION] [LIMITVAL] then turn off
import operator
LOGICAL_FUNCTIONS = {
'>': operator.gt,
'>=': operator.ge,
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne
}
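# --- Hedged example (added; never called by this module) ---
# Shows how interlock.testInterlock() below combines LOGICAL_FUNCTIONS with a
# limit value: the channel is considered safe while the comparison holds, and
# the interlock trips when it does not. Values below are hypothetical.
def _example_would_trip(latestReading, logFun='<', limVal=30.0):
    return not LOGICAL_FUNCTIONS[logFun](latestReading, limVal)
# _example_would_trip(25.0) -> False (25 < 30, still safe)
# _example_would_trip(32.5) -> True  (32.5 is not < 30, interlock trips)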
class interlock:
def __init__(self,confDict,aiObj):
self.aiChannelName = confDict['senseChan']
self.logicalFunction = confDict['logFun']
self.limitValue = confDict['limVal']
self.aiChannelObj = aiObj
def testInterlock(self):
function = LOGICAL_FUNCTIONS[self.logicalFunction] # lookup logical function based on string
latestReading = self.aiChannelObj.LCD.value() # get the latest measured value via LCD
interlockTripped = not function(latestReading,self.limitValue) #e.g. is latest reading greater than setpoint?
return interlockTripped
def getConfDict(self):
confDict = {}
confDict['senseChan'] = self.aiChannelName
confDict['logFun'] = self.logicalFunction
confDict['limVal'] = self.limitValue
return confDict
|
stevens4/rover3
|
lowLevelLibrary.py
|
Python
|
gpl-2.0
| 9,709
|
# Copyright (c) 2007-2009 Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, sys
class Config:
instance = None
def __init__(self):
self.colours = {
# Colours specified as name : (red, green, blue), value range 0..999
'fg_dark' : (400, 400, 360),
'fg_normal' : (600, 600, 550),
'fg_bright' : (999, 999, 800),
'bg_dark' : (0, 0, 0),
'bg_normal' : (0, 168, 325),
'bg_bright' : (0, 200, 400),
}
self.ftpserver = ''
@classmethod
def Inst(cls):
if cls.instance is None:
cls.instance = Config()
return cls.instance
@classmethod
def Mutate(cls, inConfig):
cls.instance = inConfig
def Colour(self, inName):
return self.colours[inName]
def FTPServer(self):
return self.ftpserver
def BrandingMap(self):
return {}
def AllShellsTimeout(self):
return True
def DisplaySerialNumber(self):
return True
def DisplayAssetTag(self):
return True
def BMCName(self):
return 'BMC'
def FirstBootEULAs(self):
# Subclasses in XSConsoleConfigOEM can add their EULAs to this array
return ['/EULA']
# Import a more specific configuration if available
if os.path.isfile(sys.path[0]+'/XSConsoleConfigOEM.py'):
import XSConsoleConfigOEM
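# --- Hedged usage sketch (added; not part of the original console code) ---
# The function below is illustrative only and is never called here.
def _example_usage():
    config = Config.Inst()           # lazily creates the singleton on first use
    fg = config.Colour('fg_normal')  # -> (600, 600, 550) in the base Config
    ftp = config.FTPServer()         # '' unless an OEM subclass has mutated the instance
    return fg, ftp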
|
mcclurmc/xcp-console
|
XSConsoleConfig.py
|
Python
|
gpl-2.0
| 2,099
|
#!/usr/bin/env python3
"""
Author: Kartamyshev A.I. (Darth Feiwante)
"""
def inherit_icalc_isotropic(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None, min_mult = 1, max_mult = 1, num_points = 2, geo_folder = '', it_folder = '', override = False):
"""
    This function makes a set of structures uniformly scaled from the initial one within the range of deformation
INPUT:
    - new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', 'version')
- database (dict) - dictionary with the project's results
- min_mult (int) - minimal deformation of the initial structure
- max_mult (int) - maximal deformation of the initial structure
- num_points (int) - number of newly built structures
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
    - it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
from calc_manage import inherit_icalc
min_mult = min_mult
max_mult = max_mult
num_points = num_points
step = (max_mult - min_mult)/(num_points - 1)
mult_list = [min_mult+step*i for i in range(num_points)]
version = start_new_version
for j in mult_list:
inherit_icalc('isotropic', new_structure, version, base_calculation, database, mult_rprimd = j, geo_folder=geo_folder, override=override)
version += 1
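# Hedged worked example (added): with min_mult = 0.98, max_mult = 1.02 and
# num_points = 5, step = 0.01 and mult_list is approximately
# [0.98, 0.99, 1.00, 1.01, 1.02], so five scaled structures are written with
# versions start_new_version .. start_new_version + 4. Numbers are illustrative.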
def inherit_icalc_c_a(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None, min_mult_a = 1, max_mult_a = 1, num_points_a = 2,
                      min_mult_c = 1, max_mult_c = 1, num_points_c = 2, geo_folder='', it_folder ='', override = False):
"""
    This function makes a set of structures deformed uniformly in the plane presented by the vectors 1 and 2 and separately deformed along the vector 3 of the lattice
INPUT:
    - new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', 'version')
- database (dict) - dictionary with the project's results
- min_mult_a (float) - minimal simultaneous deformation of the vector 1 and 2 of the final structure from "base_calculation"
- max_mult_a (float) - maximal simultaneous deformation of the vector 1 and 2 of the final structure from "base_calculation"
- num_points_a (int) - number of different simultaneous deformations of the vectors 1 and 2
- min_mult_c (float) - minimal deformation of the vector 3 of the structure from "base_calculation"
- max_mult_c (float) - maximal deformation of the vector 3 of the structure from "base_calculation"
- num_points_c (int) - number of different deformations of the vector 3 from "base_calculation"
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
    - it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
    from calc_manage import inherit_icalc
if num_points_a > 1:
# Lattice parameter a
min_mult_a = min_mult_a
max_mult_a = max_mult_a
num_points_a = num_points_a
step_a = (max_mult_a - min_mult_a)/(num_points_a - 1)
mult_list_a = [min_mult_a+step_a*i for i in range(num_points_a)]
if num_points_c > 1:
# Lattice parameter c
min_mult_c = min_mult_c
max_mult_c = max_mult_c
num_points_c = num_points_c
step_c = (max_mult_c - min_mult_c)/(num_points_c - 1)
mult_list_c = [min_mult_c+step_c*i for i in range(num_points_c)]
print('database', database)
version = start_new_version
if num_points_a > 1 and num_points_c > 1:
for j in mult_list_a:
for k in mult_list_c:
                inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = j, mult_c = k, geo_folder=geo_folder, override=override)
version += 1
elif num_points_c == 1:
for j in mult_list_a:
inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = j, mult_c = 1, geo_folder=geo_folder, override=override)
version += 1
elif num_points_a == 1:
for j in mult_list_c:
inherit_icalc('c_a', new_structure, version, base_calculation, database, mult_a = 1, mult_c = j, geo_folder=geo_folder, override=override)
version += 1
def inherit_icalc_x_y(new_structure = '', start_new_version = None, base_calculation = (None, None, None), database = None,
min_mult_a = 1, max_mult_a = 1, num_points_a = 2, min_mult_b = 1, max_mult_b = 1,num_points_b = 2, geo_folder='', it_folder ='',
override = False):
"""
    This function makes a set of structures separately deformed along the vectors 1 and 2 of the lattice
INPUT:
    - new_structure (str) - arbitrary name for your crystal structure
- start_new_version (int) - start version for newly built structures
- base_calculation (tuple) - tuple describing initial Calculation object in form ('structure', 'set', version)
- database (dict) - dictionary with the project's results
- min_mult_a (float) - minimal deformation of the vector 1 of the structure from "base_calculation"
- max_mult_a (float) - maximal deformation of the vector 1 of the structure from "base_calculation"
- num_points_a (int) - number of different deformations of the vector 2
- min_mult_b (float) - minimal deformation of the vector 2 of the structure from "base_calculation"
- max_mult_b (float) - maximal deformation of the vector 2 of the structure from "base_calculation"
- num_points_b (int) - number of different deformations of the vector 2
- geo_folder (str) - path to the folder to save *.geo files of newly built structures
    - it_folder (str) - section folder
- override (boolean) - if True then the old structures with the same names will be overwritten
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
from calc_manage import inherit_icalc
if num_points_a > 1:
# Coordinate x in rprimd
step_a = (max_mult_a - min_mult_a)/(num_points_a - 1)
mult_list_a = [min_mult_a+step_a*i for i in range(num_points_a)]
if num_points_b > 1:
# Coordinate y in rprimd
step_b = (max_mult_b - min_mult_b)/(num_points_b - 1)
mult_list_b = [min_mult_b+step_b*i for i in range(num_points_b)]
version = start_new_version
if num_points_a > 1 and num_points_b > 1:
for j in mult_list_a:
for k in mult_list_b:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = j, mult_b = k, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
elif num_points_b == 1:
for j in mult_list_a:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = j, mult_b = 1, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
elif num_points_a == 1:
for j in mult_list_b:
inherit_icalc('xy', new_structure, version, base_calculation, database, mult_a = 1, mult_b = j, geo_folder=geo_folder, it_folder = it_folder, override=override)
version += 1
|
dimonaks/siman
|
siman/structure_functions.py
|
Python
|
gpl-2.0
| 8,328
|
import ArtusConfigBase as base
def config():
conf = base.BaseConfig('mc', '2012', analysis='ee')
conf["InputFiles"] = base.setInputFiles(
ekppath="",
nafpath="/pnfs/desy.de/cms/tier2/store/user/dhaitz/2014_08_08_data_QCD/kappa_QCD_170-250_*.root"
)
conf['EnableLumiReweighting'] = True
conf['EnableTriggerReweighting'] = False
conf['NEvents'] = 31697066
conf['XSection'] = 30990
conf = base.expand(conf, ['all', 'zcuts', 'incut'])
return conf
|
dhaitz/CalibFW
|
cfg/artus/background_ee_qcd_170-250.py
|
Python
|
gpl-2.0
| 497
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import asyncio
import logging
import threading
import os
import bpy
import bgl
import blf
import pillarsdk
from . import async_loop, pillar, cache, blender, utils
REQUIRED_ROLES_FOR_TEXTURE_BROWSER = {'subscriber', 'demo'}
MOUSE_SCROLL_PIXELS_PER_TICK = 50
ICON_WIDTH = 128
ICON_HEIGHT = 128
TARGET_ITEM_WIDTH = 400
TARGET_ITEM_HEIGHT = 128
ITEM_MARGIN_X = 5
ITEM_MARGIN_Y = 5
ITEM_PADDING_X = 5
library_path = '/tmp'
library_icons_path = os.path.join(os.path.dirname(__file__), "icons")
log = logging.getLogger(__name__)
class SpecialFolderNode(pillarsdk.Node):
NODE_TYPE = 'SPECIAL'
class UpNode(SpecialFolderNode):
NODE_TYPE = 'UP'
def __init__(self):
super().__init__()
self['_id'] = 'UP'
self['node_type'] = self.NODE_TYPE
class ProjectNode(SpecialFolderNode):
NODE_TYPE = 'PROJECT'
def __init__(self, project):
super().__init__()
assert isinstance(project, pillarsdk.Project), 'wrong type for project: %r' % type(project)
self.merge(project.to_dict())
self['node_type'] = self.NODE_TYPE
class MenuItem:
"""GUI menu item for the 3D View GUI."""
icon_margin_x = 4
icon_margin_y = 4
text_margin_x = 6
text_height = 16
text_width = 72
DEFAULT_ICONS = {
'FOLDER': os.path.join(library_icons_path, 'folder.png'),
'SPINNER': os.path.join(library_icons_path, 'spinner.png'),
}
FOLDER_NODE_TYPES = {'group_texture', 'group_hdri', UpNode.NODE_TYPE, ProjectNode.NODE_TYPE}
SUPPORTED_NODE_TYPES = {'texture', 'hdri'}.union(FOLDER_NODE_TYPES)
def __init__(self, node, file_desc, thumb_path: str, label_text):
self.log = logging.getLogger('%s.MenuItem' % __name__)
if node['node_type'] not in self.SUPPORTED_NODE_TYPES:
self.log.info('Invalid node type in node: %s', node)
raise TypeError('Node of type %r not supported; supported are %r.' % (
node['node_type'], self.SUPPORTED_NODE_TYPES))
assert isinstance(node, pillarsdk.Node), 'wrong type for node: %r' % type(node)
assert isinstance(node['_id'], str), 'wrong type for node["_id"]: %r' % type(node['_id'])
self.node = node # pillarsdk.Node, contains 'node_type' key to indicate type
self.file_desc = file_desc # pillarsdk.File object, or None if a 'folder' node.
self.label_text = label_text
self._thumb_path = ''
self.icon = None
self._is_folder = node['node_type'] in self.FOLDER_NODE_TYPES
self._is_spinning = False
# Determine sorting order.
# by default, sort all the way at the end and folders first.
self._order = 0 if self._is_folder else 10000
if node and node.properties and node.properties.order is not None:
self._order = node.properties.order
self.thumb_path = thumb_path
# Updated when drawing the image
self.x = 0
self.y = 0
self.width = 0
self.height = 0
def sort_key(self):
"""Key for sorting lists of MenuItems."""
return self._order, self.label_text
@property
def thumb_path(self) -> str:
return self._thumb_path
@thumb_path.setter
def thumb_path(self, new_thumb_path: str):
self._is_spinning = new_thumb_path == 'SPINNER'
self._thumb_path = self.DEFAULT_ICONS.get(new_thumb_path, new_thumb_path)
if self._thumb_path:
self.icon = bpy.data.images.load(filepath=self._thumb_path)
else:
self.icon = None
@property
def node_uuid(self) -> str:
return self.node['_id']
def represents(self, node) -> bool:
"""Returns True iff this MenuItem represents the given node."""
node_uuid = node['_id']
return self.node_uuid == node_uuid
def update(self, node, file_desc, thumb_path: str, label_text=None):
# We can get updated information about our Node, but a MenuItem should
# always represent one node, and it shouldn't be shared between nodes.
if self.node_uuid != node['_id']:
raise ValueError("Don't change the node ID this MenuItem reflects, "
"just create a new one.")
self.node = node
self.file_desc = file_desc # pillarsdk.File object, or None if a 'folder' node.
self.thumb_path = thumb_path
if label_text is not None:
self.label_text = label_text
@property
def is_folder(self) -> bool:
return self._is_folder
@property
def is_spinning(self) -> bool:
return self._is_spinning
def update_placement(self, x, y, width, height):
"""Use OpenGL to draw this one menu item."""
self.x = x
self.y = y
self.width = width
self.height = height
def draw(self, highlighted: bool):
bgl.glEnable(bgl.GL_BLEND)
if highlighted:
bgl.glColor4f(0.555, 0.555, 0.555, 0.8)
else:
bgl.glColor4f(0.447, 0.447, 0.447, 0.8)
bgl.glRectf(self.x, self.y, self.x + self.width, self.y + self.height)
texture = self.icon
err = texture.gl_load(filter=bgl.GL_NEAREST, mag=bgl.GL_NEAREST)
assert not err, 'OpenGL error: %i' % err
bgl.glColor4f(0.0, 0.0, 1.0, 0.5)
# bgl.glLineWidth(1.5)
# ------ TEXTURE ---------#
bgl.glBindTexture(bgl.GL_TEXTURE_2D, texture.bindcode[0])
bgl.glEnable(bgl.GL_TEXTURE_2D)
bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)
bgl.glColor4f(1, 1, 1, 1)
bgl.glBegin(bgl.GL_QUADS)
bgl.glTexCoord2d(0, 0)
bgl.glVertex2d(self.x + self.icon_margin_x, self.y)
bgl.glTexCoord2d(0, 1)
bgl.glVertex2d(self.x + self.icon_margin_x, self.y + ICON_HEIGHT)
bgl.glTexCoord2d(1, 1)
bgl.glVertex2d(self.x + self.icon_margin_x + ICON_WIDTH, self.y + ICON_HEIGHT)
bgl.glTexCoord2d(1, 0)
bgl.glVertex2d(self.x + self.icon_margin_x + ICON_WIDTH, self.y)
bgl.glEnd()
bgl.glDisable(bgl.GL_TEXTURE_2D)
bgl.glDisable(bgl.GL_BLEND)
texture.gl_free()
# draw some text
font_id = 0
blf.position(font_id,
self.x + self.icon_margin_x + ICON_WIDTH + self.text_margin_x,
self.y + ICON_HEIGHT * 0.5 - 0.25 * self.text_height, 0)
blf.size(font_id, self.text_height, self.text_width)
blf.draw(font_id, self.label_text)
def hits(self, mouse_x: int, mouse_y: int) -> bool:
return self.x < mouse_x < self.x + self.width and self.y < mouse_y < self.y + self.height
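# Hedged note (added): MenuItem.sort_key() orders the browser grid by
# (_order, label_text); folders default to _order 0 and other items to 10000,
# unless node.properties.order overrides it. For example (hypothetical labels),
# a 'group_texture' folder named "Rocks" sorts as (0, 'Rocks') and therefore
# appears before a 'texture' item "Bricks", which sorts as (10000, 'Bricks').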
class BlenderCloudBrowser(pillar.PillarOperatorMixin,
async_loop.AsyncModalOperatorMixin,
bpy.types.Operator):
bl_idname = 'pillar.browser'
bl_label = 'Blender Cloud Texture Browser'
_draw_handle = None
current_path = pillar.CloudPath('/')
project_name = ''
# This contains a stack of Node objects that lead up to the currently browsed node.
path_stack = []
# This contains a stack of MenuItem objects that lead up to the currently browsed node.
menu_item_stack = []
timer = None
log = logging.getLogger('%s.BlenderCloudBrowser' % __name__)
_menu_item_lock = threading.Lock()
current_display_content = [] # list of MenuItems currently displayed
loaded_images = set()
thumbnails_cache = ''
maximized_area = False
mouse_x = 0
mouse_y = 0
scroll_offset = 0
scroll_offset_target = 0
scroll_offset_max = 0
scroll_offset_space_left = 0
def invoke(self, context, event):
# Refuse to start if the file hasn't been saved. It's okay if
# it's dirty, we just need to know where '//' points to.
if not os.path.exists(context.blend_data.filepath):
self.report({'ERROR'}, 'Please save your Blend file before using '
'the Blender Cloud addon.')
return {'CANCELLED'}
wm = context.window_manager
self.current_path = pillar.CloudPath(wm.last_blender_cloud_location)
self.path_stack = [] # list of nodes that make up the current path.
self.thumbnails_cache = cache.cache_directory('thumbnails')
self.mouse_x = event.mouse_x
self.mouse_y = event.mouse_y
# See if we have to maximize the current area
if not context.screen.show_fullscreen:
self.maximized_area = True
bpy.ops.screen.screen_full_area(use_hide_panels=True)
# Add the region OpenGL drawing callback
# draw in view space with 'POST_VIEW' and 'PRE_VIEW'
self._draw_handle = context.space_data.draw_handler_add(
self.draw_menu, (context,), 'WINDOW', 'POST_PIXEL')
self.current_display_content = []
self.loaded_images = set()
self._scroll_reset()
context.window.cursor_modal_set('DEFAULT')
return async_loop.AsyncModalOperatorMixin.invoke(self, context, event)
def modal(self, context, event):
result = async_loop.AsyncModalOperatorMixin.modal(self, context, event)
if not {'PASS_THROUGH', 'RUNNING_MODAL'}.intersection(result):
return result
if event.type == 'TAB' and event.value == 'RELEASE':
self.log.info('Ensuring async loop is running')
async_loop.ensure_async_loop()
if event.type == 'TIMER':
self._scroll_smooth()
context.area.tag_redraw()
return {'RUNNING_MODAL'}
if 'MOUSE' in event.type:
context.area.tag_redraw()
self.mouse_x = event.mouse_x
self.mouse_y = event.mouse_y
left_mouse_release = event.type == 'LEFTMOUSE' and event.value == 'RELEASE'
if self._state == 'PLEASE_SUBSCRIBE' and left_mouse_release:
self.open_browser_subscribe()
self._finish(context)
return {'FINISHED'}
if self._state == 'BROWSING':
selected = self.get_clicked()
if selected:
if selected.is_spinning:
context.window.cursor_set('WAIT')
else:
context.window.cursor_set('HAND')
else:
context.window.cursor_set('DEFAULT')
# Scrolling
if event.type == 'WHEELUPMOUSE':
self._scroll_by(MOUSE_SCROLL_PIXELS_PER_TICK)
context.area.tag_redraw()
elif event.type == 'WHEELDOWNMOUSE':
self._scroll_by(-MOUSE_SCROLL_PIXELS_PER_TICK)
context.area.tag_redraw()
elif event.type == 'TRACKPADPAN':
self._scroll_by(event.mouse_prev_y - event.mouse_y,
smooth=False)
context.area.tag_redraw()
if left_mouse_release:
if selected is None:
# No item clicked, ignore it.
return {'RUNNING_MODAL'}
if selected.is_spinning:
# This can happen when the thumbnail information isn't loaded yet.
return {'RUNNING_MODAL'}
if selected.is_folder:
self.descend_node(selected)
else:
self.handle_item_selection(context, selected)
if event.type in {'RIGHTMOUSE', 'ESC'}:
self._finish(context)
return {'CANCELLED'}
return {'RUNNING_MODAL'}
async def async_execute(self, context):
self._state = 'CHECKING_CREDENTIALS'
self.log.debug('Checking credentials')
try:
db_user = await self.check_credentials(context, REQUIRED_ROLES_FOR_TEXTURE_BROWSER)
except pillar.NotSubscribedToCloudError:
self.log.info('User not subscribed to Blender Cloud.')
self._show_subscribe_screen()
return None
if db_user is None:
raise pillar.UserNotLoggedInError()
await self.async_download_previews()
def _show_subscribe_screen(self):
"""Shows the "You need to subscribe" screen."""
self._state = 'PLEASE_SUBSCRIBE'
bpy.context.window.cursor_set('HAND')
def descend_node(self, menu_item: MenuItem):
"""Descends the node hierarchy by visiting this menu item's node.
Also keeps track of the current node, so that we know where the "up" button should go.
"""
node = menu_item.node
assert isinstance(node, pillarsdk.Node), 'Wrong type %s' % node
if isinstance(node, UpNode):
# Going up.
self.log.debug('Going up to %r', self.current_path)
self.current_path = self.current_path.parent
if self.path_stack:
self.path_stack.pop()
if self.menu_item_stack:
self.menu_item_stack.pop()
if not self.path_stack:
self.project_name = ''
else:
# Going down, keep track of where we were
if isinstance(node, ProjectNode):
self.project_name = node['name']
self.current_path /= node['_id']
self.log.debug('Going down to %r', self.current_path)
self.path_stack.append(node)
self.menu_item_stack.append(menu_item)
self.browse_assets()
@property
def node(self):
if not self.path_stack:
return None
return self.path_stack[-1]
def _finish(self, context):
self.log.debug('Finishing the modal operator')
async_loop.AsyncModalOperatorMixin._finish(self, context)
self.clear_images()
context.space_data.draw_handler_remove(self._draw_handle, 'WINDOW')
context.window.cursor_modal_restore()
if self.maximized_area:
bpy.ops.screen.screen_full_area(use_hide_panels=True)
context.area.tag_redraw()
self.log.debug('Modal operator finished')
def clear_images(self):
"""Removes all images we loaded from Blender's memory."""
for image in bpy.data.images:
if image.filepath_raw not in self.loaded_images:
continue
image.user_clear()
bpy.data.images.remove(image)
self.loaded_images.clear()
self.current_display_content.clear()
def add_menu_item(self, *args) -> MenuItem:
menu_item = MenuItem(*args)
# Just make this thread-safe to be on the safe side.
with self._menu_item_lock:
self.current_display_content.append(menu_item)
self.loaded_images.add(menu_item.icon.filepath_raw)
self.sort_menu()
return menu_item
def update_menu_item(self, node, *args) -> MenuItem:
node_uuid = node['_id']
# Just make this thread-safe to be on the safe side.
with self._menu_item_lock:
for menu_item in self.current_display_content:
if menu_item.represents(node):
menu_item.update(node, *args)
self.loaded_images.add(menu_item.icon.filepath_raw)
break
else:
raise ValueError('Unable to find MenuItem(node_uuid=%r)' % node_uuid)
self.sort_menu()
def sort_menu(self):
"""Sorts the self.current_display_content list."""
if not self.current_display_content:
return
with self._menu_item_lock:
self.current_display_content.sort(key=MenuItem.sort_key)
async def async_download_previews(self):
self._state = 'BROWSING'
thumbnails_directory = self.thumbnails_cache
self.log.info('Asynchronously downloading previews to %r', thumbnails_directory)
self.log.info('Current BCloud path is %r', self.current_path)
self.clear_images()
self._scroll_reset()
project_uuid = self.current_path.project_uuid
node_uuid = self.current_path.node_uuid
if node_uuid:
# Query for sub-nodes of this node.
self.log.debug('Getting subnodes for parent node %r', node_uuid)
children = await pillar.get_nodes(parent_node_uuid=node_uuid,
node_type={'group_texture', 'group_hdri'})
elif project_uuid:
# Query for top-level nodes.
self.log.debug('Getting subnodes for project node %r', project_uuid)
children = await pillar.get_nodes(project_uuid=project_uuid,
parent_node_uuid='',
node_type={'group_texture', 'group_hdri'})
else:
# Query for projects
self.log.debug('No node UUID and no project UUID, listing available projects')
children = await pillar.get_texture_projects()
for proj_dict in children:
self.add_menu_item(ProjectNode(proj_dict), None, 'FOLDER', proj_dict['name'])
return
# Make sure we can go up again.
self.add_menu_item(UpNode(), None, 'FOLDER', '.. up ..')
# Download all child nodes
self.log.debug('Iterating over child nodes of %r', self.current_path)
for child in children:
# print(' - %(_id)s = %(name)s' % child)
if child['node_type'] not in MenuItem.SUPPORTED_NODE_TYPES:
self.log.debug('Skipping node of type %r', child['node_type'])
continue
self.add_menu_item(child, None, 'FOLDER', child['name'])
# There are only sub-nodes at the project level, no texture nodes,
# so we won't have to bother looking for textures.
if not node_uuid:
return
directory = os.path.join(thumbnails_directory, project_uuid, node_uuid)
os.makedirs(directory, exist_ok=True)
self.log.debug('Fetching texture thumbnails for node %r', node_uuid)
def thumbnail_loading(node, texture_node):
self.add_menu_item(node, None, 'SPINNER', texture_node['name'])
def thumbnail_loaded(node, file_desc, thumb_path):
self.update_menu_item(node, file_desc, thumb_path)
await pillar.fetch_texture_thumbs(node_uuid, 's', directory,
thumbnail_loading=thumbnail_loading,
thumbnail_loaded=thumbnail_loaded,
future=self.signalling_future)
def browse_assets(self):
self.log.debug('Browsing assets at %r', self.current_path)
self._new_async_task(self.async_download_previews())
def draw_menu(self, context):
"""Draws the GUI with OpenGL."""
drawers = {
'CHECKING_CREDENTIALS': self._draw_checking_credentials,
'BROWSING': self._draw_browser,
'DOWNLOADING_TEXTURE': self._draw_downloading,
'EXCEPTION': self._draw_exception,
'PLEASE_SUBSCRIBE': self._draw_subscribe,
}
if self._state in drawers:
drawer = drawers[self._state]
drawer(context)
# For debugging: draw the state
font_id = 0
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
blf.position(font_id, 5, 5, 0)
blf.draw(font_id, '%s %s' % (self._state, self.project_name))
bgl.glDisable(bgl.GL_BLEND)
@staticmethod
def _window_region(context):
window_regions = [region
for region in context.area.regions
if region.type == 'WINDOW']
return window_regions[0]
def _draw_browser(self, context):
"""OpenGL drawing code for the BROWSING state."""
window_region = self._window_region(context)
content_width = window_region.width - ITEM_MARGIN_X * 2
content_height = window_region.height - ITEM_MARGIN_Y * 2
content_x = ITEM_MARGIN_X
content_y = context.area.height - ITEM_MARGIN_Y - TARGET_ITEM_HEIGHT
col_count = content_width // TARGET_ITEM_WIDTH
item_width = (content_width - (col_count * ITEM_PADDING_X)) / col_count
item_height = TARGET_ITEM_HEIGHT
block_width = item_width + ITEM_PADDING_X
block_height = item_height + ITEM_MARGIN_Y
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(0.0, 0.0, 0.0, 0.6)
bgl.glRectf(0, 0, window_region.width, window_region.height)
if self.current_display_content:
bottom_y = float('inf')
# The -1 / +2 are for extra rows that are drawn only half at the top/bottom.
first_item_idx = max(0, int(-self.scroll_offset // block_height - 1) * col_count)
items_per_page = int(content_height // item_height + 2) * col_count
last_item_idx = first_item_idx + items_per_page
for item_idx, item in enumerate(self.current_display_content):
x = content_x + (item_idx % col_count) * block_width
y = content_y - (item_idx // col_count) * block_height - self.scroll_offset
item.update_placement(x, y, item_width, item_height)
if first_item_idx <= item_idx < last_item_idx:
# Only draw if the item is actually on screen.
item.draw(highlighted=item.hits(self.mouse_x, self.mouse_y))
bottom_y = min(y, bottom_y)
self.scroll_offset_space_left = window_region.height - bottom_y
self.scroll_offset_max = (self.scroll_offset -
self.scroll_offset_space_left +
0.25 * block_height)
else:
font_id = 0
text = "Communicating with Blender Cloud"
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
text_width, text_height = blf.dimensions(font_id, text)
blf.position(font_id,
content_x + content_width * 0.5 - text_width * 0.5,
content_y - content_height * 0.3 + text_height * 0.5, 0)
blf.draw(font_id, text)
bgl.glDisable(bgl.GL_BLEND)
# bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
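    # Hedged worked example (added; hypothetical numbers): for a 1280 px wide
    # window region, content_width = 1280 - 2*ITEM_MARGIN_X = 1270, so
    # col_count = 1270 // TARGET_ITEM_WIDTH = 3, item_width = (1270 - 3*5) / 3
    # ~= 418.3 px, block_width ~= 423.3 px and block_height = 128 + 5 = 133 px;
    # scroll_offset then shifts rows of that height through the visible area.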
def _draw_downloading(self, context):
"""OpenGL drawing code for the DOWNLOADING_TEXTURE state."""
self._draw_text_on_colour(context,
'Downloading texture from Blender Cloud',
(0.0, 0.0, 0.2, 0.6))
def _draw_checking_credentials(self, context):
"""OpenGL drawing code for the CHECKING_CREDENTIALS state."""
self._draw_text_on_colour(context,
'Checking login credentials',
(0.0, 0.0, 0.2, 0.6))
def _draw_text_on_colour(self, context, text, bgcolour):
content_height, content_width = self._window_size(context)
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(*bgcolour)
bgl.glRectf(0, 0, content_width, content_height)
font_id = 0
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
text_width, text_height = blf.dimensions(font_id, text)
blf.position(font_id,
content_width * 0.5 - text_width * 0.5,
content_height * 0.7 + text_height * 0.5, 0)
blf.draw(font_id, text)
bgl.glDisable(bgl.GL_BLEND)
def _window_size(self, context):
window_region = self._window_region(context)
content_width = window_region.width
content_height = window_region.height
return content_height, content_width
def _draw_exception(self, context):
"""OpenGL drawing code for the EXCEPTION state."""
import textwrap
content_height, content_width = self._window_size(context)
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(0.2, 0.0, 0.0, 0.6)
bgl.glRectf(0, 0, content_width, content_height)
font_id = 0
ex = self.async_task.exception()
if isinstance(ex, pillar.UserNotLoggedInError):
ex_msg = 'You are not logged in on Blender ID. Please log in at User Preferences, ' \
'System, Blender ID.'
else:
ex_msg = str(ex)
if not ex_msg:
ex_msg = str(type(ex))
text = "An error occurred:\n%s" % ex_msg
lines = textwrap.wrap(text)
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
_, text_height = blf.dimensions(font_id, 'yhBp')
def position(line_nr):
blf.position(font_id,
content_width * 0.1,
content_height * 0.8 - line_nr * text_height, 0)
for line_idx, line in enumerate(lines):
position(line_idx)
blf.draw(font_id, line)
bgl.glDisable(bgl.GL_BLEND)
def _draw_subscribe(self, context):
self._draw_text_on_colour(context,
'Click to subscribe to the Blender Cloud',
(0.0, 0.0, 0.2, 0.6))
def get_clicked(self) -> MenuItem:
for item in self.current_display_content:
if item.hits(self.mouse_x, self.mouse_y):
return item
return None
def handle_item_selection(self, context, item: MenuItem):
"""Called when the user clicks on a menu item that doesn't represent a folder."""
from pillarsdk.utils import sanitize_filename
self.clear_images()
self._state = 'DOWNLOADING_TEXTURE'
node_path_components = (node['name'] for node in self.path_stack if node is not None)
local_path_components = [sanitize_filename(comp) for comp in node_path_components]
top_texture_directory = bpy.path.abspath(context.scene.local_texture_dir)
local_path = os.path.join(top_texture_directory, *local_path_components)
meta_path = os.path.join(top_texture_directory, '.blender_cloud')
self.log.info('Downloading texture %r to %s', item.node_uuid, local_path)
self.log.debug('Metadata will be stored at %s', meta_path)
file_paths = []
select_dblock = None
node = item.node
def texture_downloading(file_path, *_):
self.log.info('Texture downloading to %s', file_path)
def texture_downloaded(file_path, file_desc, map_type):
nonlocal select_dblock
self.log.info('Texture downloaded to %r.', file_path)
if context.scene.local_texture_dir.startswith('//'):
file_path = bpy.path.relpath(file_path)
image_dblock = bpy.data.images.load(filepath=file_path)
image_dblock['bcloud_file_uuid'] = file_desc['_id']
image_dblock['bcloud_node_uuid'] = node['_id']
image_dblock['bcloud_node_type'] = node['node_type']
image_dblock['bcloud_node'] = pillar.node_to_id(node)
if node['node_type'] == 'hdri':
# All HDRi variations should use the same image datablock, hence one name.
image_dblock.name = node['name']
else:
# All texture variations are loaded at once, and thus need the map type in the name.
image_dblock.name = '%s-%s' % (node['name'], map_type)
# Select the image in the image editor (if the context is right).
# Just use the first image we download, preferring the colour map when it arrives.
if context.area.type == 'IMAGE_EDITOR':
if select_dblock is None or file_desc.map_type == 'color':
select_dblock = image_dblock
context.space_data.image = select_dblock
file_paths.append(file_path)
def texture_download_completed(_):
self.log.info('Texture download complete, inspect:\n%s', '\n'.join(file_paths))
self._state = 'QUIT'
# For HDRi nodes: only download the first file.
download_node = pillarsdk.Node.new(node)
if node['node_type'] == 'hdri':
download_node.properties.files = [download_node.properties.files[0]]
signalling_future = asyncio.Future()
self._new_async_task(pillar.download_texture(download_node, local_path,
metadata_directory=meta_path,
texture_loading=texture_downloading,
texture_loaded=texture_downloaded,
future=signalling_future))
self.async_task.add_done_callback(texture_download_completed)
def open_browser_subscribe(self):
import webbrowser
webbrowser.open_new_tab('https://cloud.blender.org/join')
self.report({'INFO'}, 'We just started a browser for you.')
def _scroll_smooth(self):
diff = self.scroll_offset_target - self.scroll_offset
if diff == 0:
return
if abs(round(diff)) < 1:
self.scroll_offset = self.scroll_offset_target
return
self.scroll_offset += diff * 0.5
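# Illustrative note (added comment, not part of the original add-on):
# _scroll_smooth() is a simple exponential ease-out; each call moves the
# offset half of the remaining distance towards scroll_offset_target and
# snaps exactly onto the target once less than a pixel remains.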
def _scroll_by(self, amount, *, smooth=True):
# Slow down scrolling up
if smooth and amount < 0 and -amount > self.scroll_offset_space_left / 4:
amount = -self.scroll_offset_space_left / 4
self.scroll_offset_target = min(0,
max(self.scroll_offset_max,
self.scroll_offset_target + amount))
if not smooth:
self._scroll_offset = self.scroll_offset_target
def _scroll_reset(self):
self.scroll_offset_target = self.scroll_offset = 0
class PILLAR_OT_switch_hdri(pillar.PillarOperatorMixin,
async_loop.AsyncModalOperatorMixin,
bpy.types.Operator):
bl_idname = 'pillar.switch_hdri'
bl_label = 'Switch with another variation'
bl_description = 'Downloads the selected variation of an HDRi, ' \
'replacing the current image'
log = logging.getLogger('bpy.ops.%s' % bl_idname)
image_name = bpy.props.StringProperty(name='image_name',
description='Name of the image block to replace')
file_uuid = bpy.props.StringProperty(name='file_uuid',
description='File ID to download')
async def async_execute(self, context):
"""Entry point of the asynchronous operator."""
self.report({'INFO'}, 'Communicating with Blender Cloud')
try:
try:
db_user = await self.check_credentials(context, REQUIRED_ROLES_FOR_TEXTURE_BROWSER)
user_id = db_user['_id']
except pillar.NotSubscribedToCloudError:
self.log.exception('User not subscribed to cloud.')
self.report({'ERROR'}, 'Please subscribe to the Blender Cloud.')
self._state = 'QUIT'
return
except pillar.UserNotLoggedInError:
self.log.exception('Error checking/refreshing credentials.')
self.report({'ERROR'}, 'Please log in on Blender ID first.')
self._state = 'QUIT'
return
if not user_id:
raise pillar.UserNotLoggedInError()
await self.download_and_replace(context)
except Exception as ex:
self.log.exception('Unexpected exception caught.')
self.report({'ERROR'}, 'Unexpected error %s: %s' % (type(ex), ex))
self._state = 'QUIT'
async def download_and_replace(self, context):
from .pillar import sanitize_filename
self._state = 'DOWNLOADING_TEXTURE'
current_image = bpy.data.images[self.image_name]
node = current_image['bcloud_node']
filename = '%s.taken_from_file' % sanitize_filename(node['name'])
local_path = os.path.dirname(bpy.path.abspath(current_image.filepath))
top_texture_directory = bpy.path.abspath(context.scene.local_texture_dir)
meta_path = os.path.join(top_texture_directory, '.blender_cloud')
file_uuid = self.file_uuid
resolution = next(file_ref['resolution'] for file_ref in node['properties']['files']
if file_ref['file'] == file_uuid)
self.log.info('Downloading file %r-%s to %s', file_uuid, resolution, local_path)
self.log.debug('Metadata will be stored at %s', meta_path)
def file_loading(file_path, file_desc, map_type):
self.log.info('Texture downloading to %s (%s)',
file_path, utils.sizeof_fmt(file_desc['length']))
async def file_loaded(file_path, file_desc, map_type):
if context.scene.local_texture_dir.startswith('//'):
file_path = bpy.path.relpath(file_path)
self.log.info('Texture downloaded to %s', file_path)
current_image['bcloud_file_uuid'] = file_uuid
current_image.filepath = file_path # This automatically reloads the image from disk.
await pillar.download_file_by_uuid(file_uuid,
local_path,
meta_path,
filename=filename,
map_type=resolution,
file_loading=file_loading,
file_loaded_sync=file_loaded,
future=self.signalling_future)
self.report({'INFO'}, 'Image download complete')
# store keymaps here to access after registration
addon_keymaps = []
def image_editor_menu(self, context):
self.layout.operator(BlenderCloudBrowser.bl_idname,
text='Get image from Blender Cloud',
icon_value=blender.icon('CLOUD'))
def hdri_download_panel__image_editor(self, context):
_hdri_download_panel(self, context.edit_image)
def hdri_download_panel__node_editor(self, context):
if context.active_node.type not in {'TEX_ENVIRONMENT', 'TEX_IMAGE'}:
return
_hdri_download_panel(self, context.active_node.image)
def _hdri_download_panel(self, current_image):
if not current_image:
return
if 'bcloud_node_type' not in current_image:
return
if current_image['bcloud_node_type'] != 'hdri':
return
try:
current_variation = current_image['bcloud_file_uuid']
except KeyError:
log.warning('Image %r has a bcloud_node_type but no bcloud_file_uuid property.',
current_image.name)
return
row = self.layout.row(align=True).split(0.3)
row.label('HDRi', icon_value=blender.icon('CLOUD'))
row.prop(current_image, 'hdri_variation', text='')
if current_image.hdri_variation != current_variation:
props = row.operator(PILLAR_OT_switch_hdri.bl_idname,
text='Replace',
icon='FILE_REFRESH')
props.image_name = current_image.name
props.file_uuid = current_image.hdri_variation
# Storage for variation labels, as the strings in EnumProperty items
# MUST be kept in Python memory.
variation_label_storage = {}
def hdri_variation_choices(self, context):
if context.area.type == 'IMAGE_EDITOR':
image = context.edit_image
elif context.area.type == 'NODE_EDITOR':
image = context.active_node.image
else:
return []
if 'bcloud_node' not in image:
return []
choices = []
for file_doc in image['bcloud_node']['properties']['files']:
label = file_doc['resolution']
variation_label_storage[label] = label
choices.append((file_doc['file'], label, ''))
return choices
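# Illustrative sketch (UUIDs below are made up, not part of the original
# add-on): hdri_variation_choices() feeds the EnumProperty with
# (identifier, label, description) tuples, using the file UUID as identifier:
#   [('57f3f8...a1', '1k', ''),
#    ('57f3f8...b2', '8k', '')]
# PILLAR_OT_switch_hdri later receives the chosen UUID via props.file_uuid and
# downloads that resolution.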
def register():
bpy.utils.register_class(BlenderCloudBrowser)
bpy.utils.register_class(PILLAR_OT_switch_hdri)
bpy.types.IMAGE_MT_image.prepend(image_editor_menu)
bpy.types.IMAGE_PT_image_properties.append(hdri_download_panel__image_editor)
bpy.types.NODE_PT_active_node_properties.append(hdri_download_panel__node_editor)
# HDRi resolution switcher/chooser.
# TODO: when an image is selected, switch this property to its current resolution.
bpy.types.Image.hdri_variation = bpy.props.EnumProperty(
name='HDRi variations',
items=hdri_variation_choices,
description='Select a variation with which to replace this image'
)
# handle the keymap
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if not kc:
print('No addon key configuration space found, so no custom hotkeys added.')
return
km = kc.keymaps.new(name='Screen')
kmi = km.keymap_items.new('pillar.browser', 'A', 'PRESS', ctrl=True, shift=True, alt=True)
addon_keymaps.append((km, kmi))
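# Illustrative note (not part of the original add-on): the keymap item above
# binds Ctrl+Shift+Alt+A in the global 'Screen' keymap to the 'pillar.browser'
# operator, roughly equivalent to running
#   bpy.ops.pillar.browser('INVOKE_DEFAULT')
# from the Python console.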
def unregister():
# handle the keymap
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
addon_keymaps.clear()
if hasattr(bpy.types.Image, 'hdri_variation'):
del bpy.types.Image.hdri_variation
bpy.types.IMAGE_MT_image.remove(image_editor_menu)
bpy.types.IMAGE_PT_image_properties.remove(hdri_download_panel__image_editor)
bpy.types.NODE_PT_active_node_properties.remove(hdri_download_panel__node_editor)
bpy.utils.unregister_class(BlenderCloudBrowser)
bpy.utils.unregister_class(PILLAR_OT_switch_hdri)
|
AndrewPeelMV/Blender2.78c
|
2.78/scripts/addons/blender_cloud/texture_browser.py
|
Python
|
gpl-2.0
| 38,526
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Zuza Software Foundation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import sys
from optparse import OptionParser
from django.core import management
import syspath_override
#: Length for the generated :setting:`SECRET_KEY`
KEY_LENGTH = 50
#: Default path for the settings file
DEFAULT_SETTINGS_PATH = '~/.pootle/pootle.conf'
#: Template that will be used to initialize settings from
SETTINGS_TEMPLATE_FILENAME = 'settings/90-local.conf.sample'
def init_settings(settings_filepath, template_filename):
"""Initializes a sample settings file for new installations.
:param settings_filepath: The target file path where the initial settings
will be written to.
:param template_filename: Template file used to initialize settings from.
"""
dirname = os.path.dirname(settings_filepath)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
fp = open(settings_filepath, 'w')
import base64
output = open(template_filename).read()
output = output % {
'default_key': base64.b64encode(os.urandom(KEY_LENGTH)),
}
fp.write(output)
fp.close()
def parse_args(args):
"""Parses the given arguments.
:param args: List of command-line arguments as got from sys.argv.
:return: 3-element tuple: (args, command, command_args)
"""
index = None
for i, arg in enumerate(args):
if not arg.startswith('-'):
index = i
break
if index is None:
return (args, None, [])
return (args[:index], args[index], args[(index + 1):])
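# Illustrative sketch (example values only, not part of the original runner):
# parse_args() splits the raw argument list into runner options, the Django
# management command, and that command's own arguments, e.g.
#   parse_args(['--config=/tmp/pootle.conf', 'runserver', '--noreload'])
#   -> (['--config=/tmp/pootle.conf'], 'runserver', ['--noreload'])
# Everything before the first token that does not start with '-' belongs to
# the runner itself.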
def configure_app(project, config_path, django_settings_module, runner_name):
"""Determines which settings file to use and sets environment variables
accordingly.
:param project: Project's name. Will be used to generate the settings
environment variable.
:param config_path: The path to the user's configuration file.
:param django_settings_module: The module that ``DJANGO_SETTINGS_MODULE``
will be set to.
:param runner_name: The name of the running script.
"""
settings_envvar = project.upper() + '_SETTINGS'
# Normalize path and expand ~ constructions
config_path = os.path.normpath(os.path.abspath(
os.path.expanduser(config_path),
)
)
if not (os.path.exists(config_path) or
os.environ.get(settings_envvar, None)):
print u"Configuration file does not exist at %r or " \
u"%r environment variable has not been set.\n" \
u"Use '%s init' to initialize the configuration file." % \
(config_path, settings_envvar, runner_name)
sys.exit(2)
os.environ.setdefault(settings_envvar, config_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', django_settings_module)
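# Illustrative sketch (assumed paths): for project='pootle' the settings
# environment variable is POOTLE_SETTINGS, so a call such as
#   configure_app('pootle', '~/.pootle/pootle.conf', 'pootle.settings', 'pootle')
# leaves POOTLE_SETTINGS pointing at the expanded config path and
# DJANGO_SETTINGS_MODULE set to 'pootle.settings', unless either variable was
# already present in the environment.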
def run_app(project, default_settings_path, settings_template,
django_settings_module):
"""Wrapper around django-admin.py.
:param project: Project's name.
:param default_settings_path: Default filepath to search for custom
settings. This will also be used as a default location for writing
initial settings.
:param settings_template: Template file for initializing settings from.
:param django_settings_module: The module that ``DJANGO_SETTINGS_MODULE``
will be set to.
"""
sys_args = sys.argv
runner_name = os.path.basename(sys_args[0])
(args, command, command_args) = parse_args(sys_args[1:])
if not (command or args):
# XXX: Should we display a more verbose help/usage message?
print "Usage: %s [--config=/path/to/settings.conf] [command] " \
"[options]" % runner_name
sys.exit(2)
if command == 'init':
noinput = '--noinput' in command_args
if noinput:
command_args.remove('--noinput')
# Determine which config file to write
try:
import re
config_path = command_args[0]
# Remove possible initial dashes
config_path = re.sub('^-+', '', config_path)
except IndexError:
config_path = default_settings_path
config_path = os.path.expanduser(config_path)
if os.path.exists(config_path):
resp = None
if noinput:
resp = 'n'
while resp not in ('Y', 'n'):
resp = raw_input('File already exists at %r, overwrite? [nY] ' \
% config_path)
if resp == 'n':
print "File already exists, not overwriting."
return
try:
init_settings(config_path, settings_template)
except (IOError, OSError) as e:
raise e.__class__, 'Unable to write default settings file to %r' \
% config_path
print "Configuration file created at %r" % config_path
return
parser = OptionParser()
parser.add_option('--config', metavar='CONFIG',
default=default_settings_path,
help=u'Use the specified configuration file.')
parser.add_option('-v', '--version', action='store_true',
default=False,
help=u'Display version information and exit.')
(opts, opt_args) = parser.parse_args(args)
if opts.version:
from pootle import __version__
from translate import __version__ as tt_version
from django import get_version
print "Pootle %s" % __version__.sver
print "Translate Toolkit %s" % tt_version.sver
print "Django %s" % get_version()
return
configure_app(project=project, config_path=opts.config,
django_settings_module=django_settings_module,
runner_name=runner_name)
management.execute_from_command_line([runner_name, command] + command_args)
sys.exit(0)
def main():
src_dir = os.path.abspath(os.path.dirname(__file__))
settings_template = os.path.join(src_dir, SETTINGS_TEMPLATE_FILENAME)
run_app(project='pootle',
default_settings_path=DEFAULT_SETTINGS_PATH,
settings_template=settings_template,
django_settings_module='pootle.settings')
if __name__ == '__main__':
main()
|
arky/pootle-dev
|
pootle/runner.py
|
Python
|
gpl-2.0
| 6,998
|
# #
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Generic EasyBuild support for building and installing software.
The EasyBlock class should serve as a base class for all easyblocks.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Fotis Georgatos (Uni.Lu, NTUA)
"""
import copy
import glob
import inspect
import os
import shutil
import stat
import time
import traceback
from distutils.version import LooseVersion
from vsc.utils import fancylogger
from vsc.utils.missing import get_class_for
import easybuild.tools.environment as env
from easybuild.tools import config, filetools
from easybuild.framework.easyconfig import EASYCONFIGS_PKG_SUBDIR
from easybuild.framework.easyconfig.easyconfig import ITERATE_OPTIONS, EasyConfig, ActiveMNS
from easybuild.framework.easyconfig.easyconfig import get_easyblock_class, get_module_path, resolve_template
from easybuild.framework.easyconfig.parser import fetch_parameters_from_easyconfig
from easybuild.framework.easyconfig.tools import get_paths_for
from easybuild.framework.easyconfig.templates import TEMPLATE_NAMES_EASYBLOCK_RUN_STEP
from easybuild.tools.build_details import get_build_stats
from easybuild.tools.build_log import EasyBuildError, print_error, print_msg
from easybuild.tools.config import build_option, build_path, get_log_filename, get_repository, get_repositorypath
from easybuild.tools.config import install_path, log_path, package_path, source_paths
from easybuild.tools.environment import restore_env
from easybuild.tools.filetools import DEFAULT_CHECKSUM
from easybuild.tools.filetools import adjust_permissions, apply_patch, convert_name, download_file, encode_class_name
from easybuild.tools.filetools import extract_file, mkdir, move_logs, read_file, rmtree2
from easybuild.tools.filetools import write_file, compute_checksum, verify_checksum
from easybuild.tools.run import run_cmd
from easybuild.tools.jenkins import write_to_xml
from easybuild.tools.module_generator import ModuleGeneratorLua, ModuleGeneratorTcl, module_generator
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.modules import ROOT_ENV_VAR_NAME_PREFIX, VERSION_ENV_VAR_NAME_PREFIX, DEVEL_ENV_VAR_NAME_PREFIX
from easybuild.tools.modules import get_software_root, modules_tool
from easybuild.tools.package.utilities import package
from easybuild.tools.repository.repository import init_repository
from easybuild.tools.toolchain import DUMMY_TOOLCHAIN_NAME
from easybuild.tools.systemtools import det_parallelism, use_group
from easybuild.tools.utilities import remove_unwanted_chars
from easybuild.tools.version import this_is_easybuild, VERBOSE_VERSION, VERSION
BUILD_STEP = 'build'
CLEANUP_STEP = 'cleanup'
CONFIGURE_STEP = 'configure'
EXTENSIONS_STEP = 'extensions'
FETCH_STEP = 'fetch'
MODULE_STEP = 'module'
PACKAGE_STEP = 'package'
PATCH_STEP = 'patch'
PERMISSIONS_STEP = 'permissions'
POSTPROC_STEP = 'postproc'
PREPARE_STEP = 'prepare'
READY_STEP = 'ready'
SANITYCHECK_STEP = 'sanitycheck'
SOURCE_STEP = 'source'
TEST_STEP = 'test'
TESTCASES_STEP = 'testcases'
MODULE_ONLY_STEPS = [MODULE_STEP, PREPARE_STEP, READY_STEP, SANITYCHECK_STEP]
_log = fancylogger.getLogger('easyblock')
class EasyBlock(object):
"""Generic support for building and installing software, base class for actual easyblocks."""
# static class method for extra easyconfig parameter definitions
# this makes it easy to access the information without needing an instance
# subclasses of EasyBlock should call this method with a dictionary
@staticmethod
def extra_options(extra=None):
"""
Extra options method which will be passed to the EasyConfig constructor.
"""
if extra is None:
extra = {}
if not isinstance(extra, dict):
_log.nosupport("Obtained 'extra' value of type '%s' in extra_options, should be 'dict'" % type(extra), '2.0')
return extra
#
# INIT
#
def __init__(self, ec):
"""
Initialize the EasyBlock instance.
@param ec: a parsed easyconfig file (EasyConfig instance)
"""
# keep track of original working directory, so we can go back there
self.orig_workdir = os.getcwd()
# list of patch/source files, along with checksums
self.patches = []
self.src = []
self.checksums = []
# build/install directories
self.builddir = None
self.installdir = None
# extensions
self.exts = None
self.exts_all = None
self.ext_instances = []
self.skip = None
self.module_extra_extensions = '' # extra stuff for module file required by extensions
# modules interface with default MODULEPATH
self.modules_tool = modules_tool()
# module generator
self.module_generator = module_generator(self, fake=True)
# modules footer
self.modules_footer = None
modules_footer_path = build_option('modules_footer')
if modules_footer_path is not None:
self.modules_footer = read_file(modules_footer_path)
# easyconfig for this application
if isinstance(ec, EasyConfig):
self.cfg = ec
else:
raise EasyBuildError("Value of incorrect type passed to EasyBlock constructor: %s ('%s')", type(ec), ec)
# determine install subdirectory, based on module name
self.install_subdir = None
# indicates whether build should be performed in installation dir
self.build_in_installdir = self.cfg['buildininstalldir']
# logging
self.log = None
self.logfile = None
self.logdebug = build_option('debug')
self.postmsg = '' # allow a post message to be set, which can be shown as last output
# list of loaded modules
self.loaded_modules = []
# iterate configure/build/options
self.iter_opts = {}
# sanity check fail error messages to report (if any)
self.sanity_check_fail_msgs = []
# robot path
self.robot_path = build_option('robot_path')
# original module path
self.orig_modulepath = os.getenv('MODULEPATH')
# keep track of initial environment we start in, so we can restore it if needed
self.initial_environ = copy.deepcopy(os.environ)
# initialize logger
self._init_log()
# should we keep quiet?
self.silent = build_option('silent')
# try and use the specified group (if any)
group_name = build_option('group')
if self.cfg['group'] is not None:
self.log.warning("Group spec '%s' is overriding config group '%s'." % (self.cfg['group'], group_name))
group_name = self.cfg['group']
self.group = None
if group_name is not None:
self.group = use_group(group_name)
# generate build/install directories
self.gen_builddir()
self.gen_installdir()
self.log.info("Init completed for application name %s version %s" % (self.name, self.version))
# INIT/CLOSE LOG
def _init_log(self):
"""
Initialize the logger.
"""
if not self.log is None:
return
self.logfile = get_log_filename(self.name, self.version, add_salt=True)
fancylogger.logToFile(self.logfile)
self.log = fancylogger.getLogger(name=self.__class__.__name__, fname=False)
self.log.info(this_is_easybuild())
this_module = inspect.getmodule(self)
self.log.info("This is easyblock %s from module %s (%s)",
self.__class__.__name__, this_module.__name__, this_module.__file__)
def close_log(self):
"""
Shutdown the logger.
"""
self.log.info("Closing log for application name %s version %s" % (self.name, self.version))
fancylogger.logToFile(self.logfile, enable=False)
#
# FETCH UTILITY FUNCTIONS
#
def get_checksum_for(self, checksums, filename=None, index=None):
"""
Obtain checksum for given filename.
@param checksums: a list or tuple of checksums (or None)
@param filename: name of the file to obtain checksum for
@param index: index of file in list
"""
# if checksums are provided as a dict, lookup by source filename as key
if isinstance(checksums, (list, tuple)):
if index is not None and index < len(checksums) and (index >= 0 or abs(index) <= len(checksums)):
return checksums[index]
else:
return None
elif checksums is None:
return None
else:
raise EasyBuildError("Invalid type for checksums (%s), should be list, tuple or None.", type(checksums))
def fetch_sources(self, list_of_sources, checksums=None):
"""
Add a list of source files (can be tarballs, ISOs, URLs).
Each source file is checked for existence, or located/downloaded if needed.
"""
for index, src_entry in enumerate(list_of_sources):
if isinstance(src_entry, (list, tuple)):
cmd = src_entry[1]
source = src_entry[0]
elif isinstance(src_entry, basestring):
cmd = None
source = src_entry
# check if the sources can be located
path = self.obtain_file(source)
if path:
self.log.debug('File %s found for source %s' % (path, source))
self.src.append({
'name': source,
'path': path,
'cmd': cmd,
'checksum': self.get_checksum_for(checksums, filename=source, index=index),
# always set a finalpath
'finalpath': self.builddir,
})
else:
raise EasyBuildError('No file found for source %s', source)
self.log.info("Added sources: %s" % self.src)
def fetch_patches(self, patch_specs=None, extension=False, checksums=None):
"""
Add a list of patches.
Each patch file is checked for existence, or located if needed.
"""
if patch_specs is None:
patch_specs = self.cfg['patches']
patches = []
for index, patch_spec in enumerate(patch_specs):
# check if the patches can be located
copy_file = False
suff = None
level = None
if isinstance(patch_spec, (list, tuple)):
if not len(patch_spec) == 2:
raise EasyBuildError("Unknown patch specification '%s', only 2-element lists/tuples are supported!",
str(patch_spec))
patch_file = patch_spec[0]
# this *must* be of type int, nothing else
# no 'isinstance(..., int)', since that would make True/False also acceptable
if type(patch_spec[1]) == int:
level = patch_spec[1]
elif isinstance(patch_spec[1], basestring):
# non-patch files are assumed to be files to copy
if not patch_spec[0].endswith('.patch'):
copy_file = True
suff = patch_spec[1]
else:
raise EasyBuildError("Wrong patch spec '%s', only int/string are supported as 2nd element",
str(patch_spec))
else:
patch_file = patch_spec
path = self.obtain_file(patch_file, extension=extension)
if path:
self.log.debug('File %s found for patch %s' % (path, patch_spec))
patchspec = {
'name': patch_file,
'path': path,
'checksum': self.get_checksum_for(checksums, filename=patch_file, index=index),
}
if suff:
if copy_file:
patchspec['copy'] = suff
else:
patchspec['sourcepath'] = suff
if level is not None:
patchspec['level'] = level
if extension:
patches.append(patchspec)
else:
self.patches.append(patchspec)
else:
raise EasyBuildError('No file found for patch %s', patch_spec)
if extension:
self.log.info("Fetched extension patches: %s" % patches)
return [patch['path'] for patch in patches]
else:
self.log.info("Added patches: %s" % self.patches)
def fetch_extension_sources(self):
"""
Find source file for extensions.
"""
exts_sources = []
self.cfg.enable_templating = False
exts_list = self.cfg['exts_list']
self.cfg.enable_templating = True
for ext in exts_list:
if (isinstance(ext, list) or isinstance(ext, tuple)) and ext:
# expected format: (name, version, options (dict))
ext_name = ext[0]
if len(ext) == 1:
exts_sources.append({'name': ext_name})
else:
ext_version = ext[1]
ext_options = {}
def_src_tmpl = "%(name)s-%(version)s.tar.gz"
if len(ext) == 3:
ext_options = ext[2]
if not isinstance(ext_options, dict):
raise EasyBuildError("Unexpected type (non-dict) for 3rd element of %s", ext)
elif len(ext) > 3:
raise EasyBuildError('Extension specified in unknown format (list/tuple too long)')
ext_src = {
'name': ext_name,
'version': ext_version,
'options': ext_options,
}
checksums = ext_options.get('checksums', None)
if ext_options.get('source_tmpl', None):
fn = resolve_template(ext_options['source_tmpl'], ext_src)
else:
fn = resolve_template(def_src_tmpl, ext_src)
if ext_options.get('nosource', None):
exts_sources.append(ext_src)
else:
source_urls = [resolve_template(url, ext_src) for url in ext_options.get('source_urls', [])]
src_fn = self.obtain_file(fn, extension=True, urls=source_urls)
if src_fn:
ext_src.update({'src': src_fn})
if checksums:
fn_checksum = self.get_checksum_for(checksums, filename=src_fn, index=0)
if verify_checksum(src_fn, fn_checksum):
self.log.info('Checksum for ext source %s verified' % fn)
else:
raise EasyBuildError('Checksum for ext source %s failed', fn)
ext_patches = self.fetch_patches(patch_specs=ext_options.get('patches', []), extension=True)
if ext_patches:
self.log.debug('Found patches for extension %s: %s' % (ext_name, ext_patches))
ext_src.update({'patches': ext_patches})
if checksums:
self.log.debug('Verifying checksums for extension patches...')
for index, ext_patch in enumerate(ext_patches):
checksum = self.get_checksum_for(checksums[1:], filename=ext_patch, index=index)
if verify_checksum(ext_patch, checksum):
self.log.info('Checksum for extension patch %s verified' % ext_patch)
else:
raise EasyBuildError('Checksum for extension patch %s failed', ext_patch)
else:
self.log.debug('No patches found for extension %s.' % ext_name)
exts_sources.append(ext_src)
else:
raise EasyBuildError("Source for extension %s not found.", ext)
elif isinstance(ext, basestring):
exts_sources.append({'name': ext})
else:
raise EasyBuildError("Extension specified in unknown format (not a string/list/tuple)")
return exts_sources
def obtain_file(self, filename, extension=False, urls=None):
"""
Locate the file with the given name
- searches in different subdirectories of source path
- supports fetching the file from the web if the path is specified as a URL (i.e. starts with "http://")
"""
srcpaths = source_paths()
# should we download or just try and find it?
if filename.startswith("http://") or filename.startswith("ftp://"):
# URL detected, so let's try and download it
url = filename
filename = url.split('/')[-1]
# figure out where to download the file to
filepath = os.path.join(srcpaths[0], self.name[0].lower(), self.name)
if extension:
filepath = os.path.join(filepath, "extensions")
self.log.info("Creating path %s to download file to" % filepath)
mkdir(filepath, parents=True)
try:
fullpath = os.path.join(filepath, filename)
# only download when it's not there yet
if os.path.exists(fullpath):
self.log.info("Found file %s at %s, no need to download it." % (filename, filepath))
return fullpath
else:
if download_file(filename, url, fullpath):
return fullpath
except IOError, err:
raise EasyBuildError("Downloading file %s from url %s to %s failed: %s", filename, url, fullpath, err)
else:
# try and find file in various locations
foundfile = None
failedpaths = []
# always look first in the dir of the current eb file
ebpath = [os.path.dirname(self.cfg.path)]
# always consider robot + easyconfigs install paths as a fall back (e.g. for patch files, test cases, ...)
common_filepaths = []
if self.robot_path:
common_filepaths.extend(self.robot_path)
common_filepaths.extend(get_paths_for(subdir=EASYCONFIGS_PKG_SUBDIR, robot_path=self.robot_path))
for path in ebpath + common_filepaths + srcpaths:
# create list of candidate filepaths
namepath = os.path.join(path, self.name)
letterpath = os.path.join(path, self.name.lower()[0], self.name)
# most likely paths
candidate_filepaths = [
letterpath, # easyblocks-style subdir
namepath, # subdir with software name
path, # directly in directory
]
# see if file can be found at that location
for cfp in candidate_filepaths:
fullpath = os.path.join(cfp, filename)
# also check in 'extensions' subdir for extensions
if extension:
fullpaths = [
os.path.join(cfp, "extensions", filename),
os.path.join(cfp, "packages", filename), # legacy
fullpath
]
else:
fullpaths = [fullpath]
for fp in fullpaths:
if os.path.isfile(fp):
self.log.info("Found file %s at %s" % (filename, fp))
foundfile = os.path.abspath(fp)
break # no need to try further
else:
failedpaths.append(fp)
if foundfile:
break # no need to try other source paths
if foundfile:
return foundfile
else:
# try and download source files from specified source URLs
if urls:
source_urls = urls
else:
source_urls = []
source_urls.extend(self.cfg['source_urls'])
targetdir = os.path.join(srcpaths[0], self.name.lower()[0], self.name)
mkdir(targetdir, parents=True)
for url in source_urls:
if extension:
targetpath = os.path.join(targetdir, "extensions", filename)
else:
targetpath = os.path.join(targetdir, filename)
if isinstance(url, basestring):
if url[-1] in ['=', '/']:
fullurl = "%s%s" % (url, filename)
else:
fullurl = "%s/%s" % (url, filename)
elif isinstance(url, tuple):
# URLs that require a suffix, e.g., SourceForge download links
# e.g. http://sourceforge.net/projects/math-atlas/files/Stable/3.8.4/atlas3.8.4.tar.bz2/download
fullurl = "%s/%s/%s" % (url[0], filename, url[1])
else:
self.log.warning("Source URL %s is of unknown type, so ignoring it." % url)
continue
self.log.debug("Trying to download file %s from %s to %s ..." % (filename, fullurl, targetpath))
downloaded = False
try:
if download_file(filename, fullurl, targetpath):
downloaded = True
except IOError, err:
self.log.debug("Failed to download %s from %s: %s" % (filename, url, err))
failedpaths.append(fullurl)
continue
if downloaded:
# if fetching from source URL worked, we're done
self.log.info("Successfully downloaded source file %s from %s" % (filename, fullurl))
return targetpath
else:
failedpaths.append(fullurl)
raise EasyBuildError("Couldn't find file %s anywhere, and downloading it didn't work either... "
"Paths attempted (in order): %s ", filename, ', '.join(failedpaths))
#
# GETTER/SETTER UTILITY FUNCTIONS
#
@property
def name(self):
"""
Shortcut to get the module name.
"""
return self.cfg['name']
@property
def version(self):
"""
Shortcut to get the module version.
"""
return self.cfg['version']
@property
def toolchain(self):
"""
Toolchain used to build this easyblock
"""
return self.cfg.toolchain
@property
def full_mod_name(self):
"""
Full module name (including subdirectory in module install path)
"""
return self.cfg.full_mod_name
@property
def short_mod_name(self):
"""
Short module name (not including subdirectory in module install path)
"""
return self.cfg.short_mod_name
@property
def moduleGenerator(self):
"""
Module generator (DEPRECATED, use self.module_generator instead).
"""
self.log.nosupport("self.moduleGenerator is replaced by self.module_generator", '2.0')
#
# DIRECTORY UTILITY FUNCTIONS
#
def gen_builddir(self):
"""Generate the (unique) name for the builddir"""
clean_name = remove_unwanted_chars(self.name)
# if the toolchain version starts with a '-', strip it to prevent a '--' in the path name
tcversion = self.toolchain.version.lstrip('-')
lastdir = "%s%s-%s%s" % (self.cfg['versionprefix'], self.toolchain.name, tcversion, self.cfg['versionsuffix'])
builddir = os.path.join(os.path.abspath(build_path()), clean_name, self.version, lastdir)
# make sure build dir is unique if cleanupoldbuild is False or not set
if not self.cfg.get('cleanupoldbuild', False):
uniq_builddir = builddir
suff = 0
while(os.path.isdir(uniq_builddir)):
uniq_builddir = "%s.%d" % (builddir, suff)
suff += 1
builddir = uniq_builddir
self.builddir = builddir
self.log.info("Build dir set to %s" % self.builddir)
def make_builddir(self):
"""
Create the build directory.
"""
if not self.build_in_installdir:
# self.builddir should be already set by gen_builddir()
if not self.builddir:
raise EasyBuildError("self.builddir not set, make sure gen_builddir() is called first!")
self.log.debug("Creating the build directory %s (cleanup: %s)", self.builddir, self.cfg['cleanupoldbuild'])
else:
self.log.info("Changing build dir to %s" % self.installdir)
self.builddir = self.installdir
self.log.info("Overriding 'cleanupoldinstall' (to False), 'cleanupoldbuild' (to True) "
"and 'keeppreviousinstall' because we're building in the installation directory.")
# force cleanup before installation
self.cfg['cleanupoldbuild'] = True
self.cfg['keeppreviousinstall'] = False
# avoid cleanup after installation
self.cfg['cleanupoldinstall'] = False
# always make build dir
self.make_dir(self.builddir, self.cfg['cleanupoldbuild'])
def gen_installdir(self):
"""
Generate the name of the installation directory.
"""
basepath = install_path()
if basepath:
self.install_subdir = ActiveMNS().det_install_subdir(self.cfg)
self.installdir = os.path.join(os.path.abspath(basepath), self.install_subdir)
self.log.info("Install dir set to %s" % self.installdir)
else:
raise EasyBuildError("Can't set installation directory")
def make_installdir(self, dontcreate=None):
"""
Create the installation directory.
"""
self.log.debug("Creating the installation directory %s (cleanup: %s)" % (self.installdir,
self.cfg['cleanupoldinstall']))
if self.build_in_installdir:
self.cfg['keeppreviousinstall'] = True
dontcreate = (dontcreate is None and self.cfg['dontcreateinstalldir']) or dontcreate
self.make_dir(self.installdir, self.cfg['cleanupoldinstall'], dontcreateinstalldir=dontcreate)
def make_dir(self, dir_name, clean, dontcreateinstalldir=False):
"""
Create the directory.
"""
if os.path.exists(dir_name):
self.log.info("Found old directory %s" % dir_name)
if self.cfg['keeppreviousinstall']:
self.log.info("Keeping old directory %s (hopefully you know what you are doing)" % dir_name)
return
elif clean:
try:
rmtree2(dir_name)
self.log.info("Removed old directory %s" % dir_name)
except OSError, err:
raise EasyBuildError("Removal of old directory %s failed: %s", dir_name, err)
else:
try:
timestamp = time.strftime("%Y%m%d-%H%M%S")
backupdir = "%s.%s" % (dir_name, timestamp)
shutil.move(dir_name, backupdir)
self.log.info("Moved old directory %s to %s" % (dir_name, backupdir))
except OSError, err:
raise EasyBuildError("Moving old directory to backup %s %s failed: %s", dir_name, backupdir, err)
if dontcreateinstalldir:
olddir = dir_name
dir_name = os.path.dirname(dir_name)
self.log.info("Cleaning only, no actual creation of %s, only verification/defining of dirname %s" % (olddir, dir_name))
if os.path.exists(dir_name):
return
# if not, create dir as usual
mkdir(dir_name, parents=True)
#
# MODULE UTILITY FUNCTIONS
#
def make_devel_module(self, create_in_builddir=False):
"""
Create a develop module file which sets environment based on the build
Usage: module load name, which loads the module you want to use. $EBDEVELNAME should then be the full path
to the devel module file. So now you can module load $EBDEVELNAME.
WARNING: you cannot unload using $EBDEVELNAME (for now: use module unload `basename $EBDEVELNAME`)
"""
self.log.info("Making devel module...")
# load fake module
fake_mod_data = self.load_fake_module(purge=True)
header = self.module_generator.MODULE_HEADER
if header:
header += '\n'
load_lines = []
# capture all the EBDEVEL vars
# these should be all the dependencies and we should load them
for key in os.environ:
# legacy support
if key.startswith(DEVEL_ENV_VAR_NAME_PREFIX):
if not key.endswith(convert_name(self.name, upper=True)):
path = os.environ[key]
if os.path.isfile(path):
mod_name = path.rsplit(os.path.sep, 1)[-1]
load_lines.append(self.module_generator.load_module(mod_name))
elif key.startswith('SOFTDEVEL'):
self.log.nosupport("Environment variable SOFTDEVEL* being relied on", '2.0')
env_lines = []
for (key, val) in env.get_changes().items():
# check if non-empty string
# TODO: add unset for empty vars?
if val.strip():
env_lines.append(self.module_generator.set_environment(key, val))
if create_in_builddir:
output_dir = self.builddir
else:
output_dir = os.path.join(self.installdir, log_path())
mkdir(output_dir, parents=True)
filename = os.path.join(output_dir, ActiveMNS().det_devel_module_filename(self.cfg))
self.log.debug("Writing devel module to %s" % filename)
txt = ''.join([header] + load_lines + env_lines)
write_file(filename, txt)
# cleanup: unload fake module, remove fake module dir
self.clean_up_fake_module(fake_mod_data)
def make_module_dep(self):
"""
Make the dependencies for the module file.
"""
deps = []
mns = ActiveMNS()
# include load statements for toolchain, either directly or for toolchain dependencies
if self.toolchain.name != DUMMY_TOOLCHAIN_NAME:
if mns.expand_toolchain_load():
mod_names = self.toolchain.toolchain_dep_mods
deps.extend(mod_names)
self.log.debug("Adding toolchain components as module dependencies: %s" % mod_names)
else:
deps.append(self.toolchain.det_short_module_name())
self.log.debug("Adding toolchain %s as a module dependency" % deps[-1])
# include load/unload statements for dependencies
builddeps = self.cfg.builddependencies()
# include 'module load' statements for dependencies in reverse order
for dep in self.toolchain.dependencies:
if not dep in builddeps:
modname = dep['short_mod_name']
self.log.debug("Adding %s as a module dependency" % modname)
deps.append(modname)
else:
self.log.debug("Skipping build dependency %s" % str(dep))
self.log.debug("Full list of dependencies: %s" % deps)
# exclude dependencies that extend $MODULEPATH and form the path to the top of the module tree (if any)
mod_install_path = os.path.join(install_path('mod'), build_option('suffix_modules_path'))
full_mod_subdir = os.path.join(mod_install_path, self.cfg.mod_subdir)
init_modpaths = mns.det_init_modulepaths(self.cfg)
top_paths = [mod_install_path] + [os.path.join(mod_install_path, p) for p in init_modpaths]
excluded_deps = self.modules_tool.path_to_top_of_module_tree(top_paths, self.cfg.short_mod_name,
full_mod_subdir, deps)
deps = [d for d in deps if d not in excluded_deps]
self.log.debug("List of retained dependencies: %s" % deps)
loads = [self.module_generator.load_module(d) for d in deps]
unloads = [self.module_generator.unload_module(d) for d in deps[::-1]]
# Force unloading any other modules
if self.cfg['moduleforceunload']:
return ''.join(unloads) + ''.join(loads)
else:
return ''.join(loads)
def make_module_description(self):
"""
Create the module description.
"""
return self.module_generator.get_description()
def make_module_extra(self):
"""
Sets optional variables (EBROOT, MPI tuning variables).
"""
lines = ['']
# EBROOT + EBVERSION + EBDEVEL
env_name = convert_name(self.name, upper=True)
lines.append(self.module_generator.set_environment(ROOT_ENV_VAR_NAME_PREFIX + env_name, '', relpath=True))
lines.append(self.module_generator.set_environment(VERSION_ENV_VAR_NAME_PREFIX + env_name, self.version))
devel_path = os.path.join(log_path(), ActiveMNS().det_devel_module_filename(self.cfg))
devel_path_envvar = DEVEL_ENV_VAR_NAME_PREFIX + env_name
lines.append(self.module_generator.set_environment(devel_path_envvar, devel_path, relpath=True))
lines.append('\n')
for (key, value) in self.cfg['modextravars'].items():
lines.append(self.module_generator.set_environment(key, value))
for (key, value) in self.cfg['modextrapaths'].items():
if isinstance(value, basestring):
value = [value]
elif not isinstance(value, (tuple, list)):
raise EasyBuildError("modextrapaths dict value %s (type: %s) is not a list or tuple",
value, type(value))
lines.append(self.module_generator.prepend_paths(key, value))
if self.cfg['modloadmsg']:
lines.append(self.module_generator.msg_on_load(self.cfg['modloadmsg']))
if self.cfg['modtclfooter']:
if isinstance(self.module_generator, ModuleGeneratorTcl):
self.log.debug("Including Tcl footer in module: %s", self.cfg['modtclfooter'])
lines.extend([self.cfg['modtclfooter'], '\n'])
else:
self.log.warning("Not including footer in Tcl syntax in non-Tcl module file: %s",
self.cfg['modtclfooter'])
if self.cfg['modluafooter']:
if isinstance(self.module_generator, ModuleGeneratorLua):
self.log.debug("Including Lua footer in module: %s", self.cfg['modluafooter'])
lines.extend([self.cfg['modluafooter'], '\n'])
else:
self.log.warning("Not including footer in Lua syntax in non-Lua module file: %s",
self.cfg['modluafooter'])
for (key, value) in self.cfg['modaliases'].items():
lines.append(self.module_generator.set_alias(key, value))
txt = ''.join(lines)
self.log.debug("make_module_extra added this: %s", txt)
return txt
def make_module_extra_extensions(self):
"""
Sets optional variables for extensions.
"""
# add stuff specific to individual extensions
lines = [self.module_extra_extensions]
# set environment variable that specifies list of extensions
if self.exts_all:
exts_list = ','.join(['%s-%s' % (ext['name'], ext.get('version', '')) for ext in self.exts_all])
env_var_name = convert_name(self.name, upper=True)
lines.append(self.module_generator.set_environment('EBEXTSLIST%s' % env_var_name, exts_list))
return ''.join(lines)
def make_module_footer(self):
"""
Insert a footer section in the modulefile, primarily meant for contextual information
"""
footer = [self.module_generator.comment("Built with EasyBuild version %s" % VERBOSE_VERSION)]
# add extra stuff for extensions (if any)
if self.cfg['exts_list']:
footer.append(self.make_module_extra_extensions())
# include modules footer if one is specified
if self.modules_footer is not None:
self.log.debug("Including specified footer into module: '%s'" % self.modules_footer)
footer.append(self.modules_footer)
return ''.join(footer)
def make_module_extend_modpath(self):
"""
Include prepend-path statements for extending $MODULEPATH.
"""
txt = ''
if self.cfg['include_modpath_extensions']:
top_modpath = install_path('mod')
mod_path_suffix = build_option('suffix_modules_path')
modpath_exts = ActiveMNS().det_modpath_extensions(self.cfg)
self.log.debug("Including module path extensions returned by module naming scheme: %s" % modpath_exts)
full_path_modpath_extensions = [os.path.join(top_modpath, mod_path_suffix, ext) for ext in modpath_exts]
# module path extensions must exist, otherwise loading this module file will fail
for modpath_extension in full_path_modpath_extensions:
mkdir(modpath_extension, parents=True)
txt = self.module_generator.use(full_path_modpath_extensions)
else:
self.log.debug("Not including module path extensions, as specified.")
return txt
def make_module_req(self):
"""
Generate the environment-variables to run the module.
"""
requirements = self.make_module_req_guess()
lines = []
if os.path.isdir(self.installdir):
try:
os.chdir(self.installdir)
except OSError, err:
raise EasyBuildError("Failed to change to %s: %s", self.installdir, err)
lines.append('\n')
for key in sorted(requirements):
for path in requirements[key]:
paths = sorted(glob.glob(path))
if paths:
lines.append(self.module_generator.prepend_paths(key, paths))
try:
os.chdir(self.orig_workdir)
except OSError, err:
raise EasyBuildError("Failed to change back to %s: %s", self.orig_workdir, err)
return ''.join(lines)
def make_module_req_guess(self):
"""
A dictionary of possible directories to look for.
"""
return {
'PATH': ['bin', 'sbin'],
'LD_LIBRARY_PATH': ['lib', 'lib64', 'lib32'],
'LIBRARY_PATH': ['lib', 'lib64', 'lib32'],
'CPATH': ['include'],
'MANPATH': ['man', 'share/man'],
'PKG_CONFIG_PATH': ['lib/pkgconfig', 'share/pkgconfig'],
'ACLOCAL_PATH': ['share/aclocal'],
'CLASSPATH': ['*.jar'],
}
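# Illustrative sketch (EB_Example is hypothetical): software-specific
# easyblocks usually extend this guess dict rather than replace it, e.g.
#   def make_module_req_guess(self):
#       guesses = super(EB_Example, self).make_module_req_guess()
#       guesses['PYTHONPATH'] = ['lib/python2.7/site-packages']
#       return guesses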
def load_module(self, mod_paths=None, purge=True):
"""
Load module for this software package/version, after purging all currently loaded modules.
"""
# self.full_mod_name might not be set (e.g. during unit tests)
if self.full_mod_name is not None:
if mod_paths is None:
mod_paths = []
all_mod_paths = mod_paths + ActiveMNS().det_init_modulepaths(self.cfg)
mods = [self.full_mod_name]
self.modules_tool.load(mods, mod_paths=all_mod_paths, purge=purge, init_env=self.initial_environ)
else:
self.log.warning("Not loading module, since self.full_mod_name is not set.")
def load_fake_module(self, purge=False):
"""
Create and load fake module.
"""
# take a copy of the current environment before loading the fake module, so we can restore it
env = copy.deepcopy(os.environ)
# create fake module
fake_mod_path = self.make_module_step(fake=True)
# load fake module
self.modules_tool.prepend_module_path(fake_mod_path)
self.load_module(purge=purge)
return (fake_mod_path, env)
def clean_up_fake_module(self, fake_mod_data):
"""
Clean up fake module.
"""
fake_mod_path, env = fake_mod_data
# unload module and remove temporary module directory
# self.full_mod_name might not be set (e.g. during unit tests)
if fake_mod_path and self.full_mod_name is not None:
try:
self.modules_tool.unload([self.full_mod_name])
self.modules_tool.remove_module_path(fake_mod_path)
rmtree2(os.path.dirname(fake_mod_path))
except OSError, err:
raise EasyBuildError("Failed to clean up fake module dir %s: %s", fake_mod_path, err)
elif self.full_mod_name is None:
self.log.warning("Not unloading module, since self.full_mod_name is not set.")
# restore original environment
restore_env(env)
def load_dependency_modules(self):
"""Load dependency modules."""
self.modules_tool.load([ActiveMNS().det_full_module_name(dep) for dep in self.cfg.dependencies()])
#
# EXTENSIONS UTILITY FUNCTIONS
#
def prepare_for_extensions(self):
"""
Hook that is called before extensions are installed (e.g. to set the template).
"""
pass
def skip_extensions(self):
"""
Called when self.skip is True
- use this to detect existing extensions and to remove them from self.exts
- based on initial R version
"""
# disabling templating is required here to support legacy string templates like name/version
self.cfg.enable_templating = False
exts_filter = self.cfg['exts_filter']
self.cfg.enable_templating = True
if not exts_filter or len(exts_filter) == 0:
raise EasyBuildError("Skipping of extensions, but no exts_filter set in easyconfig")
elif isinstance(exts_filter, basestring) or len(exts_filter) != 2:
raise EasyBuildError('exts_filter should be a list or tuple of ("command","input")')
cmdtmpl = exts_filter[0]
cmdinputtmpl = exts_filter[1]
if not self.exts:
self.exts = []
res = []
for ext in self.exts:
name = ext['name']
if 'options' in ext and 'modulename' in ext['options']:
modname = ext['options']['modulename']
else:
modname = name
tmpldict = {
'ext_name': modname,
'ext_version': ext.get('version'),
'src': ext.get('source'),
}
try:
cmd = cmdtmpl % tmpldict
except KeyError, err:
msg = "KeyError occured on completing extension filter template: %s; "
msg += "'name'/'version' keys are no longer supported, should use 'ext_name'/'ext_version' instead"
self.log.nosupport(msg, '2.0')
if cmdinputtmpl:
stdin = cmdinputtmpl % tmpldict
(cmdstdouterr, ec) = run_cmd(cmd, log_all=False, log_ok=False, simple=False, inp=stdin, regexp=False)
else:
(cmdstdouterr, ec) = run_cmd(cmd, log_all=False, log_ok=False, simple=False, regexp=False)
self.log.info("exts_filter result %s %s", cmdstdouterr, ec)
if ec:
self.log.info("Not skipping %s" % name)
self.log.debug("exit code: %s, stdout/err: %s" % (ec, cmdstdouterr))
res.append(ext)
else:
self.log.info("Skipping %s" % name)
self.exts = res
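# Illustrative sketch (typical easyconfig value, assumed here): an exts_filter
# for Python packages often looks like
#   exts_filter = ("python -c 'import %(ext_name)s'", "")
# A zero exit code means the extension is already importable, so it is dropped
# from self.exts; a non-zero exit code keeps it in the list to be installed.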
#
# MISCELLANEOUS UTILITY FUNCTIONS
#
def guess_start_dir(self):
"""
Return the directory where to start the whole configure/make/make install cycle from
- typically self.src[0]['finalpath']
- start_dir option
-- if abspath: use that
-- else, treat it as subdir for regular procedure
"""
tmpdir = ''
if self.cfg['start_dir']:
tmpdir = self.cfg['start_dir']
if not os.path.isabs(tmpdir):
if len(self.src) > 0 and not self.skip and self.src[0]['finalpath']:
self.cfg['start_dir'] = os.path.join(self.src[0]['finalpath'], tmpdir)
else:
self.cfg['start_dir'] = os.path.join(self.builddir, tmpdir)
try:
os.chdir(self.cfg['start_dir'])
self.log.debug("Changed to real build directory %s" % (self.cfg['start_dir']))
except OSError, err:
raise EasyBuildError("Can't change to real build directory %s: %s", self.cfg['start_dir'], err)
def handle_iterate_opts(self):
"""Handle options relevant during iterated part of build/install procedure."""
# disable templating in this function, since we're messing about with values in self.cfg
self.cfg.enable_templating = False
# handle configure/build/install options that are specified as lists
# set first element to be used, keep track of list in *_list options dictionary
# this will only be done during first iteration, since after that the options won't be lists anymore
suffix = "_list"
sufflen = len(suffix)
for opt in ITERATE_OPTIONS:
# keep track of list, supply first element as first option to handle
if isinstance(self.cfg[opt], (list, tuple)):
self.iter_opts[opt + suffix] = self.cfg[opt] # copy
self.log.debug("Found list for %s: %s" % (opt, self.iter_opts[opt + suffix]))
# pop first element from all *_list options as next value to use
for (lsname, ls) in self.iter_opts.items():
opt = lsname[:-sufflen] # drop '_list' part from name to get option name
if len(self.iter_opts[lsname]) > 0:
self.cfg[opt] = self.iter_opts[lsname].pop(0) # first element will be used next
else:
self.cfg[opt] = '' # empty list => empty option as next value
self.log.debug("Next value for %s: %s" % (opt, str(self.cfg[opt])))
# re-enable templating before self.cfg values are used
self.cfg.enable_templating = True
def det_iter_cnt(self):
"""Determine iteration count based on configure/build/install options that may be lists."""
iter_cnt = max([1] + [len(self.cfg[opt]) for opt in ITERATE_OPTIONS
if isinstance(self.cfg[opt], (list, tuple))])
return iter_cnt
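# Illustrative sketch (assumed easyconfig value): if an easyconfig lists
#   configopts = ['--enable-shared', '--enable-static']
# then det_iter_cnt() returns 2, and handle_iterate_opts() pops one value per
# iteration so the configure/build/install cycle runs once for each entry.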
#
# STEP FUNCTIONS
#
def check_readiness_step(self):
"""
Verify if all is ok to start build.
"""
# set level of parallelism for build
par = build_option('parallel')
if self.cfg['parallel']:
if par is None:
par = self.cfg['parallel']
self.log.debug("Desired parallelism specified via 'parallel' easyconfig parameter: %s", par)
else:
par = min(int(par), int(self.cfg['parallel']))
self.log.debug("Desired parallelism: minimum of 'parallel' build option/easyconfig parameter: %s", par)
else:
self.log.debug("Desired parallelism specified via 'parallel' build option: %s", par)
self.cfg['parallel'] = det_parallelism(par=par, maxpar=self.cfg['maxparallel'])
self.log.info("Setting parallelism: %s" % self.cfg['parallel'])
# check whether modules are loaded
loadedmods = self.modules_tool.loaded_modules()
if len(loadedmods) > 0:
self.log.warning("Loaded modules detected: %s" % loadedmods)
# do all dependencies have a toolchain version?
self.toolchain.add_dependencies(self.cfg.dependencies())
if not len(self.cfg.dependencies()) == len(self.toolchain.dependencies):
self.log.debug("dep %s (%s)" % (len(self.cfg.dependencies()), self.cfg.dependencies()))
self.log.debug("tc.dep %s (%s)" % (len(self.toolchain.dependencies), self.toolchain.dependencies))
raise EasyBuildError('Not all dependencies have a matching toolchain version')
# check if the application is not loaded at the moment
(root, env_var) = get_software_root(self.name, with_env_var=True)
if root:
raise EasyBuildError("Module is already loaded (%s is set), installation cannot continue.", env_var)
# check if main install needs to be skipped
# - if a current module can be found, skip is ok
# -- this is potentially very dangerous
if self.cfg['skip']:
if self.modules_tool.exist([self.full_mod_name])[0]:
self.skip = True
self.log.info("Module %s found." % self.full_mod_name)
self.log.info("Going to skip actual main build and potential existing extensions. Expert only.")
else:
self.log.info("No module %s found. Not skipping anything." % self.full_mod_name)
def fetch_step(self, skip_checksums=False):
"""
prepare for building
"""
# check EasyBuild version
easybuild_version = self.cfg['easybuild_version']
if not easybuild_version:
self.log.warn("Easyconfig does not specify an EasyBuild-version (key 'easybuild_version')! "
"Assuming the latest version")
else:
if LooseVersion(easybuild_version) < VERSION:
self.log.warn("EasyBuild-version %s is older than the currently running one. Proceed with caution!",
easybuild_version)
elif LooseVersion(easybuild_version) > VERSION:
raise EasyBuildError("EasyBuild-version %s is newer than the currently running one. Aborting!",
easybuild_version)
# fetch sources
if self.cfg['sources']:
self.fetch_sources(self.cfg['sources'], checksums=self.cfg['checksums'])
else:
self.log.info('no sources provided')
# fetch extensions
if len(self.cfg['exts_list']) > 0:
self.exts = self.fetch_extension_sources()
# fetch patches
if self.cfg['patches']:
if isinstance(self.cfg['checksums'], (list, tuple)):
# if checksums are provided as a list, first entries are assumed to be for sources
patches_checksums = self.cfg['checksums'][len(self.cfg['sources']):]
else:
patches_checksums = self.cfg['checksums']
self.fetch_patches(checksums=patches_checksums)
else:
self.log.info('no patches provided')
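        # Note on the checksums handling above (illustrative): with sources = ['foo-1.0.tar.gz'] and
        # checksums given as a list, checksums[0] is taken for the source and checksums[1:] for the
        # patches, in order.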
# compute checksums for all source and patch files
if not skip_checksums:
for fil in self.src + self.patches:
check_sum = compute_checksum(fil['path'], checksum_type=DEFAULT_CHECKSUM)
fil[DEFAULT_CHECKSUM] = check_sum
self.log.info("%s checksum for %s: %s" % (DEFAULT_CHECKSUM, fil['path'], fil[DEFAULT_CHECKSUM]))
# create parent dirs in install and modules path already
# this is required when building in parallel
mod_path_suffix = build_option('suffix_modules_path')
mod_symlink_paths = ActiveMNS().det_module_symlink_paths(self.cfg)
parent_subdir = os.path.dirname(self.install_subdir)
pardirs = [
os.path.join(install_path(), parent_subdir),
os.path.join(install_path('mod'), mod_path_suffix, parent_subdir),
]
for mod_symlink_path in mod_symlink_paths:
pardirs.append(os.path.join(install_path('mod'), mod_symlink_path, parent_subdir))
self.log.info("Checking dirs that need to be created: %s" % pardirs)
for pardir in pardirs:
mkdir(pardir, parents=True)
def checksum_step(self):
"""Verify checksum of sources and patches, if a checksum is available."""
for fil in self.src + self.patches:
ok = verify_checksum(fil['path'], fil['checksum'])
if not ok:
raise EasyBuildError("Checksum verification for %s using %s failed.", fil['path'], fil['checksum'])
else:
self.log.info("Checksum verification for %s using %s passed." % (fil['path'], fil['checksum']))
def extract_step(self):
"""
Unpack the source files.
"""
for src in self.src:
self.log.info("Unpacking source %s" % src['name'])
srcdir = extract_file(src['path'], self.builddir, cmd=src['cmd'], extra_options=self.cfg['unpack_options'])
if srcdir:
self.src[self.src.index(src)]['finalpath'] = srcdir
else:
raise EasyBuildError("Unpacking source %s failed", src['name'])
def patch_step(self, beginpath=None):
"""
Apply the patches
"""
for patch in self.patches:
self.log.info("Applying patch %s" % patch['name'])
# patch source at specified index (first source if not specified)
srcind = patch.get('source', 0)
# if patch level is specified, use that (otherwise let apply_patch derive patch level)
level = patch.get('level', None)
# determine suffix of source path to apply patch in (if any)
srcpathsuffix = patch.get('sourcepath', patch.get('copy', ''))
# determine whether 'patch' file should be copied rather than applied
            copy_patch = 'copy' in patch and 'sourcepath' not in patch
            self.log.debug("Source index: %s; patch level: %s; source path suffix: %s; copy patch: %s",
                           srcind, level, srcpathsuffix, copy_patch)
if beginpath is None:
try:
beginpath = self.src[srcind]['finalpath']
self.log.debug("Determine begin path for patch %s: %s" % (patch['name'], beginpath))
except IndexError, err:
raise EasyBuildError("Can't apply patch %s to source at index %s of list %s: %s",
patch['name'], srcind, self.src, err)
else:
self.log.debug("Using specified begin path for patch %s: %s" % (patch['name'], beginpath))
src = os.path.abspath("%s/%s" % (beginpath, srcpathsuffix))
self.log.debug("Applying patch %s in path %s" % (patch, src))
if not apply_patch(patch['path'], src, copy=copy_patch, level=level):
raise EasyBuildError("Applying patch %s failed", patch['name'])
def prepare_step(self):
"""
        Pre-configure step. Sets up the builddir just before starting configure
"""
# clean environment, undefine any unwanted environment variables that may be harmful
self.cfg['unwanted_env_vars'] = env.unset_env_vars(self.cfg['unwanted_env_vars'])
# prepare toolchain: load toolchain module and dependencies, set up build environment
self.toolchain.prepare(self.cfg['onlytcmod'])
# guess directory to start configure/build/install process in, and move there
self.guess_start_dir()
def configure_step(self):
"""Configure build (abstract method)."""
raise NotImplementedError
def build_step(self):
"""Build software (abstract method)."""
raise NotImplementedError
def test_step(self):
"""Run unit tests provided by software (if any)."""
if self.cfg['runtest']:
self.log.debug("Trying to execute %s as a command for running unit tests...")
(out, _) = run_cmd(self.cfg['runtest'], log_all=True, simple=False)
return out
def stage_install_step(self):
"""
Install in a stage directory before actual installation.
"""
pass
def install_step(self):
"""Install built software (abstract method)."""
raise NotImplementedError
def extensions_step(self, fetch=False):
"""
After make install, run this.
        - only if len(exts_list) > 0
- optionally: load module that was just created using temp module file
- find source for extensions, in 'extensions' (and 'packages' for legacy reasons)
- run extra_extensions
"""
if len(self.cfg['exts_list']) == 0:
self.log.debug("No extensions in exts_list")
return
# load fake module
fake_mod_data = self.load_fake_module(purge=True)
self.prepare_for_extensions()
if fetch:
self.exts = self.fetch_extension_sources()
self.exts_all = self.exts[:] # retain a copy of all extensions, regardless of filtering/skipping
if self.skip:
self.skip_extensions()
# actually install extensions
self.log.debug("Installing extensions")
exts_defaultclass = self.cfg['exts_defaultclass']
exts_classmap = self.cfg['exts_classmap']
# we really need a default class
if not exts_defaultclass:
self.clean_up_fake_module(fake_mod_data)
raise EasyBuildError("ERROR: No default extension class set for %s", self.name)
        # obtain name and module path for default extension class
if hasattr(exts_defaultclass, '__iter__'):
self.log.nosupport("Module path for default class is explicitly defined", '2.0')
elif isinstance(exts_defaultclass, basestring):
# proper way: derive module path from specified class name
default_class = exts_defaultclass
default_class_modpath = get_module_path(default_class, generic=True)
else:
raise EasyBuildError("Improper default extension class specification, should be list/tuple or string.")
# get class instances for all extensions
for ext in self.exts:
self.log.debug("Starting extension %s" % ext['name'])
# always go back to original work dir to avoid running stuff from a dir that no longer exists
os.chdir(self.orig_workdir)
cls, inst = None, None
class_name = encode_class_name(ext['name'])
mod_path = get_module_path(class_name)
# try instantiating extension-specific class
try:
# no error when importing class fails, in case we run into an existing easyblock
# with a similar name (e.g., Perl Extension 'GO' vs 'Go' for which 'EB_Go' is available)
cls = get_easyblock_class(None, name=ext['name'], default_fallback=False, error_on_failed_import=False)
self.log.debug("Obtained class %s for extension %s" % (cls, ext['name']))
if cls is not None:
inst = cls(self, ext)
except (ImportError, NameError), err:
self.log.debug("Failed to use extension-specific class for extension %s: %s" % (ext['name'], err))
# alternative attempt: use class specified in class map (if any)
if inst is None and ext['name'] in exts_classmap:
class_name = exts_classmap[ext['name']]
mod_path = get_module_path(class_name)
try:
cls = get_class_for(mod_path, class_name)
inst = cls(self, ext)
except (ImportError, NameError), err:
raise EasyBuildError("Failed to load specified class %s for extension %s: %s",
class_name, ext['name'], err)
# fallback attempt: use default class
if inst is None:
try:
cls = get_class_for(default_class_modpath, default_class)
self.log.debug("Obtained class %s for installing extension %s" % (cls, ext['name']))
inst = cls(self, ext)
self.log.debug("Installing extension %s with default class %s (from %s)",
ext['name'], default_class, default_class_modpath)
except (ImportError, NameError), err:
raise EasyBuildError("Also failed to use default class %s from %s for extension %s: %s, giving up",
default_class, default_class_modpath, ext['name'], err)
else:
self.log.debug("Installing extension %s with class %s (from %s)" % (ext['name'], class_name, mod_path))
# real work
inst.prerun()
txt = inst.run()
if txt:
self.module_extra_extensions += txt
inst.postrun()
            # append so we can make use of it later (in sanity_check_step)
self.ext_instances.append(inst)
# cleanup (unload fake module, remove fake module dir)
self.clean_up_fake_module(fake_mod_data)
def package_step(self):
"""Package installed software (e.g., into an RPM), if requested, using selected package tool."""
if build_option('package'):
pkgtype = build_option('package_type')
pkgdir_dest = os.path.abspath(package_path())
opt_force = build_option('force')
self.log.info("Generating %s package in %s", pkgtype, pkgdir_dest)
pkgdir_src = package(self)
mkdir(pkgdir_dest)
for src_file in glob.glob(os.path.join(pkgdir_src, "*.%s" % pkgtype)):
dest_file = os.path.join(pkgdir_dest, os.path.basename(src_file))
if os.path.exists(dest_file) and not opt_force:
raise EasyBuildError("Unable to copy package %s to %s (already exists).", src_file, dest_file)
                else:
                    shutil.copy(src_file, pkgdir_dest)
                    self.log.info("Copied package %s to %s", src_file, pkgdir_dest)
else:
self.log.info("Skipping package step (not enabled)")
def post_install_step(self):
"""
Do some postprocessing
- run post install commands if any were specified
"""
if self.cfg['postinstallcmds'] is not None:
# make sure we have a list of commands
if not isinstance(self.cfg['postinstallcmds'], (list, tuple)):
raise EasyBuildError("Invalid value for 'postinstallcmds', should be list or tuple of strings.")
for cmd in self.cfg['postinstallcmds']:
if not isinstance(cmd, basestring):
raise EasyBuildError("Invalid element in 'postinstallcmds', not a string: %s", cmd)
run_cmd(cmd, simple=True, log_ok=True, log_all=True)
def sanity_check_step(self, custom_paths=None, custom_commands=None, extension=False):
"""
Do a sanity check on the installation
- if *any* of the files/subdirectories in the installation directory listed
in sanity_check_paths are non-existent (or empty), the sanity check fails
"""
        # supported/required keys for sanity check paths, along with the function used to check them
path_keys_and_check = {
'files': lambda fp: os.path.exists(fp), # files must exist
'dirs': lambda dp: os.path.isdir(dp) and os.listdir(dp), # directories must exist and be non-empty
}
# prepare sanity check paths
paths = self.cfg['sanity_check_paths']
if not paths:
if custom_paths:
paths = custom_paths
self.log.info("Using customized sanity check paths: %s" % paths)
else:
paths = {}
for key in path_keys_and_check:
paths.setdefault(key, [])
paths.update({'dirs': ['bin', ('lib', 'lib64')]})
self.log.info("Using default sanity check paths: %s" % paths)
else:
self.log.info("Using specified sanity check paths: %s" % paths)
# check sanity check paths
ks = sorted(paths.keys())
valnottypes = [not isinstance(x, list) for x in paths.values()]
lenvals = [len(x) for x in paths.values()]
req_keys = sorted(path_keys_and_check.keys())
if not ks == req_keys or sum(valnottypes) > 0 or sum(lenvals) == 0:
raise EasyBuildError("Incorrect format for sanity_check_paths (should (only) have %s keys, "
"values should be lists (at least one non-empty)).", ','.join(req_keys))
for key, check_fn in path_keys_and_check.items():
for xs in paths[key]:
if isinstance(xs, basestring):
xs = (xs,)
elif not isinstance(xs, tuple):
raise EasyBuildError("Unsupported type '%s' encountered in %s, not a string or tuple",
key, type(xs))
found = False
for name in xs:
path = os.path.join(self.installdir, name)
if os.path.exists(path):
self.log.debug("Sanity check: found %s %s in %s" % (key[:-1], name, self.installdir))
found = True
break
else:
self.log.debug("Could not find %s %s in %s" % (key[:-1], name, self.installdir))
if not found:
self.sanity_check_fail_msgs.append("no %s of %s in %s" % (key[:-1], xs, self.installdir))
self.log.warning("Sanity check: %s" % self.sanity_check_fail_msgs[-1])
fake_mod_data = None
if not extension:
try:
# unload all loaded modules before loading fake module
# this ensures that loading of dependencies is tested, and avoids conflicts with build dependencies
fake_mod_data = self.load_fake_module(purge=True)
except EasyBuildError, err:
self.sanity_check_fail_msgs.append("loading fake module failed: %s" % err)
self.log.warning("Sanity check: %s" % self.sanity_check_fail_msgs[-1])
# chdir to installdir (better environment for running tests)
if os.path.isdir(self.installdir):
try:
os.chdir(self.installdir)
except OSError, err:
raise EasyBuildError("Failed to move to installdir %s: %s", self.installdir, err)
# run sanity check commands
commands = self.cfg['sanity_check_commands']
if not commands:
if custom_commands:
commands = custom_commands
self.log.info("Using customised sanity check commands: %s" % commands)
else:
commands = []
self.log.info("Using specified sanity check commands: %s" % commands)
for command in commands:
# set command to default. This allows for config files with
# non-tuple commands
if not isinstance(command, tuple):
self.log.debug("Setting sanity check command to default")
command = (None, None)
            # Build substitution dictionary
check_cmd = {'name': self.name.lower(), 'options': '-h'}
if command[0] is not None:
check_cmd['name'] = command[0]
if command[1] is not None:
check_cmd['options'] = command[1]
cmd = "%(name)s %(options)s" % check_cmd
out, ec = run_cmd(cmd, simple=False, log_ok=False, log_all=False)
if ec != 0:
self.sanity_check_fail_msgs.append("sanity check command %s exited with code %s (output: %s)" % (cmd, ec, out))
self.log.warning("Sanity check: %s" % self.sanity_check_fail_msgs[-1])
else:
self.log.debug("sanity check command %s ran successfully! (output: %s)" % (cmd, out))
if not extension:
failed_exts = [ext.name for ext in self.ext_instances if not ext.sanity_check_step()]
if failed_exts:
self.sanity_check_fail_msgs.append("sanity checks for %s extensions failed!" % failed_exts)
self.log.warning("Sanity check: %s" % self.sanity_check_fail_msgs[-1])
# cleanup
if fake_mod_data:
self.clean_up_fake_module(fake_mod_data)
# pass or fail
if self.sanity_check_fail_msgs:
raise EasyBuildError("Sanity check failed: %s", ', '.join(self.sanity_check_fail_msgs))
else:
self.log.debug("Sanity check passed!")
def cleanup_step(self):
"""
Cleanup leftover mess: remove/clean build directory
except when we're building in the installation directory or
cleanup_builddir is False, otherwise we remove the installation
"""
if not self.build_in_installdir and build_option('cleanup_builddir'):
try:
os.chdir(self.orig_workdir) # make sure we're out of the dir we're removing
self.log.info("Cleaning up builddir %s (in %s)" % (self.builddir, os.getcwd()))
rmtree2(self.builddir)
base = os.path.dirname(self.builddir)
# keep removing empty directories until we either find a non-empty one
# or we end up in the root builddir
while len(os.listdir(base)) == 0 and not os.path.samefile(base, build_path()):
os.rmdir(base)
base = os.path.dirname(base)
except OSError, err:
raise EasyBuildError("Cleaning up builddir %s failed: %s", self.builddir, err)
if not build_option('cleanup_builddir'):
self.log.info("Keeping builddir %s" % self.builddir)
env.restore_env_vars(self.cfg['unwanted_env_vars'])
def make_module_step(self, fake=False):
"""
Generate a module file.
"""
modpath = self.module_generator.prepare(fake=fake)
txt = self.make_module_description()
txt += self.make_module_dep()
txt += self.make_module_extend_modpath()
txt += self.make_module_req()
txt += self.make_module_extra()
txt += self.make_module_footer()
mod_filepath = self.module_generator.get_module_filepath(fake=fake)
write_file(mod_filepath, txt)
self.log.info("Module file %s written: %s", mod_filepath, txt)
# only update after generating final module file
if not fake:
self.modules_tool.update()
mod_symlink_paths = ActiveMNS().det_module_symlink_paths(self.cfg)
self.module_generator.create_symlinks(mod_symlink_paths, fake=fake)
if not fake:
self.make_devel_module()
return modpath
def permissions_step(self):
"""
Finalize installation procedure: adjust permissions as configured, change group ownership (if requested).
        The installing user must be a member of the group that ownership is changed to.
"""
if self.group is not None:
# remove permissions for others, and set group ID
try:
perms = stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
adjust_permissions(self.installdir, perms, add=False, recursive=True, group_id=self.group[1],
relative=True, ignore_errors=True)
except EasyBuildError, err:
raise EasyBuildError("Unable to change group permissions of file(s): %s", err)
self.log.info("Successfully made software only available for group %s (gid %s)" % self.group)
if build_option('read_only_installdir'):
# remove write permissions for everyone
perms = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
adjust_permissions(self.installdir, perms, add=False, recursive=True, relative=True, ignore_errors=True)
self.log.info("Successfully removed write permissions recursively for *EVERYONE* on install dir.")
elif build_option('group_writable_installdir'):
# enable write permissions for group
perms = stat.S_IWGRP
adjust_permissions(self.installdir, perms, add=True, recursive=True, relative=True, ignore_errors=True)
self.log.info("Successfully enabled write permissions recursively for group on install dir.")
else:
# remove write permissions for group and other
perms = stat.S_IWGRP | stat.S_IWOTH
adjust_permissions(self.installdir, perms, add=False, recursive=True, relative=True, ignore_errors=True)
self.log.info("Successfully removed write permissions recursively for group/other on install dir.")
def test_cases_step(self):
"""
Run provided test cases.
"""
for test in self.cfg['tests']:
os.chdir(self.orig_workdir)
if os.path.isabs(test):
path = test
else:
for source_path in source_paths():
path = os.path.join(source_path, self.name, test)
if os.path.exists(path):
break
if not os.path.exists(path):
raise EasyBuildError("Test specifies invalid path: %s", path)
try:
self.log.debug("Running test %s" % path)
run_cmd(path, log_all=True, simple=True)
except EasyBuildError, err:
raise EasyBuildError("Running test %s failed: %s", path, err)
def update_config_template_run_step(self):
"""Update the the easyconfig template dictionary with easyconfig.TEMPLATE_NAMES_EASYBLOCK_RUN_STEP names"""
for name in TEMPLATE_NAMES_EASYBLOCK_RUN_STEP:
self.cfg.template_values[name[0]] = str(getattr(self, name[0], None))
self.cfg.generate_template_values()
def _skip_step(self, step, skippable):
"""Dedice whether or not to skip the specified step."""
module_only = build_option('module_only')
force = build_option('force')
skip = False
# skip step if specified as individual (skippable) step
if skippable and (self.skip or step in self.cfg['skipsteps']):
self.log.info("Skipping %s step (skip: %s, skipsteps: %s)", step, self.skip, self.cfg['skipsteps'])
skip = True
# skip step when only generating module file
# * still run sanity check without use of force
# * always run ready & prepare step to set up toolchain + deps
        elif module_only and step not in MODULE_ONLY_STEPS:
self.log.info("Skipping %s step (only generating module)", step)
skip = True
# allow skipping sanity check too when only generating module and force is used
elif module_only and step == SANITYCHECK_STEP and force:
self.log.info("Skipping %s step because of forced module-only mode", step)
skip = True
else:
self.log.debug("Not skipping %s step (skippable: %s, skip: %s, skipsteps: %s, module_only: %s, force: %s",
step, skippable, self.skip, self.cfg['skipsteps'], module_only, force)
return skip
def run_step(self, step, methods):
"""
        Run the specified step; raises StopException when execution should be stopped
"""
self.log.info("Starting %s step", step)
self.update_config_template_run_step()
for m in methods:
self.log.info("Running method %s part of step %s" % ('_'.join(m.func_code.co_names), step))
m(self)
if self.cfg['stop'] == step:
self.log.info("Stopping after %s step.", step)
raise StopException(step)
@staticmethod
def get_steps(run_test_cases=True, iteration_count=1):
"""Return a list of all steps to be performed."""
def get_step(tag, descr, substeps, skippable, initial=True):
"""Determine step definition based on whether it's an initial run or not."""
substeps = [substep for (always_include, substep) in substeps if (initial or always_include)]
return (tag, descr, substeps, skippable)
# list of substeps for steps that are slightly different from 2nd iteration onwards
ready_substeps = [
(False, lambda x: x.check_readiness_step()),
(True, lambda x: x.make_builddir()),
(True, lambda x: env.reset_changes()),
(True, lambda x: x.handle_iterate_opts()),
]
ready_step_spec = lambda initial: get_step(READY_STEP, "creating build dir, resetting environment",
ready_substeps, False, initial=initial)
source_substeps = [
(False, lambda x: x.checksum_step()),
(True, lambda x: x.extract_step()),
]
source_step_spec = lambda initial: get_step(SOURCE_STEP, "unpacking", source_substeps, True, initial=initial)
def prepare_step_spec(initial):
"""Return prepare step specification."""
if initial:
substeps = [lambda x: x.prepare_step()]
else:
substeps = [lambda x: x.guess_start_dir()]
return (PREPARE_STEP, 'preparing', substeps, False)
install_substeps = [
(False, lambda x: x.stage_install_step()),
(False, lambda x: x.make_installdir()),
(True, lambda x: x.install_step()),
]
install_step_spec = lambda initial: get_step('install', "installing", install_substeps, True, initial=initial)
        # format for step specifications: (stop_name, description, list of substep functions, skippable)
# core steps that are part of the iterated loop
patch_step_spec = (PATCH_STEP, 'patching', [lambda x: x.patch_step()], True)
configure_step_spec = (CONFIGURE_STEP, 'configuring', [lambda x: x.configure_step()], True)
build_step_spec = (BUILD_STEP, 'building', [lambda x: x.build_step()], True)
test_step_spec = (TEST_STEP, 'testing', [lambda x: x.test_step()], True)
# part 1: pre-iteration + first iteration
steps_part1 = [
(FETCH_STEP, 'fetching files', [lambda x: x.fetch_step()], False),
ready_step_spec(True),
source_step_spec(True),
patch_step_spec,
prepare_step_spec(True),
configure_step_spec,
build_step_spec,
test_step_spec,
install_step_spec(True),
]
# part 2: iterated part, from 2nd iteration onwards
# repeat core procedure again depending on specified iteration count
# not all parts of all steps need to be rerun (see e.g., ready, prepare)
steps_part2 = [
ready_step_spec(False),
source_step_spec(False),
patch_step_spec,
prepare_step_spec(False),
configure_step_spec,
build_step_spec,
test_step_spec,
install_step_spec(False),
] * (iteration_count - 1)
# part 3: post-iteration part
steps_part3 = [
(EXTENSIONS_STEP, 'taking care of extensions', [lambda x: x.extensions_step()], False),
(POSTPROC_STEP, 'postprocessing', [lambda x: x.post_install_step()], True),
(SANITYCHECK_STEP, 'sanity checking', [lambda x: x.sanity_check_step()], False),
(CLEANUP_STEP, 'cleaning up', [lambda x: x.cleanup_step()], False),
(MODULE_STEP, 'creating module', [lambda x: x.make_module_step()], False),
(PERMISSIONS_STEP, 'permissions', [lambda x: x.permissions_step()], False),
(PACKAGE_STEP, 'packaging', [lambda x: x.package_step()], False),
]
        # full list of steps, including iterated steps
steps = steps_part1 + steps_part2 + steps_part3
if run_test_cases:
steps.append((TESTCASES_STEP, 'running test cases', [
lambda x: x.load_module(),
lambda x: x.test_cases_step(),
], False))
return steps
def run_all_steps(self, run_test_cases):
"""
Build and install this software.
run_test_cases (bool): run tests after building (e.g.: make test)
"""
if self.cfg['stop'] and self.cfg['stop'] == 'cfg':
return True
steps = self.get_steps(run_test_cases=run_test_cases, iteration_count=self.det_iter_cnt())
print_msg("building and installing %s..." % self.full_mod_name, self.log, silent=self.silent)
try:
for (step_name, descr, step_methods, skippable) in steps:
if self._skip_step(step_name, skippable):
print_msg("%s [skipped]" % descr, self.log, silent=self.silent)
else:
print_msg("%s..." % descr, self.log, silent=self.silent)
self.run_step(step_name, step_methods)
except StopException:
pass
        # return True for successful build (or stopped build)
return True
def build_and_install_one(ecdict, init_env):
"""
Build the software
    @param ecdict: dictionary containing parsed easyconfig + metadata
@param init_env: original environment (used to reset environment)
"""
silent = build_option('silent')
spec = ecdict['spec']
rawtxt = ecdict['ec'].rawtxt
name = ecdict['ec']['name']
print_msg("processing EasyBuild easyconfig %s" % spec, log=_log, silent=silent)
# restore original environment
_log.info("Resetting environment")
filetools.errors_found_in_log = 0
restore_env(init_env)
cwd = os.getcwd()
# load easyblock
easyblock = build_option('easyblock')
if not easyblock:
easyblock = fetch_parameters_from_easyconfig(rawtxt, ['easyblock'])[0]
try:
app_class = get_easyblock_class(easyblock, name=name)
app = app_class(ecdict['ec'])
_log.info("Obtained application instance of for %s (easyblock: %s)" % (name, easyblock))
except EasyBuildError, err:
print_error("Failed to get application instance for %s (easyblock: %s): %s" % (name, easyblock, err.msg),
silent=silent)
# application settings
stop = build_option('stop')
if stop is not None:
_log.debug("Stop set to %s" % stop)
app.cfg['stop'] = stop
skip = build_option('skip')
if skip is not None:
_log.debug("Skip set to %s" % skip)
app.cfg['skip'] = skip
# build easyconfig
errormsg = '(no error)'
# timing info
start_time = time.time()
try:
run_test_cases = not build_option('skip_test_cases') and app.cfg['tests']
result = app.run_all_steps(run_test_cases=run_test_cases)
except EasyBuildError, err:
first_n = 300
errormsg = "build failed (first %d chars): %s" % (first_n, err.msg[:first_n])
_log.warning(errormsg)
result = False
ended = "ended"
# make sure we're back in original directory before we finish up
os.chdir(cwd)
# successful build
if result:
if app.cfg['stop']:
ended = "STOPPED"
if app.builddir is not None:
new_log_dir = os.path.join(app.builddir, config.log_path())
else:
new_log_dir = os.path.dirname(app.logfile)
else:
new_log_dir = os.path.join(app.installdir, config.log_path())
if build_option('read_only_installdir'):
# temporarily re-enable write permissions for copying log/easyconfig to install dir
adjust_permissions(new_log_dir, stat.S_IWUSR, add=True, recursive=False)
# collect build stats
_log.info("Collecting build stats...")
buildstats = get_build_stats(app, start_time, build_option('command_line'))
_log.info("Build stats: %s" % buildstats)
try:
# upload spec to central repository
currentbuildstats = app.cfg['buildstats']
repo = init_repository(get_repository(), get_repositorypath())
if 'original_spec' in ecdict:
block = det_full_ec_version(app.cfg) + ".block"
repo.add_easyconfig(ecdict['original_spec'], app.name, block, buildstats, currentbuildstats)
repo.add_easyconfig(spec, app.name, det_full_ec_version(app.cfg), buildstats, currentbuildstats)
repo.commit("Built %s" % app.full_mod_name)
del repo
except EasyBuildError, err:
_log.warn("Unable to commit easyconfig to repository: %s", err)
success = True
succ = "successfully"
summary = "COMPLETED"
# cleanup logs
app.close_log()
log_fn = os.path.basename(get_log_filename(app.name, app.version))
application_log = os.path.join(new_log_dir, log_fn)
move_logs(app.logfile, application_log)
try:
newspec = os.path.join(new_log_dir, "%s-%s.eb" % (app.name, det_full_ec_version(app.cfg)))
# only copy if the files are not the same file already (yes, it happens)
if os.path.exists(newspec) and os.path.samefile(spec, newspec):
_log.debug("Not copying easyconfig file %s to %s since files are identical" % (spec, newspec))
else:
shutil.copy(spec, newspec)
_log.debug("Copied easyconfig file %s to %s" % (spec, newspec))
except (IOError, OSError), err:
print_error("Failed to copy easyconfig %s to %s: %s" % (spec, newspec, err))
if build_option('read_only_installdir'):
# take away user write permissions (again)
adjust_permissions(new_log_dir, stat.S_IWUSR, add=False, recursive=False)
# build failed
else:
success = False
summary = "FAILED"
build_dir = ''
if app.builddir:
build_dir = " (build directory: %s)" % (app.builddir)
succ = "unsuccessfully%s: %s" % (build_dir, errormsg)
# cleanup logs
app.close_log()
application_log = app.logfile
print_msg("%s: Installation %s %s" % (summary, ended, succ), log=_log, silent=silent)
# check for errors
if filetools.errors_found_in_log > 0:
print_msg("WARNING: %d possible error(s) were detected in the "
"build logs, please verify the build." % filetools.errors_found_in_log,
_log, silent=silent)
if app.postmsg:
print_msg("\nWARNING: %s\n" % app.postmsg, _log, silent=silent)
print_msg("Results of the build can be found in the log file %s" % application_log, _log, silent=silent)
del app
return (success, application_log, errormsg)
def get_easyblock_instance(ecdict):
"""
Get an instance for this easyconfig
    @param ecdict: dictionary containing parsed easyconfig + metadata
returns an instance of EasyBlock (or subclass thereof)
"""
spec = ecdict['spec']
rawtxt = ecdict['ec'].rawtxt
name = ecdict['ec']['name']
# handle easyconfigs with custom easyblocks
# determine easyblock specification from easyconfig file, if any
easyblock = fetch_parameters_from_easyconfig(rawtxt, ['easyblock'])[0]
app_class = get_easyblock_class(easyblock, name=name)
return app_class(ecdict['ec'])
def build_easyconfigs(easyconfigs, output_dir, test_results):
"""Build the list of easyconfigs."""
build_stopped = {}
apploginfo = lambda x, y: x.log.info(y)
def perform_step(step, obj, method, logfile):
"""Perform method on object if it can be built."""
if (isinstance(obj, dict) and obj['spec'] not in build_stopped) or obj not in build_stopped:
# update templates before every step (except for initialization)
if isinstance(obj, EasyBlock):
obj.update_config_template_run_step()
try:
if step == 'initialization':
_log.info("Running %s step" % step)
return get_easyblock_instance(obj)
else:
apploginfo(obj, "Running %s step" % step)
method(obj)
except Exception, err: # catch all possible errors, also crashes in EasyBuild code itself
fullerr = str(err)
if not isinstance(err, EasyBuildError):
tb = traceback.format_exc()
fullerr = '\n'.join([tb, str(err)])
# we cannot continue building it
if step == 'initialization':
obj = obj['spec']
test_results.append((obj, step, fullerr, logfile))
            # keep a dict so we can check in O(1) if objects can still be built
build_stopped[obj] = step
# initialize all instances
apps = []
for ec in easyconfigs:
instance = perform_step('initialization', ec, None, _log)
apps.append(instance)
base_dir = os.getcwd()
# keep track of environment right before initiating builds
# note: may be different from ORIG_OS_ENVIRON, since EasyBuild may have defined additional env vars itself by now
# e.g. via easyconfig.handle_allowed_system_deps
base_env = copy.deepcopy(os.environ)
    successes = []
for app in apps:
# if initialisation step failed, app will be None
if app:
applog = os.path.join(output_dir, "%s-%s.log" % (app.name, det_full_ec_version(app.cfg)))
start_time = time.time()
# start with a clean slate
os.chdir(base_dir)
restore_env(base_env)
steps = EasyBlock.get_steps(iteration_count=app.det_iter_cnt())
for (step_name, _, step_methods, skippable) in steps:
if skippable and step_name in app.cfg['skipsteps']:
_log.info("Skipping step %s" % step_name)
else:
for step_method in step_methods:
method_name = '_'.join(step_method.func_code.co_names)
perform_step('_'.join([step_name, method_name]), app, step_method, applog)
# close log and move it
app.close_log()
move_logs(app.logfile, applog)
if app not in build_stopped:
# gather build stats
buildstats = get_build_stats(app, start_time, build_option('command_line'))
                successes.append((app, buildstats))
for result in test_results:
_log.info("%s crashed with an error during fase: %s, error: %s, log file: %s" % result)
failed = len(build_stopped)
total = len(apps)
_log.info("%s of %s packages failed to build!" % (failed, total))
output_file = os.path.join(output_dir, "easybuild-test.xml")
_log.debug("writing xml output to %s" % output_file)
    write_to_xml(successes, test_results, output_file)
return failed == 0
class StopException(Exception):
"""
StopException class definition.
"""
pass
|
torbjoernk/easybuild-framework
|
easybuild/framework/easyblock.py
|
Python
|
gpl-2.0
| 93,981
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2013, 2019-2020 Laurent Monin
# Copyright (C) 2014, 2017 Sophist-UK
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Wieland Hoffmann
# Copyright (C) 2018-2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
from test.picardtestcase import PicardTestCase
from picard.i18n import setup_gettext
from picard.util import bytes2human
class Testbytes2human(PicardTestCase):
def setUp(self):
super().setUp()
# we are using temporary locales for tests
self.tmp_path = self.mktmpdir()
self.localedir = os.path.join(self.tmp_path, 'locale')
def test_00(self):
# testing with default C locale, english
lang = 'C'
setup_gettext(self.localedir, lang)
self.run_test(lang)
self.assertEqual(bytes2human.binary(45682), '44.6 KiB')
self.assertEqual(bytes2human.binary(-45682), '-44.6 KiB')
self.assertEqual(bytes2human.binary(-45682, 2), '-44.61 KiB')
self.assertEqual(bytes2human.decimal(45682), '45.7 kB')
self.assertEqual(bytes2human.decimal(45682, 2), '45.68 kB')
self.assertEqual(bytes2human.decimal(9223372036854775807), '9223.4 PB')
self.assertEqual(bytes2human.decimal(9223372036854775807, 3), '9223.372 PB')
self.assertEqual(bytes2human.decimal(123.6), '123 B')
self.assertRaises(ValueError, bytes2human.decimal, 'xxx')
self.assertRaises(ValueError, bytes2human.decimal, '123.6')
self.assertRaises(ValueError, bytes2human.binary, 'yyy')
self.assertRaises(ValueError, bytes2human.binary, '456yyy')
try:
bytes2human.decimal('123')
except Exception as e:
self.fail('Unexpected exception: %s' % e)
def test_calc_unit_raises_value_error(self):
self.assertRaises(ValueError, bytes2human.calc_unit, 1, None)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 100)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 999)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 1023)
self.assertRaises(ValueError, bytes2human.calc_unit, 1, 1025)
self.assertEqual((1.0, 'B'), bytes2human.calc_unit(1, 1024))
self.assertEqual((1.0, 'B'), bytes2human.calc_unit(1, 1000))
def run_test(self, lang='C', create_test_data=False):
"""
Compare data generated with sample files
Setting create_test_data to True will generated sample files
from code execution (developer-only, check carefully)
"""
filename = os.path.join('test', 'data', 'b2h_test_%s.dat' % lang)
testlist = self._create_testlist()
if create_test_data:
self._save_expected_to(filename, testlist)
expected = self._read_expected_from(filename)
self.assertEqual(testlist, expected)
if create_test_data:
# be sure it is disabled
self.fail('!!! UNSET create_test_data mode !!! (%s)' % filename)
@staticmethod
def _create_testlist():
values = [0, 1]
for n in [1000, 1024]:
p = 1
for e in range(0, 6):
p *= n
for x in [0.1, 0.5, 0.99, 0.9999, 1, 1.5]:
values.append(int(p * x))
        lines = []
        for x in sorted(values):
            lines.append(";".join([str(x), bytes2human.decimal(x),
                                   bytes2human.binary(x),
                                   bytes2human.short_string(x, 1024, 2)]))
        return lines
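        # Each entry is a ';'-separated line: "<value>;<decimal form>;<binary form>;<short string>",
        # which is what the b2h_test_<lang>.dat sample files are expected to contain.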
@staticmethod
def _save_expected_to(path, a_list):
        with open(path, 'w') as f:
            f.writelines([line + "\n" for line in a_list])
@staticmethod
def _read_expected_from(path):
        with open(path, 'r') as f:
            lines = [line.rstrip("\n") for line in f.readlines()]
        return lines
def test_calc_unit(self):
self.assertEqual(bytes2human.calc_unit(12456, 1024), (12.1640625, 'KiB'))
self.assertEqual(bytes2human.calc_unit(-12456, 1000), (-12.456, 'kB'))
self.assertRaises(ValueError, bytes2human.calc_unit, 0, 1001)
|
musicbrainz/picard
|
test/test_bytes2human.py
|
Python
|
gpl-2.0
| 4,925
|
import math
from math import *
#We have put this in the same order as the types in the LeapSDK.
finger_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
'''
Function that returns the palm orientation
@hand: the hand that we're analysing.
'''
def palmOrientation (hand):
palmNormal = hand.palm_normal
y = palmNormal[1]
x = palmNormal[0]
    #We have divided the unit sphere into four quadrants and check which quadrant palm_normal falls in
if cos(math.pi/4) <= y and y <= 1:
return 'up'
elif -1 <= y and y <= cos(3*math.pi/4):
return 'down'
elif x > 0:
return 'right'
else:
return 'left'
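    # Note: cos(pi/4) ~ 0.707 and cos(3*pi/4) ~ -0.707, so 'up'/'down' cover the top and bottom
    # quadrants of the unit sphere, and the remaining cases fall back to the sign of x ('right'/'left').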
'''
Function that returns a finger from a hand.
@hand: the hand whose finger we want to get.
@finger_type: the finger that we want to get.
'''
def getFinger(hand, finger_type):
#We try to find the finger in the hand.
for finger in hand.fingers:
if finger_type == finger_names[finger.type]:
return finger
'''
Function that returns the tip position of the selected finger
@hand: the hand that we're analysing
@finger_type: the finger whose tip's position we want to get
'''
def getTipPosition(hand, finger_type):
finger = getFinger(hand, finger_type)
return finger.bone(3).next_joint
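# Illustrative usage (assuming a Leap Motion 'hand' object from a tracked frame):
#   tip = getTipPosition(hand, 'index')  # next_joint of the distal bone of the index finger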
|
LuisSuall/Sleight_of_hand
|
Finger_run_II/utils/extendedHand.py
|
Python
|
gpl-2.0
| 1,259
|
#=====================================================
# Copyright (C) 2011 Andrea Arteaga <andyspiros@gmail.com>
#=====================================================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import benchconfig as cfg
from utils import benchutils as bu
from os.path import dirname, join as pjoin
class _Print:
def __init__(self, logfile, maxlevel=10):
self._level = 0
self._maxlevel = maxlevel
self._logfile = logfile
def __call__(self, arg='', end='\n'):
printstr = str(arg) + end
if self._level > 0:
printstr = (self._level - 1) * " " + "-- " + printstr
# Print to logfile
bu.mkdir(dirname(self._logfile))
logfile = file(self._logfile, 'a')
print >> logfile, printstr,
logfile.close()
# Print to terminal
if self._level <= self._maxlevel:
print printstr,
def up(self, n=1):
self._level = max(self._level - n, 0)
def down(self, n=1):
self._level = max(self._level + n, 0)
# Uninitialized object (wait for argument parsing, directories lookup,... )
Print = None
def initializePrint():
global Print
Print = _Print(pjoin(cfg.logdir, 'main.log'), 3)
return Print
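# Illustrative usage (once configuration/log directories are available):
#   p = initializePrint()
#   p("top-level message")
#   p.down(); p("indented detail"); p.up()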
|
andyspiros/numbench
|
numbench/benchprint.py
|
Python
|
gpl-2.0
| 1,916
|
from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from snippets.v3_0 import views
urlpatterns = patterns('',
url(r'^v3_0/snippets/$', views.SnippetList.as_view()),
url(r'^v3_0/snippets/(?P<pk>[0-9]+)/$', views.SnippetDetail.as_view()),
)
urlpatterns = format_suffix_patterns(urlpatterns)
|
lgarest/django_snippets_api
|
snippets/v3_0/urls.py
|
Python
|
gpl-2.0
| 353
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zeltlager_registration', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='jugendgruppe',
name='address',
),
migrations.DeleteModel(
name='Jugendgruppe',
),
migrations.RemoveField(
model_name='zeltlagerdurchgang',
name='address',
),
migrations.RemoveField(
model_name='zeltlagerdurchgang',
name='description',
),
]
|
jjbgf/eventbooking
|
zeltlager_registration/migrations/0002_auto_20150211_2011.py
|
Python
|
gpl-2.0
| 675
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ArtistMedia.is_default_image'
db.delete_column(u'artist_artistmedia', 'is_default_image')
# Deleting field 'ArtistMedia.name'
db.delete_column(u'artist_artistmedia', 'name')
# Deleting field 'ArtistMedia.video_link'
db.delete_column(u'artist_artistmedia', 'video_link')
# Deleting field 'ArtistMedia.full_res_image'
db.delete_column(u'artist_artistmedia', 'full_res_image')
# Deleting field 'ArtistMedia.image'
db.delete_column(u'artist_artistmedia', 'image')
# Deleting field 'ArtistMedia.id'
db.delete_column(u'artist_artistmedia', u'id')
# Deleting field 'ArtistMedia.thumbnail'
db.delete_column(u'artist_artistmedia', 'thumbnail')
# Adding field 'ArtistMedia.frontmedia_ptr'
db.add_column(u'artist_artistmedia', u'frontmedia_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(default=-1, to=orm['front_material.FrontMedia'], unique=True, primary_key=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'ArtistMedia.is_default_image'
db.add_column(u'artist_artistmedia', 'is_default_image',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'ArtistMedia.name'
db.add_column(u'artist_artistmedia', 'name',
self.gf('django.db.models.fields.CharField')(default='', max_length=100),
keep_default=False)
# Adding field 'ArtistMedia.video_link'
db.add_column(u'artist_artistmedia', 'video_link',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.full_res_image'
db.add_column(u'artist_artistmedia', 'full_res_image',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.image'
db.add_column(u'artist_artistmedia', 'image',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.id'
db.add_column(u'artist_artistmedia', u'id',
self.gf('django.db.models.fields.AutoField')(default=1, primary_key=True),
keep_default=False)
# Adding field 'ArtistMedia.thumbnail'
db.add_column(u'artist_artistmedia', 'thumbnail',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Deleting field 'ArtistMedia.frontmedia_ptr'
db.delete_column(u'artist_artistmedia', u'frontmedia_ptr_id')
models = {
u'artist.artist': {
'Meta': {'object_name': 'Artist'},
'artist_statement': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'artist.artistmedia': {
'Meta': {'object_name': 'ArtistMedia', '_ormbases': [u'front_material.FrontMedia']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['artist.Artist']"}),
u'frontmedia_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['front_material.FrontMedia']", 'unique': 'True', 'primary_key': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'front_material.frontmedia': {
'Meta': {'object_name': 'FrontMedia'},
'full_res_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_default_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['artist']
|
hcwiley/the-front
|
the_front/the_front/artist/migrations/0005_auto__del_field_artistmedia_is_default_image__del_field_artistmedia_na.py
|
Python
|
gpl-2.0
| 8,560
|
from Personne import *
class AgentSpecial(Personne):
"""
Exemple d'héritage simple
--->>>> l'héritage multiple est possible
-> hérite des exceptions
Classe définissant un agent spécial.
Elle hérite de la classe Personne"""
def __init__(self, nom, matricule):
"""Un agent se définit par son nom et son matricule"""
self.nom = nom
self.matricule = matricule
#Personne.__init__(self, nom)
def __str__(self):
"""Méthode appelée lors d'une conversion de l'objet en chaîne"""
return "Agent {0}, matricule {1}".format(self.nom, self.matricule)
|
syncrase/pythonScriptTest
|
AgentSpecial.py
|
Python
|
gpl-2.0
| 617
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from util import build_utils
from util import proguard_util
_DANGEROUS_OPTIMIZATIONS = [
"class/unboxing/enum",
# See crbug.com/625992
"code/allocation/variable",
# See crbug.com/625994
"field/propagation/value",
"method/propagation/parameter",
"method/propagation/returnvalue",
]
def _ParseOptions(args):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--proguard-path',
help='Path to the proguard executable.')
parser.add_option('--input-paths',
help='Paths to the .jar files proguard should run on.')
parser.add_option('--output-path', help='Path to the generated .jar file.')
parser.add_option('--proguard-configs', action='append',
help='Paths to proguard configuration files.')
parser.add_option('--proguard-config-exclusions',
default='',
help='GN list of paths to proguard configuration files '
'included by --proguard-configs, but that should '
'not actually be included.')
parser.add_option('--mapping', help='Path to proguard mapping to apply.')
parser.add_option('--is-test', action='store_true',
help='If true, extra proguard options for instrumentation tests will be '
'added.')
parser.add_option('--classpath', action='append',
help='Classpath for proguard.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--enable-dangerous-optimizations', action='store_true',
help='Enable optimizations which are known to have issues.')
parser.add_option('--verbose', '-v', action='store_true',
help='Print all proguard output')
options, _ = parser.parse_args(args)
classpath = []
for arg in options.classpath:
classpath += build_utils.ParseGnList(arg)
options.classpath = classpath
configs = []
for arg in options.proguard_configs:
configs += build_utils.ParseGnList(arg)
options.proguard_configs = configs
options.proguard_config_exclusions = (
build_utils.ParseGnList(options.proguard_config_exclusions))
options.input_paths = build_utils.ParseGnList(options.input_paths)
return options
def main(args):
args = build_utils.ExpandFileArgs(args)
options = _ParseOptions(args)
proguard = proguard_util.ProguardCmdBuilder(options.proguard_path)
proguard.injars(options.input_paths)
proguard.configs(options.proguard_configs)
proguard.config_exclusions(options.proguard_config_exclusions)
proguard.outjar(options.output_path)
if options.mapping:
proguard.mapping(options.mapping)
classpath = list(set(options.classpath))
proguard.libraryjars(classpath)
proguard.verbose(options.verbose)
if not options.enable_dangerous_optimizations:
proguard.disable_optimizations(_DANGEROUS_OPTIMIZATIONS)
build_utils.CallAndWriteDepfileIfStale(
proguard.CheckOutput,
options,
input_paths=proguard.GetInputs(),
input_strings=proguard.build(),
output_paths=proguard.GetOutputs(),
depfile_deps=proguard.GetDepfileDeps())
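# Illustrative invocation (paths are made-up placeholders, not real build inputs):
#   proguard.py --proguard-path=third_party/proguard.jar --input-paths=libFoo.jar \
#       --output-path=obfuscated.jar --proguard-configs=proguard.flags --classpath=android.jar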
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Passw/gn_GFW
|
build/android/gyp/proguard.py
|
Python
|
gpl-3.0
| 3,458
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "labs_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
heldergg/labs
|
labs_django/manage.py
|
Python
|
gpl-3.0
| 254
|
from django.contrib import admin
from eventex.core.models import Talk
from eventex.core.models import Course
from eventex.core.models import Speaker
from eventex.core.models import Contact
class ContactInline(admin.TabularInline):
model = Contact
extra = 1
class SpeakerModelAdmin(admin.ModelAdmin):
inlines = [ContactInline]
list_display = ['name', 'website_link', 'photo_img', 'email', 'phone']
prepopulated_fields = {'slug': ['name']}
def website_link(self, obj):
return '<a href="{0}">{0}</a>'.format(obj.website)
website_link.allow_tags = True
website_link.short_description = 'Website'
def photo_img(self, obj):
return '<img width="32px" src="{}"/>'.format(obj.photo)
photo_img.allow_tags = True
photo_img.short_description = 'Foto'
def email(self, obj):
return obj.contact_set.emails().first()
email.short_description = 'E-mail'
def phone(self, obj):
return obj.contact_set.phones().first()
phone.short_description = 'Telefone'
class TalkModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.filter(course=None)
admin.site.register(Speaker, SpeakerModelAdmin)
admin.site.register(Talk, TalkModelAdmin)
admin.site.register(Course)
|
adrianomargarin/wttd-eventex
|
eventex/core/admin.py
|
Python
|
gpl-3.0
| 1,325
|
from braces.views import LoginRequiredMixin
from django.views.generic import UpdateView
from oauth2_provider.exceptions import OAuthToolkitError
from oauth2_provider.http import HttpResponseUriRedirect
from oauth2_provider.models import get_application_model as get_oauth2_application_model
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views import AuthorizationView
from oauth2_provider.views.application import ApplicationRegistration
from core.utils import get_default_scopes
from .forms import RegistrationForm
class ApplicationRegistrationView(ApplicationRegistration):
form_class = RegistrationForm
class ApplicationUpdateView(LoginRequiredMixin, UpdateView):
"""
View used to update an application owned by the request.user
"""
form_class = RegistrationForm
context_object_name = 'application'
template_name = "oauth2_provider/application_form.html"
def get_queryset(self):
return get_oauth2_application_model().objects.filter(user=self.request.user)
class CustomAuthorizationView(AuthorizationView):
def form_valid(self, form):
client_id = form.cleaned_data.get('client_id', '')
application = get_oauth2_application_model().objects.get(client_id=client_id)
scopes = form.cleaned_data.get('scope', '')
scopes = set(scopes.split(' '))
scopes.update(set(get_default_scopes(application)))
private_scopes = application.private_scopes
if private_scopes:
private_scopes = set(private_scopes.split(' '))
scopes.update(private_scopes)
scopes = ' '.join(list(scopes))
form.cleaned_data['scope'] = scopes
return super(CustomAuthorizationView, self).form_valid(form)
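        # e.g. (illustrative): a requested scope string like "basic" is merged with the
        # application's default scopes and any private scopes before being handed back to the form.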
def get(self, request, *args, **kwargs):
"""
        Copied largely from the super method. Had to change a few things, but didn't find a better way
        than copying and editing the whole thing.
        Sin Count += 1
"""
try:
scopes, credentials = self.validate_authorization_request(request)
try:
del credentials['request']
# Removing oauthlib.Request from credentials. This is not required in future
except KeyError: # pylint: disable=pointless-except
pass
kwargs['scopes_descriptions'] = [oauth2_settings.SCOPES[scope] for scope in scopes]
kwargs['scopes'] = scopes
# at this point we know an Application instance with such client_id exists in the database
application = get_oauth2_application_model().objects.get(
client_id=credentials['client_id']) # TODO: cache it!
kwargs['application'] = application
kwargs.update(credentials)
self.oauth2_data = kwargs
# following two loc are here only because of https://code.djangoproject.com/ticket/17795
form = self.get_form(self.get_form_class())
kwargs['form'] = form
# Check to see if the user has already granted access and return
# a successful response depending on 'approval_prompt' url parameter
require_approval = request.GET.get('approval_prompt', oauth2_settings.REQUEST_APPROVAL_PROMPT)
# If skip_authorization field is True, skip the authorization screen even
# if this is the first use of the application and there was no previous authorization.
            # This is useful for in-house applications -> assume in-house applications
            # are already approved.
if application.skip_authorization:
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=" ".join(scopes),
credentials=credentials, allow=True)
return HttpResponseUriRedirect(uri)
elif require_approval == 'auto':
tokens = request.user.accesstoken_set.filter(application=kwargs['application']).all().order_by('-id')
if len(tokens) > 0:
token = tokens[0]
if len(tokens) > 1:
# Enforce one token pair per user policy. Remove all older tokens
request.user.accesstoken_set.exclude(pk=token.id).all().delete()
# check past authorizations regarded the same scopes as the current one
if token.allow_scopes(scopes):
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=" ".join(scopes),
credentials=credentials, allow=True)
return HttpResponseUriRedirect(uri)
return self.render_to_response(self.get_context_data(**kwargs))
except OAuthToolkitError as error:
return self.error_response(error)
|
DheerendraRathor/ldap-oauth2
|
application/views.py
|
Python
|
gpl-3.0
| 4,953
|
# Author: matigonkas
# URL: https://github.com/SiCKRAGETV/sickrage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
class STRIKEProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "Strike")
self.supportsBacklog = True
self.public = True
self.url = 'https://getstrike.net/'
self.ratio = 0
self.cache = StrikeCache(self)
self.minseed, self.minleech = 2 * [None]
def isEnabled(self):
return self.enabled
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings.keys(): #Mode = RSS, Season, Episode
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: " + search_string.strip(), logger.DEBUG)
searchURL = self.url + "api/v2/torrents/search/?category=TV&phrase=" + search_string
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
jdata = self.getURL(searchURL, json=True)
if not jdata:
logger.log("No data returned from provider", logger.DEBUG)
return []
results = []
for item in jdata['torrents']:
seeders = ('seeds' in item and item['seeds']) or 0
leechers = ('leeches' in item and item['leeches']) or 0
title = ('torrent_title' in item and item['torrent_title']) or ''
size = ('size' in item and item['size']) or 0
download_url = ('magnet_uri' in item and item['magnet_uri']) or ''
if not all([title, download_url]):
continue
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
item = title, download_url, size, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
class StrikeCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# set this 0 to suppress log line, since we aren't updating it anyways
self.minTime = 0
def _getRSSData(self):
# no rss for getstrike.net afaik, also can't search with empty string
return {'entries': {}}
provider = STRIKEProvider()
|
hale36/SRTV
|
sickbeard/providers/strike.py
|
Python
|
gpl-3.0
| 3,919
|
# -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
## app configuration made easy. Look inside private/appconfig.ini
from gluon.contrib.appconfig import AppConfig
## once in production, remove reload=True to gain full speed
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
## if NOT running on Google App Engine use SQLite or other DB
db = DAL(myconf.get('db.uri'),
pool_size = myconf.get('db.pool_size'),
migrate_enabled = myconf.get('db.migrate'),
check_reserved = ['all'])
else:
## connect to Google BigTable (optional 'google:datastore://namespace')
db = DAL('google:datastore+ndb')
## store sessions and tickets there
session.connect(request, response, db=db)
## or store session in Memcache, Redis, etc.
## from gluon.contrib.memdb import MEMDB
## from google.appengine.api.memcache import Client
## session.connect(request, response, db = MEMDB(Client()))
## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## choose a style for forms
response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.get('forms.separator') or ''
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
## (optional) static assets folder versioning
# response.static_version = '0.0.0'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
from gluon.tools import Auth, Service, PluginManager
auth = Auth(db, host=myconf.get('host.name'))
service = Service()
plugins = PluginManager()
## create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.get('smtp.server')
mail.settings.sender = myconf.get('smtp.sender')
mail.settings.login = myconf.get('smtp.login')
mail.settings.tls = myconf.get('smtp.tls') or False
mail.settings.ssl = myconf.get('smtp.ssl') or False
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
## 'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################
## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)
|
urrego093/proyecto_mv
|
applications/welcome/models/db.py
|
Python
|
gpl-3.0
| 4,097
|
from trueskill import TrueSkill, Rating, rate
import argparse
from pytba import api as tba
import math
class FrcTrueSkill:
def __init__(self):
self.env = TrueSkill(draw_probability=0.02)
self.trueskills = {}
self.events = {}
def update(self, red_alliance, red_score, blue_alliance, blue_score):
# Calculate teams per alliance
for alliance in [red_alliance, blue_alliance]:
for team in alliance:
if not team in self.trueskills:
self.trueskills[team] = self.env.Rating()
# Update ratings based on result
if red_score == blue_score: # Tied
if red_score == -1:
return # No result yet
ranks = [0, 0]
elif red_score > blue_score: # Red beat blue
ranks = [0, 1] # Lower is better
else:
ranks = [1, 0]
        new_red, new_blue = self.env.rate([[self.trueskills[number] for number in red_alliance],
                                           [self.trueskills[number] for number in blue_alliance]], ranks)
# Store the new values
new_ratings = new_red + new_blue
for rating, team_number in zip(new_ratings, red_alliance + blue_alliance):
self.trueskills[team_number] = rating
def predict(self, red_alliance, blue_alliance):
        proba = self.env.quality([[self.trueskills[number] for number in red_alliance],
                                  [self.trueskills[number] for number in blue_alliance]])
        return round((1.0 - proba) * 100)  # report as a whole-number percentage
def skill(self, team):
        return self.env.expose(self.trueskills[team])
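# Illustrative use of FrcTrueSkill (alliance keys and scores below are hypothetical and
# only show the call pattern; they are not taken from real TBA data):
#
#     ts = FrcTrueSkill()
#     ts.update(['frc4774', 'frc254', 'frc1114'], 95,    # red alliance, red score
#               ['frc118', 'frc971', 'frc2056'], 80)     # blue alliance, blue score
#     ts.predict(['frc4774', 'frc254', 'frc1114'],
#                ['frc118', 'frc971', 'frc2056'])        # 1 - match quality, in percent
#     ts.skill('frc4774')                                # conservative rating via env.expose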
def parse_matches(matches, env, predict=False):
count = 0.0
draws = 0.0
# Initialise our trueskills dictionary
trueskills = {}
for row in matches:
alliances = row['alliances']
red_alliance = alliances['red']['teams']
blue_alliance = alliances['blue']['teams']
# Calculate teams per alliance
for alliance in [red_alliance, blue_alliance]:
for team in alliance:
if not team in trueskills:
trueskills[team] = env.Rating()
# Update ratings based on result
if alliances['red']['score'] == alliances['blue']['score']: # Tied
if alliances['red']['score'] == -1:
if predict:
                    proba = env.quality([[trueskills[number] for number in red_alliance],
                                         [trueskills[number] for number in blue_alliance]])
print(row['match_number'], [str(number)[3:] for number in red_alliance], [str(number)[3:] for number in blue_alliance], "Win probability: %2.0f:%2.0f" %((1.0-proba)*100,proba*100))
else:
continue # No result yet
ranks = [0, 0]
draws = draws + 1
elif alliances['red']['score'] > alliances['blue']['score']: # Red beat blue
ranks = [0, 1] # Lower is better
else:
ranks = [1, 0]
new_red, new_blue = env.rate([[trueskills[number] for number in red_alliance],
[trueskills[number] for number in blue_alliance]], ranks)
count = count + 1
# Store the new values
new_ratings = new_red + new_blue
for rating, team_number in zip(new_ratings, red_alliance + blue_alliance):
trueskills[team_number] = rating
if not predict:
if count > 0:
print("Draw rate: %f" % (draws / count))
print("Matches: %i" % count)
return trueskills
def get_all_matches(year):
matches = []
events = tba.tba_get('events/%s' % year)
for event in events:
matches += tba.event_get(event['key']).matches
return sorted(matches, key=lambda k: float('inf') if k['time'] is None else k['time'])
def sort_by_trueskill(trueskills, env):
return sorted(trueskills.items(), key=lambda k: env.expose(k[1]), reverse=True) # Sort by trueskill
def sort_by_name(trueskills):
return sorted(trueskills.items(), key=lambda k: ('0000' + k[0][3:])[-4:]) # Sort by team number
def print_trueskills(trueskills, env):
for k,v in trueskills:
print('%s: %f' % (k, env.expose(v)))
if __name__ == '__main__':
import datetime
now = datetime.datetime.now()
tba.set_api_key('frc4774', 'trueskill', '1.0')
parser = argparse.ArgumentParser(description='Run TrueSkill algorithm on event results.')
parser.add_argument('--predict', help='Predict unplayed matches', dest='predict', action='store_true')
parser.add_argument('--year', help='All matches in all events in specified year', type=str, default=str(now.year))
args = parser.parse_args()
# Set the draw probability based on previous data - around 3%
env = TrueSkill(draw_probability=0.025) # Try tweaking tau and beta too
matches = get_all_matches(args.year)
results = parse_matches(matches, env)
results = sort_by_trueskill(results, env)
#results = sort_by_name(results)
print_trueskills(results, env)
|
Ninjakow/TrueSkill
|
ranking.py
|
Python
|
gpl-3.0
| 5,051
|
# Generated by Django 2.2.4 on 2019-08-26 18:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('helpdesk', '0027_auto_20190826_0700'),
]
operations = [
migrations.AlterField(
model_name='followup',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='kbitem',
name='voted_by',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='queue',
name='default_owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='default_owner', to=settings.AUTH_USER_MODEL, verbose_name='Default owner'),
),
migrations.AlterField(
model_name='savedsearch',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='ticket',
name='assigned_to',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assigned_to', to=settings.AUTH_USER_MODEL, verbose_name='Assigned to'),
),
migrations.AlterField(
model_name='ticketcc',
name='user',
field=models.ForeignKey(blank=True, help_text='User who wishes to receive updates for this ticket.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='usersettings',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='usersettings_helpdesk', to=settings.AUTH_USER_MODEL),
),
]
|
auto-mat/klub
|
local_migrations/migrations_helpdesk/0028_auto_20190826_2034.py
|
Python
|
gpl-3.0
| 2,149
|
# -*- coding: utf-8 -*-
# * Authors:
# * TJEBBES Gaston <g.t@majerti.fr>
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
import os
from pyramid.httpexceptions import HTTPFound
from autonomie.forms.admin import (
ActivityConfigSchema,
)
from autonomie.models.activity import (
ActivityType,
ActivityMode,
ActivityAction,
)
from autonomie.views.admin.accompagnement import (
BaseAdminAccompagnement,
AccompagnementIndexView,
ACCOMPAGNEMENT_URL,
)
ACTIVITY_URL = os.path.join(ACCOMPAGNEMENT_URL, 'activity')
class AdminActivitiesView(BaseAdminAccompagnement):
"""
Activities Admin view
"""
title = u"Configuration du module de Rendez-vous"
schema = ActivityConfigSchema(title=u"")
route_name = ACTIVITY_URL
def before(self, form):
query = ActivityType.query()
types = query.filter_by(active=True)
modes = ActivityMode.query()
query = ActivityAction.query()
query = query.filter_by(parent_id=None)
actions = query.filter_by(active=True)
activity_appstruct = {
'footer': self.request.config.get("activity_footer", ""),
'types': [type_.appstruct() for type_ in types],
'modes': [mode.appstruct() for mode in modes],
'actions': self._recursive_action_appstruct(actions)
}
self._add_pdf_img_to_appstruct('activity', activity_appstruct)
form.set_appstruct(activity_appstruct)
def submit_success(self, activity_appstruct):
"""
Handle successfull activity configuration
"""
self.store_pdf_conf(activity_appstruct, 'activity')
# We delete the elements that are no longer in the appstruct
self.disable_types(activity_appstruct)
self.disable_actions(activity_appstruct, ActivityAction)
new_modes = self.delete_modes(activity_appstruct)
self.dbsession.flush()
self.add_types(activity_appstruct)
self.add_actions(activity_appstruct, "actions", ActivityAction)
self.add_modes(new_modes)
self.request.session.flash(self.validation_msg)
return HTTPFound(
self.request.route_path(self.parent_view.route_name)
)
def includeme(config):
config.add_route(ACTIVITY_URL, ACTIVITY_URL)
config.add_admin_view(AdminActivitiesView, parent=AccompagnementIndexView)
|
CroissanceCommune/autonomie
|
autonomie/views/admin/accompagnement/activities.py
|
Python
|
gpl-3.0
| 2,419
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
import sys
from constants import *
#For using unicode utf-8
reload(sys).setdefaultencoding("utf-8")
from PyQt4 import QtCore
from PyQt4 import QtGui
from uiQt_aboutdialog import Ui_aboutDialog
class aboutDialog(QtGui.QDialog, Ui_aboutDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
self.setupUi(self)
self.fillAll()
def fillAll(self):
name = """\
<b>%s %s</b><p>
%s<p>
%s"""% (NAME, VERSION, SUMMARY, DESCRIPTION)
self.nameAndVersion.setText(name.decode("utf-8"))
developers = """\
<b>Core Developer:</b><p>
%s<p>
%s"""% (CORE_DEVELOPER, CORE_EMAIL)
self.developersText.setText(developers.decode("utf-8"))
self.translatorsText.setText(TRANSLATORS.decode("utf-8"))
licenseFile = QtCore.QFile("COPYING")
if not licenseFile.open(QtCore.QIODevice.ReadOnly):
license = LICENSE_NAME
else:
textstream = QtCore.QTextStream(licenseFile)
textstream.setCodec("UTF-8")
license = textstream.readAll()
self.licenseText.setText(license)
iconsLicense = "CC by-nc-sa\nhttp://creativecommons.org/licenses/by-nc-sa/3.0/"
self.iconsLicenseText.setText(iconsLicense)
|
osmank3/bilge-katalog-old
|
qt/aboutDialog.py
|
Python
|
gpl-3.0
| 1,347
|
#!/usr/bin/env python3
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from models.settingsmodel import SettingsModel
from views.clipboardview import ClipBoardView
from views.downloadview import DownloadView
from views.settingsview import SettingsDialog, SettingsToolBar, ConfigurableToolBar
import re
from plugins import * # don't remove this line!
__author__ = "C. Wilhelm"
___license___ = "GPL v3"
class TrayIcon(QSystemTrayIcon):
def __init__(self, main_window):
QSystemTrayIcon.__init__(self, main_window)
self.mainWindow = main_window
self.activated.connect(self.trayActivated)
self.setIcon(QIcon("../img/icon.png"))
self.minimizeAction = QAction("Mi&nimize", self, triggered=main_window.hide)
self.maximizeAction = QAction("Ma&ximize", self, triggered=main_window.showMaximized)
self.restoreAction = QAction("&Restore", self, triggered=main_window.showNormal)
self.quitAction = QAction("&Quit", self, shortcut="Ctrl+Q", triggered=main_window.close)
self.quitAction.setIcon(QIcon.fromTheme("application-exit"))
menu = QMenu(main_window)
menu.addAction(self.minimizeAction)
menu.addAction(self.maximizeAction)
menu.addAction(self.restoreAction)
menu.addSeparator()
menu.addAction(self.quitAction)
self.setContextMenu(menu)
if QSystemTrayIcon.isSystemTrayAvailable():
self.show()
def trayActivated(self, reason):
if reason == QSystemTrayIcon.ActivationReason.DoubleClick:
self.mainWindow.showNormal() if self.mainWindow.isHidden() else self.mainWindow.hide()
class SearchBar(QLineEdit):
def __init__(self, callback=None):
QLineEdit.__init__(self)
self.button = QToolButton(self)
self.button.setIcon(QIcon("../img/edit-clear.png"))
self.button.setCursor(Qt.ArrowCursor)
self.button.clicked.connect(self.clear)
self.button.hide()
self.textChanged.connect(self.toggleButton)
if callback is not None:
self.returnPressed.connect(lambda: callback(self.text()))
# self.setFixedHeight(28)
self.setPlaceholderText(" < Search Term or URL >")
def resizeEvent(self, event):
self.button.setStyleSheet("QToolButton {margin: 0 0 0 0; border: 0px;}")
x = self.size().width() - self.button.sizeHint().width() - 2
y = (self.size().height() + 1 - self.button.sizeHint().height()) / 2
self.button.move(x, y)
def toggleButton(self):
self.button.setVisible(bool(self.text()))
class MainToolBar(ConfigurableToolBar):
def __init__(self, main_window):
ConfigurableToolBar.__init__(self, "Toolbar", main_window)
self.mainWindow = main_window
self.searchBar = SearchBar(callback=main_window.search)
self.openAction = QAction("&Open Container File", self, triggered=main_window.open)
self.startAction = QAction("&Start Downloads", self, triggered=self.togglePause)
self.pauseAction = QAction("&Pause Downloads", self, triggered=self.togglePause)
self.settingsAction = QAction("Prefere&nces", self, triggered=main_window.showSettings)
self.searchAction = QAction("Search Button", self, triggered=lambda: self.searchBar.returnPressed.emit())
self.openAction.setIcon(QIcon.fromTheme("folder-open"))
self.startAction.setIcon(QIcon.fromTheme("media-playback-start"))
self.pauseAction.setIcon(QIcon.fromTheme("media-playback-pause"))
self.settingsAction.setIcon(QIcon.fromTheme("emblem-system"))
self.searchAction.setIcon(QIcon.fromTheme("system-search"))
self.searchBarAction = QWidgetAction(self)
self.searchBarAction.setText("Search Bar") # make it checkable in the menu of visible actions
self.searchBarAction.setDefaultWidget(self.searchBar)
self.startButton = QToolButton(self)
self.startButton.setDefaultAction(self.startAction)
self.startButtonAction = QWidgetAction(self)
self.startButtonAction.setText("Start/Pause Downloads")
self.startButtonAction.setDefaultWidget(self.startButton)
self.addAction(self.openAction)
self.addAction(self.searchBarAction)
self.addAction(self.searchAction)
self.addAction(self.startButtonAction)
self.addAction(self.settingsAction)
def togglePause(self):
if self.startButton.defaultAction() == self.pauseAction:
self.startButton.removeAction(self.pauseAction)
self.startButton.setDefaultAction(self.startAction)
self.startAction.setDisabled(False)
self.pauseAction.setDisabled(True)
self.mainWindow.downloadView.model().pause()
else:
self.startButton.removeAction(self.startAction)
self.startButton.setDefaultAction(self.pauseAction)
self.pauseAction.setDisabled(False)
self.startAction.setDisabled(True)
self.mainWindow.tabBar.setCurrentWidget(self.mainWindow.downloadView)
self.mainWindow.downloadView.model().start()
class MainWindow(QMainWindow):
aboutToQuit = pyqtSignal()
def __init__(self):
QMainWindow.__init__(self)
self.setWindowTitle("Media Fetcher")
self.setWindowIcon(QIcon("../img/icon.png"))
self.tray = TrayIcon(self)
self.settings = QSettings(QSettings.IniFormat, QSettings.UserScope, "MediaFetcher", "MediaFetcher")
self.settingsPath = QFileInfo(self.settings.fileName()).absolutePath()
self.settingsModel = SettingsModel(self.settings)
self.settingsDialog = SettingsDialog(self, self.settingsModel)
self.statusBar = SettingsToolBar(self, self.settingsModel)
self.addToolBar(Qt.BottomToolBarArea, self.statusBar)
self.toolBar = MainToolBar(self)
self.addToolBar(Qt.TopToolBarArea, self.toolBar)
self.initMenus()
self.initTabs()
self.loadSettings()
self.aboutToQuit.connect(self.writeSettings)
# monitor Clipboard
QApplication.clipboard().dataChanged.connect(self.clipBoardChanged)
self.clipboardView.addURL("https://www.youtube.com/watch?v=IsBOoY2zvC0")
def closeEvent(self, event):
# http://qt-project.org/doc/qt-5.0/qtwidgets/qwidget.html#closeEvent
# http://qt-project.org/doc/qt-5.0/qtcore/qcoreapplication.html#aboutToQuit
self.aboutToQuit.emit()
def loadSettings(self):
self.resize(600, 400)
def writeSettings(self):
pass
def showSettings(self):
self.settingsDialog.open()
def initMenus(self):
# toolbar actions may be set to invisible (exceptions: start, pause), so the main menu can't use these!
self.openAction = QAction("&Open Container File", self, shortcut=QKeySequence.Open, triggered=self.open)
self.settingsAction = QAction("Prefere&nces", self, triggered=self.showSettings)
self.openAction.setIcon(QIcon.fromTheme("folder-open"))
self.settingsAction.setIcon(QIcon.fromTheme("emblem-system"))
self.fileMenu = QMenu("&File", self)
self.fileMenu.addAction(self.openAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.toolBar.startAction)
self.fileMenu.addAction(self.toolBar.pauseAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.tray.quitAction)
self.editMenu = QMenu("&Edit", self)
self.editMenu.addAction(self.settingsAction)
self.viewMenu = QMenu("&View", self)
self.viewMenu.addAction(self.toolBar.toggleViewAction())
self.viewMenu.addAction(self.statusBar.toggleViewAction())
self.helpMenu = QMenu("&Help", self)
self.helpMenu.addAction(QAction("About", self, triggered=self.about))
self.menuBar().addMenu(self.fileMenu)
self.menuBar().addMenu(self.editMenu)
self.menuBar().addMenu(self.viewMenu)
self.menuBar().addMenu(self.helpMenu)
def addTab(self, widget, label, closable=True):
i = self.tabBar.count()
self.tabBar.addTab(widget, " %s " % label if not closable else label)
button = self.tabBar.tabBar().tabButton(i, QTabBar.RightSide)
button.setStyleSheet("QToolButton {margin: 0; padding: 0;}")
if not closable:
button.setFixedWidth(0)
self.tabBar.setCurrentIndex(i)
def initTabs(self):
self.tabBar = QTabWidget()
self.setCentralWidget(self.tabBar)
self.tabBar.setTabsClosable(True)
appropriate_height = QLineEdit().sizeHint().height()
self.tabBar.setStyleSheet("QTabBar::tab {height: %spx;}" % appropriate_height)
self.tabBar.tabCloseRequested.connect(lambda i: self.tabBar.removeTab(i))
# Downloads Tab
self.downloadView = DownloadView(self, self.settings)
self.addTab(self.downloadView, "Downloads", closable=False)
# Clipboard Tab
self.clipboardView = ClipBoardView(self, self.settings, self.downloadView)
self.addTab(self.clipboardView, "Clipboard", closable=False)
def clipBoardChanged(self):
if QApplication.clipboard().mimeData().hasText():
self.addURL(QApplication.clipboard().text())
def open(self):
fileName = QFileDialog.getOpenFileName(self, "Open File", QDir.homePath())
if fileName:
pass
def about(self):
QMessageBox.about(self, "About Media Fetcher", "Text")
def addURL(self, url):
# TODO: ignore/warn/ask when url is already in the clipboard
for url in re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+])+', url):
self.clipboardView.addURL(url)
self.tabBar.setCurrentWidget(self.clipboardView)
def search(self, text):
text = text.strip()
if text == "":
return
if 'http' in text:
return self.addURL(text)
searchwidget = QLabel("placeholder")
self.addTab(searchwidget, "Search for %s" % text)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
MainWindow().show()
sys.exit(app.exec_())
|
valmynd/MediaFetcher
|
src/main.py
|
Python
|
gpl-3.0
| 9,141
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides class BaseCodeEditor; base class for
CodeEditor class in Coder
and CodeBox class in dlgCode (code component)
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import wx
import wx.stc
import sys
from pkg_resources import parse_version
from psychopy.constants import PY3
from psychopy import logging
from psychopy import prefs
from ..themes import ThemeMixin
from psychopy.localization import _translate
class BaseCodeEditor(wx.stc.StyledTextCtrl, ThemeMixin):
"""Provides base class for code editors
See the wxPython demo styledTextCtrl 2.
"""
def __init__(self, parent, ID, pos, size, style):
wx.stc.StyledTextCtrl.__init__(self, parent, ID, pos, size, style)
self.notebook = parent
self.UNSAVED = False
self.filename = ""
self.fileModTime = None # was file modified outside of CodeEditor
self.AUTOCOMPLETE = True
self.autoCompleteDict = {}
self._commentType = {'Py': '#', 'JS': '//', 'Both': '//' or '#'}
# doesn't pause strangely
self.locals = None # will contain the local environment of the script
self.prevWord = None
# remove some annoying stc key commands
CTRL = wx.stc.STC_SCMOD_CTRL
self.CmdKeyClear(ord('['), CTRL)
self.CmdKeyClear(ord(']'), CTRL)
self.CmdKeyClear(ord('/'), CTRL)
self.CmdKeyClear(ord('/'), CTRL | wx.stc.STC_SCMOD_SHIFT)
# 4 means 'tabs are bad'; 1 means 'flag inconsistency'
self.SetMargins(0, 0)
self.SetUseTabs(False)
self.SetTabWidth(4)
self.SetIndent(4)
self.SetBufferedDraw(False)
self.SetEOLMode(wx.stc.STC_EOL_LF)
# setup margins for line numbers
self.SetMarginType(0, wx.stc.STC_MARGIN_NUMBER)
self.SetMarginWidth(0, 40)
# Setup a margin to hold fold markers
self.SetMarginType(1, wx.stc.STC_MARGIN_SYMBOL)
self.SetMarginMask(1, wx.stc.STC_MASK_FOLDERS)
self.SetMarginSensitive(1, True)
self.SetMarginWidth(1, 12)
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPEN,
wx.stc.STC_MARK_BOXMINUS, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDER,
wx.stc.STC_MARK_BOXPLUS, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERSUB,
wx.stc.STC_MARK_VLINE, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERTAIL,
wx.stc.STC_MARK_LCORNER, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEREND,
wx.stc.STC_MARK_BOXPLUSCONNECTED, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPENMID,
wx.stc.STC_MARK_BOXMINUSCONNECTED, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERMIDTAIL,
wx.stc.STC_MARK_TCORNER, "white", "#808080")
# Set what kind of events will trigger a modified event
self.SetModEventMask(wx.stc.STC_MOD_DELETETEXT |
wx.stc.STC_MOD_INSERTTEXT)
# Bind context menu
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
def OnContextMenu(self, event):
"""Sets the context menu for components using code editor base class"""
if not hasattr(self, "UndoID"):
# Create a new ID for all items
self.UndoID = wx.NewId()
self.RedoID = wx.NewId()
self.CutID = wx.NewId()
self.CopyID = wx.NewId()
self.PasteID = wx.NewId()
self.DeleteID = wx.NewId()
self.SelectAllID = wx.NewId()
# Bind items to relevant method
self.Bind(wx.EVT_MENU, self.onUndo, id=self.UndoID)
self.Bind(wx.EVT_MENU, self.onRedo, id=self.RedoID)
self.Bind(wx.EVT_MENU, self.onCut, id=self.CutID)
self.Bind(wx.EVT_MENU, self.onCopy, id=self.CopyID)
self.Bind(wx.EVT_MENU, self.onPaste, id=self.PasteID)
self.Bind(wx.EVT_MENU, self.onDelete, id=self.DeleteID)
self.Bind(wx.EVT_MENU, self.onSelectAll, id=self.SelectAllID)
# Create menu and menu items
menu = wx.Menu()
undoItem = wx.MenuItem(menu, self.UndoID, _translate("Undo"))
redoItem = wx.MenuItem(menu, self.RedoID, _translate("Redo"))
cutItem = wx.MenuItem(menu, self.CutID, _translate("Cut"))
copyItem = wx.MenuItem(menu, self.CopyID, _translate("Copy"))
pasteItem = wx.MenuItem(menu, self.PasteID, _translate("Paste"))
deleteItem = wx.MenuItem(menu, self.DeleteID, _translate("Delete"))
selectItem = wx.MenuItem(menu, self.SelectAllID, _translate("Select All"))
# Check whether items should be enabled
undoItem.Enable(self.CanUndo())
redoItem.Enable(self.CanRedo())
cutItem.Enable(self.CanCut())
copyItem.Enable(self.CanCopy())
pasteItem.Enable(self.CanPaste())
deleteItem.Enable(self.CanCopy())
# Append items to menu
menu.Append(undoItem)
menu.Append(redoItem)
menu.AppendSeparator()
menu.Append(cutItem)
menu.Append(copyItem)
menu.Append(pasteItem)
menu.AppendSeparator()
menu.Append(deleteItem)
menu.Append(selectItem)
self.PopupMenu(menu)
menu.Destroy()
def onUndo(self, event):
"""For context menu Undo"""
foc = self.FindFocus()
if hasattr(foc, 'Undo'):
foc.Undo()
def onRedo(self, event):
"""For context menu Redo"""
foc = self.FindFocus()
if hasattr(foc, 'Redo'):
foc.Redo()
def onCut(self, event):
"""For context menu Cut"""
foc = self.FindFocus()
if hasattr(foc, 'Cut'):
foc.Cut()
def onCopy(self, event):
"""For context menu Copy"""
foc = self.FindFocus()
if hasattr(foc, 'Copy'):
foc.Copy()
def onPaste(self, event):
"""For context menu Paste"""
foc = self.FindFocus()
if hasattr(foc, 'Paste'):
foc.Paste()
def onSelectAll(self, event):
"""For context menu Select All"""
foc = self.FindFocus()
if hasattr(foc, 'SelectAll'):
foc.SelectAll()
def onDelete(self, event):
"""For context menu Delete"""
foc = self.FindFocus()
if hasattr(foc, 'DeleteBack'):
foc.DeleteBack()
def OnKeyPressed(self, event):
pass
def HashtagCounter(self, text, nTags=0):
# Hashtag counter - counts lines beginning with hashtags in selected text
for lines in text.splitlines():
if lines.startswith('#'):
nTags += 1
elif lines.startswith('//'):
nTags += 2
return nTags
def toggleCommentLines(self):
codeType = "Py"
if hasattr(self, "codeType"):
codeType = self.codeType
startText, endText = self._GetPositionsBoundingSelectedLines()
nLines = len(self._GetSelectedLineNumbers())
nHashtags = self.HashtagCounter(self.GetTextRange(startText, endText))
passDec = False # pass decision - only pass if line is blank
        # Test decision criteria, and catch division errors
# when caret starts at line with no text, or at beginning of line...
try:
devCrit, decVal = .6, nHashtags / nLines # Decision criteria and value
except ZeroDivisionError:
if self.LineLength(self.GetCurrentLine()) == 1:
self._ReplaceSelectedLines(self._commentType[codeType])
devCrit, decVal, passDec = 1, 0, True
else:
self.CharRightExtend() # Move caret so line is counted
devCrit, decVal = .6, nHashtags / len(self._GetSelectedLineNumbers())
newText = ''
        # Add or remove hashtags/JS comments from selected text, but pass if # was added to a blank line
if decVal < devCrit and passDec == False:
for lineNo in self._GetSelectedLineNumbers():
lineText = self.GetLine(lineNo)
newText = newText + self._commentType[codeType] + lineText
elif decVal >= devCrit and passDec == False:
for lineNo in self._GetSelectedLineNumbers():
lineText = self.GetLine(lineNo)
if lineText.startswith(self._commentType[codeType]):
lineText = lineText[len(self._commentType[codeType]):]
newText = newText + lineText
self._ReplaceSelectedLines(newText)
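    # Worked example of the decision above (hypothetical selection): with 4 selected
    # lines of which 1 starts with '#', decVal = 1/4 = 0.25 < 0.6, so the comment
    # marker is prepended to every line; with 3 of 4 lines already commented,
    # decVal = 0.75 >= 0.6 and the markers are stripped instead.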
def _GetSelectedLineNumbers(self):
# used for the comment/uncomment machinery from ActiveGrid
selStart, selEnd = self._GetPositionsBoundingSelectedLines()
start = self.LineFromPosition(selStart)
end = self.LineFromPosition(selEnd)
if selEnd == self.GetTextLength():
end += 1
return list(range(start, end))
def _GetPositionsBoundingSelectedLines(self):
# used for the comment/uncomment machinery from ActiveGrid
startPos = self.GetCurrentPos()
endPos = self.GetAnchor()
if startPos > endPos:
startPos, endPos = endPos, startPos
if endPos == self.PositionFromLine(self.LineFromPosition(endPos)):
# If it's at the very beginning of a line, use the line above it
# as the ending line
endPos = endPos - 1
selStart = self.PositionFromLine(self.LineFromPosition(startPos))
selEnd = self.PositionFromLine(self.LineFromPosition(endPos) + 1)
return selStart, selEnd
def _ReplaceSelectedLines(self, text):
# used for the comment/uncomment machinery from ActiveGrid
# If multi line selection - keep lines selected
# For single lines, move to next line and select that line
if len(text) == 0:
return
selStart, selEnd = self._GetPositionsBoundingSelectedLines()
self.SetSelection(selStart, selEnd)
self.ReplaceSelection(text)
if len(text.splitlines()) > 1:
self.SetSelection(selStart, selStart + len(text))
else:
self.SetSelection(
self.GetCurrentPos(),
self.GetLineEndPosition(self.GetCurrentLine()))
def smartIdentThisLine(self):
codeType = "Py"
if hasattr(self, "codeType"):
codeType = self.codeType
startLineNum = self.LineFromPosition(self.GetSelectionStart())
endLineNum = self.LineFromPosition(self.GetSelectionEnd())
prevLine = self.GetLine(startLineNum - 1)
prevIndent = self.GetLineIndentation(startLineNum - 1)
signal = {'Py': ':', 'JS': '{'}
# set the indent
self.SetLineIndentation(startLineNum, prevIndent)
self.VCHome()
# check for a colon (Python) or curly brace (JavaScript) to signal an indent
prevLogical = prevLine.split(self._commentType[codeType])[0]
prevLogical = prevLogical.strip()
if len(prevLogical) > 0 and prevLogical[-1] == signal[codeType]:
self.CmdKeyExecute(wx.stc.STC_CMD_TAB)
elif len(prevLogical) > 0 and prevLogical[-1] == '}' and codeType == 'JS':
self.CmdKeyExecute(wx.stc.STC_SCMOD_SHIFT + wx.stc.STC_CMD_TAB)
def smartIndent(self):
# find out about current positions and indentation
startLineNum = self.LineFromPosition(self.GetSelectionStart())
endLineNum = self.LineFromPosition(self.GetSelectionEnd())
prevLine = self.GetLine(startLineNum - 1)
prevIndent = self.GetLineIndentation(startLineNum - 1)
startLineIndent = self.GetLineIndentation(startLineNum)
# calculate how much we need to increment/decrement the current lines
incr = prevIndent - startLineIndent
# check for a colon to signal an indent decrease
prevLogical = prevLine.split('#')[0]
prevLogical = prevLogical.strip()
if len(prevLogical) > 0 and prevLogical[-1] == ':':
incr = incr + 4
# set each line to the correct indentation
self.BeginUndoAction()
for lineNum in range(startLineNum, endLineNum + 1):
thisIndent = self.GetLineIndentation(lineNum)
self.SetLineIndentation(lineNum, thisIndent + incr)
self.EndUndoAction()
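    # Example of the adjustment above (hypothetical lines): if the previous line is
    # "if x:" indented by 4 and the first selected line is indented by 8, then
    # incr = 4 - 8 + 4 = 0 and the block keeps its indentation; without the trailing
    # colon incr would be -4 and every selected line would be outdented one level.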
def shouldTrySmartIndent(self):
# used when the user presses tab key: decide whether to insert
# a tab char or whether to smart indent text
# if some text has been selected then use indentation
if len(self.GetSelectedText()) > 0:
return True
# test whether any text precedes current pos
lineText, posOnLine = self.GetCurLine()
textBeforeCaret = lineText[:posOnLine]
if textBeforeCaret.split() == []:
return True
else:
return False
def indentSelection(self, howFar=4):
# Indent or outdent current selection by 'howFar' spaces
# (which could be positive or negative int).
startLineNum = self.LineFromPosition(self.GetSelectionStart())
endLineNum = self.LineFromPosition(self.GetSelectionEnd())
# go through line-by-line
self.BeginUndoAction()
for lineN in range(startLineNum, endLineNum + 1):
newIndent = self.GetLineIndentation(lineN) + howFar
if newIndent < 0:
newIndent = 0
self.SetLineIndentation(lineN, newIndent)
self.EndUndoAction()
def Paste(self, event=None):
dataObj = wx.TextDataObject()
clip = wx.Clipboard().Get()
clip.Open()
success = clip.GetData(dataObj)
clip.Close()
if success:
txt = dataObj.GetText()
# dealing with unicode error in wx3 for Mac
if parse_version(wx.__version__) >= parse_version('3') and sys.platform == 'darwin' and not PY3:
try:
# if we can decode from utf-8 then all is good
txt.decode('utf-8')
except Exception as e:
logging.error(str(e))
# if not then wx conversion broke so get raw data instead
txt = dataObj.GetDataHere()
self.ReplaceSelection(txt.replace("\r\n", "\n").replace("\r", "\n"))
self.analyseScript()
def analyseScript(self):
"""Analyse the script."""
pass
@property
def edgeGuideVisible(self):
return self.GetEdgeMode() != wx.stc.STC_EDGE_NONE
@edgeGuideVisible.setter
def edgeGuideVisible(self, value):
if value is True:
self.SetEdgeMode(wx.stc.STC_EDGE_LINE)
else:
self.SetEdgeMode(wx.stc.STC_EDGE_NONE)
@property
def edgeGuideColumn(self):
return self.GetEdgeColumn()
@edgeGuideColumn.setter
def edgeGuideColumn(self, value):
self.SetEdgeColumn(value)
# def _applyAppTheme(self, target=None):
# """Overrides theme change from ThemeMixin.
# Don't call - this is called at the end of theme.setter"""
# # ThemeMixin._applyAppTheme() # only needed for children
# spec = ThemeMixin.codeColors
# base = spec['base']
#
# # Check for language specific spec
# if self.GetLexer() in self.lexers:
# lexer = self.lexers[self.GetLexer()]
# else:
# lexer = 'invlex'
# if lexer in spec:
# # If there is lang specific spec, delete subkey...
# lang = spec[lexer]
# del spec[lexer]
# #...and append spec to root, overriding any generic spec
# spec.update({key: lang[key] for key in lang})
# else:
# lang = {}
#
# # Override base font with user spec if present
# key = 'outputFont' if isinstance(self, wx.py.shell.Shell) else 'codeFont'
# if prefs.coder[key] != "From theme...":
# base['font'] = prefs.coder[key]
#
# # Pythonise the universal data (hex -> rgb, tag -> wx int)
# invalid = []
# for key in spec:
# # Check that key is in tag list and full spec is defined, discard if not
# if key in self.tags \
# and all(subkey in spec[key] for subkey in ['bg', 'fg', 'font']):
# spec[key]['bg'] = self.hex2rgb(spec[key]['bg'], base['bg'])
# spec[key]['fg'] = self.hex2rgb(spec[key]['fg'], base['fg'])
# if not spec[key]['font']:
# spec[key]['font'] = base['font']
# spec[key]['size'] = int(self.prefs['codeFontSize'])
# else:
# invalid += [key]
# for key in invalid:
# del spec[key]
# # Set style for undefined lexers
# for key in [getattr(wx._stc, item) for item in dir(wx._stc) if item.startswith("STC_LEX")]:
# self.StyleSetBackground(key, base['bg'])
# self.StyleSetForeground(key, base['fg'])
# self.StyleSetSpec(key, "face:%(font)s,size:%(size)d" % base)
# # Set style from universal data
# for key in spec:
# if self.tags[key] is not None:
# self.StyleSetBackground(self.tags[key], spec[key]['bg'])
# self.StyleSetForeground(self.tags[key], spec[key]['fg'])
# self.StyleSetSpec(self.tags[key], "face:%(font)s,size:%(size)d" % spec[key])
# # Apply keywords
# for level, val in self.lexkw.items():
# self.SetKeyWords(level, " ".join(val))
#
# # Make sure there's some spec for margins
# if 'margin' not in spec:
# spec['margin'] = base
# # Set margin colours to match linenumbers if set
# if 'margin' in spec:
# mar = spec['margin']['bg']
# else:
# mar = base['bg']
# self.SetFoldMarginColour(True, mar)
# self.SetFoldMarginHiColour(True, mar)
#
# # Make sure there's some spec for caret
# if 'caret' not in spec:
# spec['caret'] = base
# # Set caret colour
# self.SetCaretForeground(spec['caret']['fg'])
# self.SetCaretLineBackground(spec['caret']['bg'])
# self.SetCaretWidth(1 + ('bold' in spec['caret']['font']))
#
# # Make sure there's some spec for selection
# if 'select' not in spec:
# spec['select'] = base
# spec['select']['bg'] = self.shiftColour(base['bg'], 30)
# # Set selection colour
# self.SetSelForeground(True, spec['select']['fg'])
# self.SetSelBackground(True, spec['select']['bg'])
#
# # Set wrap point
# self.edgeGuideColumn = self.prefs['edgeGuideColumn']
# self.edgeGuideVisible = self.edgeGuideColumn > 0
#
# # Set line spacing
# spacing = min(int(self.prefs['lineSpacing'] / 2), 64) # Max out at 64
# self.SetExtraAscent(spacing)
# self.SetExtraDescent(spacing)
|
psychopy/versions
|
psychopy/app/coder/codeEditorBase.py
|
Python
|
gpl-3.0
| 19,323
|
import re
import keyword
import logging
import builtins
from keypad.api import BufferController, autoconnect
from keypad.core.syntaxlib import SyntaxHighlighter, lazy
_python_kwlist = frozenset(keyword.kwlist) - frozenset('from import None False True'.split())
_python_builtins = frozenset(x for x in dir(builtins) if not isinstance(getattr(builtins, x), type))
_python_types = frozenset(x for x in dir(builtins) if isinstance(getattr(builtins, x), type))
@lazy
def pylexer():
from keypad.core.syntaxlib import keyword, regex, region
Keyword = keyword(_python_kwlist, dict(lexcat='keyword'))
Import = keyword('from import'.split(), dict(lexcat='keyword.modulesystem'))
Const = keyword(_python_builtins, dict(lexcat='identifier.constant'))
Type = keyword(_python_types, dict(lexcat='identifier.type'))
ESCAPE = dict(lexcat='literal.string.escape')
STRING = dict(lexcat='literal.string')
COMMENT = dict(lexcat='comment')
FUNCTION = dict(lexcat='identifier.function')
TODO = dict(lexcat='todo')
SIGIL = dict(lexcat='punctuation.sigil')
NUMBER = dict(lexcat='literal.numeric')
Todo = regex(r'\btodo:|\bfixme:|\bhack:', TODO, flags=re.IGNORECASE)
Comment = region(guard=regex('#'),
exit=regex('$'),
contains=[Todo],
attrs=COMMENT)
HEX = r'[a-fA-F0-9]'
Esc1 = regex(r'''\\[abfnrtv'"\\]''', ESCAPE)
    Esc2 = regex(r'''\\[0-7]{1,3}''', ESCAPE)  # octal escapes such as \012
Esc3 = regex(r'''\\x[a-fA-F0-9]{2}''', ESCAPE)
Esc4 = regex(r'\\u' + HEX + r'{4}|\\U' + HEX + '{8}', ESCAPE)
Esc5 = regex(r'\\N\{[a-zA-Z]+(?:\s[a-zA-Z]+)*}', ESCAPE)
Esc6 = regex(r'\\$', ESCAPE)
DQDoctest = region(
guard=regex(r'^\s*>>>\s'),
exit=regex(r'$|(?=""")'),
contains=(),
attrs=ESCAPE
)
SQDoctest = region(
guard=regex(r'^\s*>>>\s'),
exit=regex(r"$|(?=''')"),
contains=(),
attrs=ESCAPE
)
Escs = [Esc1, Esc2, Esc3, Esc4, Esc5, Esc6]
DQString = region(
guard=regex(r'"(?!"")'),
exit=regex(r'"'),
contains=Escs,
attrs=STRING
)
SQString = region(
guard=regex(r"'(?!'')"),
exit=regex(r"'"),
contains=Escs,
attrs=STRING
)
TDQString = region(
guard=regex(r'"""'),
exit=regex(r'"""'),
contains=Escs + [DQDoctest],
attrs=STRING
)
TSQString = region(
guard=regex(r"'''"),
exit=regex(r"'''"),
contains=Escs + [SQDoctest],
attrs=STRING
)
def make_raw_string(quote):
return region(
guard=regex(r"r" + quote),
exit=regex(r"\\\\" + quote + "|" + r"(?<!\\)" + quote),
contains=[regex(r"(?<!\\)\\" + quote, ESCAPE)],
attrs=STRING
)
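    # e.g. make_raw_string('"') yields a region that opens at r" and closes either at
    # \\" (an escaped backslash followed by the quote) or at a " not preceded by a
    # backslash; a lone \" inside the literal is lexed as an escape and does not
    # terminate the string.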
RSQString = make_raw_string("'")
RDQString = make_raw_string('"')
RTSQString = make_raw_string("'''")
RTDQString = make_raw_string('"""')
FloatLiteral = regex(r'\b\d*\.\d+', NUMBER)
IntLiteral = regex(r'\b\d+L?', NUMBER)
HexLiteral = regex(r'\b0x' + HEX + r'+L?', NUMBER)
OctLiteral = regex(r'\b0o[0-7]+L?', NUMBER)
BinLiteral = regex(r'\b0b[01]+L?', NUMBER)
FuncDef = regex(r'(?:(?<=\bdef)|(?<=\bclass)|(?<=@))\s+\w+', FUNCTION)
Deco = regex(r'(?<=@)\s*[\w.]+', FUNCTION)
CommAt = regex(re.escape('@'), SIGIL)
PythonLexers = [
Keyword,
Const,
Import,
DQString,
SQString,
TDQString,
TSQString,
RSQString,
RDQString,
IntLiteral,
HexLiteral,
OctLiteral,
BinLiteral,
FloatLiteral,
Comment,
FuncDef,
CommAt,
RTSQString,
RTDQString,
Deco,
Type
]
DQDoctest.contains = tuple(PythonLexers)
SQDoctest.contains = tuple(PythonLexers)
Python = region(
guard=None,
exit=None,
contains=PythonLexers
)
return Python
@autoconnect(BufferController.buffer_needs_highlight,
lambda tags: tags.get('syntax') == 'python')
def python_syntax_highlighting(controller):
highlighter = SyntaxHighlighter('keypad.plugins.pycomplete.syntax', pylexer(), dict(lexcat=None))
highlighter.highlight_buffer(controller.buffer)
def main():
from keypad.plugins.semantics.syntaxlib import Tokenizer
from keypad.core import AttributedString
from keypad.buffers import Buffer
buf = Buffer()
buf.insert((0,0), "'\\b")
highlighter = SyntaxHighlighter('h', pylexer(), dict(lexcat=None))
highlighter.highlight_buffer(buf)
print(buf.lines[0])
if __name__ == '__main__':
main()
|
sam-roth/Keypad
|
keypad/plugins/pymodel/syntax.py
|
Python
|
gpl-3.0
| 5,339
|
"""
__init__.py
ist303-miye
Copyright (C) 2017
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
"""
from .cwebview import *
|
morpheby/ist303-miye
|
client/__init__.py
|
Python
|
gpl-3.0
| 755
|
"""
model.py
by Ted Morin
contains a function to predict 10-year Atrial Fibrillation risks using beta coefficients from
10.1016:S0140-6736(09)60443-8
2010 Development of a Risk Score for Atrial Fibrillation in the Community
Framingham Heart Study
translated and optimized from FHS online risk calculator's javascript
function expects parameters of:
    "Male Sex"                          bool
    "Age"                               int/float  (years)
    "BMI"                               int/float  (kg/m^2)
    "Systolic BP"                       int/float  (mm Hg)
    "Antihypertensive Medication Use"   bool
    "PR Interval"                       int/float  (mSec)
    "Sig. Murmur"                       bool
    "Prev Heart Fail"                   bool
"""
"""
# originally part of the function, calculates xbar_value
xbar_values = np.array([
0.4464, # gender
60.9022, # age
26.2861, # bmi
136.1674, # sbp
0.2413, # hrx
16.3901, # pr_intv
0.0281, # vhd
0.0087, # hxchf
3806.9000, # age2
1654.6600, # gender_age2
1.8961, # age_vhd
0.6100 # age_hxchf
])
xbar_value = np.dot(xbar_values,betas) # this constant should be hard coded like s0!
# (and now it is)
"""
def model(ismale, age, bmi, sbp, antihyp, pr_intv, sigmurm, phf):
# convert seconds to milliseconds as used in regression
pr_intv = pr_intv * 1000.0
# inexplicable conversion
pr_intv = pr_intv / 10.0
# this was done in the js, and the output seems much more realistic than otherwise, but it seems inexplicable!
# perhaps the coefficient shown in FHS's website is erroneous? Or uses the wrong units? It is hard to say.
import numpy as np
# betas
betas = np.array([
1.994060, #gender
0.150520, #age
0.019300, #bmi Body Mass Index
0.00615, #sbp Systolic Blood Pressure
0.424100, #hrx Treatment for hypertension
0.070650, #pr_intv PR interval
3.795860, #vhd Significant Murmur
9.428330, #hxchf Prevalent Heart Failure
-0.000380, #age2 age squared
-0.000280, #gender_age2 male gender times age squared
-0.042380, #age_vhd age times murmur
-0.123070 #age_hxchf age times prevalent heart failure
])
s0 = 0.96337 # "const is from the spreadsheet"
xbar_value = 10.785528582
values = [ismale, age, bmi, sbp, antihyp, pr_intv, sigmurm, phf]
# calculate derived values
values.append(age*age) # age squared
values.append(ismale*age*age) # gender times age squared
values.append(sigmurm*age) # age times significant murmur
values.append(phf*age)
values = np.array(values)
# dot product
value = np.dot(values, betas)
# calculate using cox regression model
risk = 1.0 - np.power(s0, np.exp(value - xbar_value));
# cap at .3
#if (risk > .3) : risk = .3 # is this justified by the paper?
return risk
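# Illustrative call (the input values are hypothetical and only show the expected
# argument order and units; the PR interval is passed in seconds and converted above):
#
#     risk = model(ismale=True, age=61, bmi=26.3, sbp=136,
#                  antihyp=False, pr_intv=0.164, sigmurm=False, phf=False)
#
# The returned value is the 10-year risk 1 - s0 ** exp(value - xbar_value).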
|
doirisks/dori
|
models/10.1016:S0140-6736(09)60443-8/model.py
|
Python
|
gpl-3.0
| 3,494
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
# Copyright (C) Zing contributors.
#
# This file is a part of the Zing project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.db.models import Q
from pootle_statistics.models import SubmissionTypes
from pootle_store.constants import FUZZY, TRANSLATED, UNTRANSLATED
from pootle_store.util import SuggestionStates
class FilterNotFound(Exception):
pass
class BaseUnitFilter(object):
def __init__(self, qs, *args_, **kwargs_):
self.qs = qs
def filter(self, unit_filter):
try:
return getattr(
self, "filter_%s" % unit_filter.replace("-", "_"))()
except AttributeError:
raise FilterNotFound()
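# Dispatch example (filter name shown for illustration only): a unit_filter of
# "user-suggestions" is mapped to the method name "filter_user_suggestions", so each
# subclass only needs to define filter_* methods for the filters it supports.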
class UnitChecksFilter(BaseUnitFilter):
def __init__(self, qs, *args, **kwargs):
super(UnitChecksFilter, self).__init__(qs, *args, **kwargs)
self.checks = kwargs.get("checks")
self.category = kwargs.get("category")
def filter_checks(self):
if self.checks:
return self.qs.filter(
qualitycheck__false_positive=False,
qualitycheck__name__in=self.checks).distinct()
if self.category:
return self.qs.filter(
qualitycheck__false_positive=False,
qualitycheck__category=self.category).distinct()
return self.qs.filter(
qualitycheck__false_positive=False,
).distinct()
class UnitStateFilter(BaseUnitFilter):
"""Filter a Unit qs based on unit state"""
def filter_all(self):
return self.qs.all()
def filter_translated(self):
return self.qs.filter(state=TRANSLATED)
def filter_untranslated(self):
return self.qs.filter(state=UNTRANSLATED)
def filter_fuzzy(self):
return self.qs.filter(state=FUZZY)
def filter_incomplete(self):
return self.qs.filter(
Q(state=UNTRANSLATED) | Q(state=FUZZY))
class UnitContributionFilter(BaseUnitFilter):
"""Filter a Unit qs based on user contributions"""
def __init__(self, qs, *args, **kwargs):
super(UnitContributionFilter, self).__init__(qs, *args, **kwargs)
self.user = kwargs.get("user")
def filter_suggestions(self):
return self.qs.filter(
suggestion__state=SuggestionStates.PENDING).distinct()
def filter_user_suggestions(self):
if not self.user:
return self.qs.none()
return self.qs.filter(
suggestion__user=self.user,
suggestion__state=SuggestionStates.PENDING).distinct()
def filter_my_suggestions(self):
return self.filter_user_suggestions()
def filter_user_suggestions_accepted(self):
if not self.user:
return self.qs.none()
return self.qs.filter(
suggestion__user=self.user,
suggestion__state=SuggestionStates.ACCEPTED).distinct()
def filter_user_suggestions_rejected(self):
if not self.user:
return self.qs.none()
return self.qs.filter(
suggestion__user=self.user,
suggestion__state=SuggestionStates.REJECTED).distinct()
def filter_user_submissions(self):
if not self.user:
return self.qs.none()
return self.qs.filter(
submitted_by=self.user,
submission__type__in=SubmissionTypes.EDIT_TYPES).distinct()
def filter_my_submissions(self):
return self.filter_user_submissions()
def filter_user_submissions_overwritten(self):
if not self.user:
return self.qs.none()
qs = self.qs.filter(
submitted_by=self.user,
submission__type__in=SubmissionTypes.EDIT_TYPES)
return qs.exclude(submitted_by=self.user).distinct()
def filter_my_submissions_overwritten(self):
return self.filter_user_submissions_overwritten()
class UnitSearchFilter(object):
filters = (UnitChecksFilter, UnitStateFilter, UnitContributionFilter)
def filter(self, qs, unit_filter, *args, **kwargs):
for search_filter in self.filters:
# try each of the filter classes to find one with a method to handle
# `unit_filter`
try:
return search_filter(qs, *args, **kwargs).filter(unit_filter)
except FilterNotFound:
pass
# if none match then return the empty qs
return qs.none()
class UnitTextSearch(object):
"""Search Unit's fields for text strings
"""
search_fields = (
"source_f", "target_f", "locations",
"translator_comment", "developer_comment")
search_mappings = {
"notes": ["translator_comment", "developer_comment"],
"source": ["source_f"],
"target": ["target_f"]}
def __init__(self, qs):
self.qs = qs
def get_search_fields(self, sfields):
search_fields = set()
for field in sfields:
if field in self.search_mappings:
search_fields.update(self.search_mappings[field])
elif field in self.search_fields:
search_fields.add(field)
return search_fields
def get_words(self, text, exact):
if exact:
return [text]
return [t.strip() for t in text.split(" ") if t.strip()]
def search(self, text, sfields, exact=False):
result = self.qs.none()
words = self.get_words(text, exact)
for k in self.get_search_fields(sfields):
result = result | self.search_field(k, words)
return result
def search_field(self, k, words):
subresult = self.qs
for word in words:
subresult = subresult.filter(
**{("%s__icontains" % k): word})
return subresult
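# Illustrative use of UnitTextSearch (queryset and search text are hypothetical):
# UnitTextSearch(units_qs).search("file missing", ["source", "target"]) splits the text
# into ["file", "missing"], expands the sfields to source_f and target_f via
# search_mappings, and ORs together one icontains-chained queryset per field.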
|
iafan/zing
|
pootle/apps/pootle_store/unit/filters.py
|
Python
|
gpl-3.0
| 5,933
|
import random
from .tiles import base
INITIAL_TILES = [
base.ASSASSIN, base.BOWMAN, base.CHAMPION, base.DRAGOON, base.FOOTMAN,
base.GENERAL, base.KNIGHT, base.LONGBOWMAN, base.MARSHALL, base.PIKEMAN,
base.PIKEMAN, base.PRIEST, base.RANGER, base.SEER, base.WIZARD,
]
class Game(object):
def __init__(self, initial_tiles=INITIAL_TILES):
self.board = {}
self.bags = (initial_tiles[:], initial_tiles[:])
for bag in self.bags:
random.shuffle(bag)
|
rorytrent/the-duke
|
duke/game.py
|
Python
|
gpl-3.0
| 499
|
#!/usr/bin/env python
import sys
import re
import subprocess
from jinja2 import Template
import replacements
TEMPLATENAME = "base.svg.template"
# modifiers for layers in order as in keymap
MODIFIERS = [
[],
["SHIFT"],
["MOD3"],
["MOD3", "SHIFT"],
["MOD4"],
["MOD4", "SHIFT"],
["MOD3", "MOD4"],
[]
]
LAYERNAMES = ["1", "2", "3", "5", "4", "Pseudoebene", "6", ""]
# 1E9E = Latin Capital Letter Sharp S
upper_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÜ\u1e9e'
lower_chars = 'abcdefghijklmnopqrstuvwxyzäöüß'
CAPS_MAP = str.maketrans(dict(zip(upper_chars + lower_chars,
lower_chars + upper_chars)))
assert len(lower_chars) == len(upper_chars) == 30
assert len(CAPS_MAP) == len(lower_chars) + len(upper_chars)
def keymap_to_keys(text):
    # simple and dumb parser for xkb keymap files
#
# It simply searches all "key { … };" parts and splits them.
# A more advanced version would parts "xkb_symbols { … }" first
# and only search in this part.
assert text.startswith("xkb_keymap")
KEY_PATTERN = r'\s key \s .+? \s { [^}]+? };'
SYMBOLS_PATTERN = r'\[ (.+?) \]'
text = text.split('xkb_symbols', 1)[1]
# FIXME: assumes the next section (if there is one) is
# xkb_geometry
text = text.split('xkb_geometry', 1)[0]
for k in re.findall(KEY_PATTERN, text, re.M+re.X):
_, name, text = k.split(None, 2)
name = name.strip('<').rstrip('>')
text = text.replace('symbols[Group1]', '')
symbols = re.findall(SYMBOLS_PATTERN, text, re.M+re.X)
if not symbols:
raise SystemExit(f"{name} did not match: {text!r}")
if len(symbols) != 1:
print("currious key:", name, symbols)
symbols = [s.strip() for s in symbols[0].split(',')]
# replace keynames with the symbol they produce
symbols = [replacements.f(s) for s in symbols]
# Some keys aren't layered, hence the list is too short.
# pad them with the first entry.
symbols = (symbols + symbols[:1]*9)[:9]
yield name, symbols
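# Illustrative example (assumed input, not taken from a real keymap dump):
# a keymap line such as
#     key <AD01> { [ x, X, greek_xi ] };
# would be yielded as ('AD01', symbols), where `symbols` is the comma-separated
# list run through replacements.f() and padded with its first entry to 9 slots.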
# --- argument handling ---
if len(sys.argv) not in (2, 3):
raise SystemExit('Usage: ./<this script> variantname [numpad]')
layout = sys.argv[1]
numpad = (len(sys.argv) == 3 and sys.argv[2] == "numpad")
swap_m3r_ä = (layout == "vou" or layout == "mine")
vou = (layout == "vou")
mine = (layout == "mine")
version = "numpad" if numpad else "tkl"
# - read data and template
keymap = subprocess.check_output(
["xkbcli", "compile-keymap", "--layout", "de", "--variant", layout],
text=True)
keymap = dict(keymap_to_keys(keymap))
with open(TEMPLATENAME) as templatefile:
template = Template(templatefile.read())
# --- generate files ---
def write_image(layername, layerdict):
layerdict["numpad"] = numpad
layerdict["swap_m3r_ä"] = swap_m3r_ä
layerdict["vou"] = vou
layerdict["mine"] = mine
with open(f'{layout}-{layername}-{version}.svg', 'w') as out:
out.write(template.render(layerdict))
def make_caps_lock(text):
if len(text) == 1:
return text.translate(CAPS_MAP)
else:
return text
# - main layers
for layer in range(7): # 7 because the last layer is empty
# create a dict with the replacements from replacements.py
layerdict = {a: b[layer] for a, b in keymap.items()}
# color modifiers accordingly
for x in MODIFIERS[layer]:
layerdict[x] = " pressed"
write_image(LAYERNAMES[layer], layerdict)
# - caps-lock images
for layer in 0, 1:
# create a dict with the replacements from replacements.py
layerdict = {a: make_caps_lock(b[layer]) for a, b in keymap.items()}
# color modifiers accordingly
for x in MODIFIERS[layer]:
layerdict[x] = " pressed"
write_image(LAYERNAMES[layer] + 'caps', layerdict)
# - "leer" image
write_image('leer', {})
|
neo-layout/neo-layout
|
grafik/bilder-einzeln/flat/generate-graphics.py
|
Python
|
gpl-3.0
| 4,077
|
import pygame
from vec_2d import Vec2d
import time
from types import *
from graphics_helper import Plotter
#------------------------------------------------------------------------
#
# This file is part of Conquer.
#
# Conquer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Conquer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Conquer. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Conquer Development Team (http://code.google.com/p/pyconquer/)
#
#------------------------------------------------------------------------
class TGameMenu:
def __init__(self, screen, bg_image, logo1, menu_items, start_vec, spacing=50):
# Currently selected menuitem's index
self.pos = 0
# List of menuitems
self.menu_items = menu_items
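        # Each menu item appears to be a sequence of the form
        # (label, value returned by select(), editor spec, caption), where the
        # editor spec looks like ["value_int_editor", current, (min, max)] or
        # ["value_bool_editor", current] (inferred from draw_items/edit_value below).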
# Pointer to pygame screen
self.screen = screen
# Coordinates where to render the menu
self.start_vec = start_vec
# Space between menuitems
self.spacing = spacing
        # Background picture is stored here, oddly, as is the top logo
self.bg = bg_image
self.logo = logo1
self.plotter = Plotter(self.screen)
# Font to be used with the menu
self.used_font = pygame.font.Font("yanone_regular.otf", 24)
self.plotter.cur_font = self.used_font
def draw_items(self, text=None):
# If images and/or text are supplied, draw them
if self.bg:
self.screen.blit(self.bg, (0, 0))
if self.logo:
self.screen.blit(self.logo, (263, 0))
if text:
self.plotter.text_at(text[0], Vec2d(text[1], text[2]),
font=self.used_font, wipe_background=True, color=(255, 255, 255))
# Iterate through menu items
for i, item_i in enumerate(self.menu_items):
# FIXME: skinnable colors
            # Unselected menu items are black
cur_color = (0, 0, 0)
shadow = True
if i == self.pos:
# Selected menu item is red
shadow = False
cur_color = (255, 0, 0)
# Text to be rendered
text = item_i[0]
# Check if menu items are value editors
if len(item_i[2]) >= 2:
if item_i[2][0] == "value_int_editor":
text = "%s (%d)" % (text, item_i[2][1])
if item_i[2][0] == "value_bool_editor":
if item_i[2][1]:
text = "%s (%s)" % (text, "on")
else:
text = "%s (%s)" % (text, "off")
# Draw the menu item text
self.plotter.text_at(text,
self.start_vec + Vec2d(0, self.spacing) * i,
color=cur_color,
wipe_background=False,
drop_shadow=shadow
)
# Caption Text
if self.menu_items[self.pos][3]:
# It has caption text, draw it
self.plotter.text_at(self.menu_items[self.pos][3],
Vec2d(400, 75))
# Some info :)
tmp_color = (50, 185, 10)
self.plotter.text_at("Contact:", Vec2d(400, 520),
color=tmp_color,
wipe_background=False)
self.plotter.text_at("Conquer Dev Team http://pyconquer.googlecode.com/",
Vec2d(400, 545),
color=tmp_color,
wipe_background=False)
def scroll(self, dy):
# Change the selected menu item
self.pos += dy
if self.pos < 0:
self.pos = len(self.menu_items) - 1
if self.pos == len(self.menu_items):
self.pos = 0
def edit_value(self, dv):
        # Edit the selected item's value: toggle booleans, step integers and
        # clamp them to their optional (min, max) bounds.
if len(self.menu_items[self.pos][2]) >= 2:
if self.menu_items[self.pos][2][0] == "value_int_editor":
self.menu_items[self.pos][2][1] += dv
if len(self.menu_items[self.pos][2]) >= 3:
if self.menu_items[self.pos][2][1] < self.menu_items[self.pos][2][2][0]:
self.menu_items[self.pos][2][1] = self.menu_items[self.pos][2][2][0]
if self.menu_items[self.pos][2][1] > self.menu_items[self.pos][2][2][1]:
self.menu_items[self.pos][2][1] = self.menu_items[self.pos][2][2][1]
if self.menu_items[self.pos][2][0] == "value_bool_editor":
self.menu_items[self.pos][2][1] = not self.menu_items[self.pos][2][1]
def get_selection(self, text=None):
"""
Render the menu as long as user selects a menuitem
text -> optional text to be rendered
"""
# Draw the items
self.draw_items(text)
# Create instance of pygame Clock
clock = pygame.time.Clock()
# Endless loop
while True:
# Limit fps to 30
clock.tick(30)
# Iterate through events
for e in pygame.event.get():
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_DOWN:
self.scroll(1)
self.draw_items(text)
if e.key == pygame.K_UP:
self.scroll(-1)
self.draw_items(text)
if e.key == pygame.K_RETURN:
choice = self.select()
return choice
if e.key == pygame.K_LEFT:
self.edit_value(-1)
self.draw_items(text)
if e.key == pygame.K_RIGHT:
self.edit_value(1)
self.draw_items(text)
pygame.display.flip()
def select(self):
# User selects a menu item
return self.menu_items[self.pos][1]
# end of class TGameMenu
###################################################################
def text_input(plotter, caption, corner_1, span_vec, fonts, only_numbers=False):
# Make an input-box and prompt it for input
assert isinstance(corner_1, Vec2d)
x1 = corner_1.x
y1 = corner_1.y
assert isinstance(span_vec, Vec2d)
w1 = span_vec.x
h1 = span_vec.y
cur_str = []
pygame.draw.rect(plotter.screen, (30, 30, 30), (x1, y1, w1, h1))
plotter.text_at(caption, Vec2d(x1 + w1 / 4, y1), font=fonts.font_2, wipe_background=False)
pygame.display.flip()
done = False
while not done:
for e in pygame.event.get():
key = None
#e = pygame.event.poll()
if e.type == pygame.NOEVENT:
# event queue is empty
time.sleep(0.1)
continue
if e.type == pygame.KEYDOWN:
key = e.key
else:
continue
if key == pygame.K_BACKSPACE:
if cur_str:
del cur_str[len(cur_str) - 1]
done = True
elif key == pygame.K_RETURN:
done = True
if (key <= 127) and (key != pygame.K_BACKSPACE):
if only_numbers:
if chr(key) in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]:
cur_str.append(chr(key))
else:
cur_str.append(chr(key))
cur_text_pos = Vec2d(x1 + (w1 / 2) - (len(cur_str) * 4), y1 + 15)
cur_font = fonts.font_4
plotter.text_at("".join(cur_str),
cur_text_pos,
wipe_background=False,
font=cur_font)
pygame.display.flip()
return "".join(cur_str)
def load_image_files_but_not_interface_image_files(image_handler, graphics_path):
tmp = pygame.image.load(graphics_path + "skull7.png").convert_alpha()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "skull")
tmp = pygame.image.load(graphics_path + "soldier.png").convert_alpha()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "soldier")
tmp = pygame.image.load(graphics_path + "armytent.png").convert_alpha()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "town")
tmp = pygame.image.load(graphics_path + "hextile2_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_1")
tmp = pygame.image.load(graphics_path + "hextile_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_2")
tmp = pygame.image.load(graphics_path + "hextile3_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_3")
tmp = pygame.image.load(graphics_path + "hextile4_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_4")
tmp = pygame.image.load(graphics_path + "hextile5_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_5")
tmp = pygame.image.load(graphics_path + "hextile6_.png").convert()
tmp.set_colorkey(tmp.get_at((0, 0)))
image_handler.add_image(tmp, "cell_6")
image_handler.add_image(pygame.image.load(graphics_path + "teksti.png").convert(), "logo")
image_handler.add_image(pygame.image.load(graphics_path + "mapedit.png").convert(), "mapedit")
def get_human_and_cpu_count(screen, fonts):
    # This is a very ugly piece of code.
    # For scenario editing and randomly generated maps, it asks
    # how many human and cpu players will participate.
max_player = 6
text_pos = Vec2d(800 / 2 - 110, 300)
span_vec = Vec2d(240, 45)
# get number of human players
nr_of_h = 0
while True:
#input_raw = text_input(screen, 'How many human players (1-6)?',
# text_pos, span_vec, fonts, only_numbers=True)
# DEBUG:
input_raw = '2'
try:
nr_of_h = int(input_raw)
except:
continue
if 1 <= nr_of_h <= max_player:
break
# get number of ai players
nr_of_c = 0
min_nr_of_ai = 0
if nr_of_h < max_player:
if nr_of_h == 1:
min_nr_of_ai = 1
while True:
#input_raw = text_input(screen,
# 'How many cpu players (%d-%d)?' % (min_nr_of_ai, max_player - nr_of_h),
# text_pos, span_vec, fonts, only_numbers=True)
# DEBUG:
input_raw = '2'
try:
nr_of_c = int(input_raw)
except:
continue
if min_nr_of_ai <= nr_of_c <= (max_player - nr_of_h):
break
return nr_of_h, nr_of_c
|
alex-tug/conquer
|
gamemenu.py
|
Python
|
gpl-3.0
| 11,388
|
# -*- coding: utf-8 -*-
#
# (c) 2018 Alberto Planas <aplanas@gmail.com>
#
# This file is part of KManga.
#
# KManga is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KManga is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KManga. If not, see <http://www.gnu.org/licenses/>.
import logging
import os.path
import re
import time
import urllib.parse
import scrapy
from spidermonkey import Spidermonkey
import django
django.setup()
from proxy.models import Proxy
from proxy.utils import needs_proxy
logger = logging.getLogger(__name__)
class RetryPartial(object):
"""Middleware to consider partial results as errors."""
def __init__(self, settings):
self.error_codes = {
int(x) for x in settings.getlist('SMART_PROXY_ERROR_CODES')
}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_response(self, request, response, spider):
        logger.debug('Process response - url: %s, status: %s, '
'flags: %s' % (request.url, response.status,
response.flags))
is_partial = 'partial' in response.flags
if is_partial and response.status not in self.error_codes:
# Partial results, not considered as errors, are marked as
# incorrect.
logger.debug('Partial result - url: %s' % request.url)
response.status = 500
return response
class SmartProxy(object):
"""Middleware to add a proxy to certain requests."""
def __init__(self, settings):
self.error_codes = {
int(x) for x in settings.getlist('SMART_PROXY_ERROR_CODES')
}
self.retry_error_codes = {
int(x) for x in settings.getlist('RETRY_HTTP_CODES')
}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_request(self, request, spider):
# The proxy only works if the request comes from a spider that
# have an operation associated (`catalog`, `collection`, etc)
has_operation = hasattr(spider, '_operation')
operations = ('catalog', 'collection', 'latest', 'manga')
if not has_operation or spider._operation not in operations:
return
logger.debug('Process request - proxy: %s, url: %s' % (
request.meta['proxy'] if 'proxy' in request.meta else 'no',
request.url))
# If the proxy is already set, we are done
if 'proxy' in request.meta:
return
if needs_proxy(spider.name):
proxy = Proxy.objects.get_one(spider.name)
if proxy:
logger.info('Using proxy <%s> for request' % proxy)
request.meta['proxy'] = 'http://%s' % proxy.proxy
# Disable redirection when a proxy is in use
request.meta['dont_redirect'] = True
else:
logger.error('No proxy found for %s' % spider.name)
def process_response(self, request, response, spider):
if 'proxy' in request.meta:
            logger.debug('Process response - proxy: %s, url: %s, '
'status: %s, flags: %s' % (
request.meta['proxy'], request.url,
response.status, response.flags))
if response.status in self.retry_error_codes:
self._delete_proxy_from_request(request, spider)
elif response.status in self.error_codes:
# Some of the error codes are redirects, we need to
# check if this a valid redirect, to maintain the
# proxy and enable the redirect.
redirect = response.headers.get('Location', None)
valid = self._valid_redirect(response.status,
request.url,
redirect)
if valid:
logger.debug('Valid redirect - proxy: %s, from: %s, '
'to: %s, status: %s' % (
request.meta['proxy'],
request.url, redirect,
response.status))
# If valid, re-enable redirection
if 'dont_redirect' in request.meta:
del request.meta['dont_redirect']
else:
# If the status is one of the error codes that is
# not in the retry error code, we need to map as
# one of them, like HTTP 500.
logger.debug('Invalid redirect - proxy: %s, from: %s, '
'to: %s, status: %s' % (
request.meta['proxy'],
request.url, redirect,
response.status))
self._map_status_error(response)
self._delete_proxy_from_request(request, spider)
return response
def process_exception(self, request, exception, spider):
if 'proxy' in request.meta:
logger.debug('Process exception - proxy: %s, url: %s, '
'exception: %s' % (request.meta['proxy'],
request.url, exception))
self._delete_proxy_from_request(request, spider)
def _map_status_error(self, response):
"""Set status code as 500 and remove the Content-Encoding."""
# Some proxies set the Content-Encoding section for partial
        # results, or redirects (that do not contain data). This can
# cause problems in the httpcompression middleware.
response.status = 500
if 'Content-Encoding' in response.headers:
del response.headers['Content-Encoding']
def _delete_proxy_from_request(self, request, spider):
        # remove the scheme prefix added in process_request
        proxy = request.meta['proxy']
        if proxy.startswith('http://'):
            proxy = proxy[len('http://'):]
del request.meta['proxy']
Proxy.objects.discard(proxy, spider.name)
logger.warning('Removing failed proxy <%s>, %d proxies left' % (
proxy, Proxy.objects.remainings(spider=spider.name)))
def _valid_redirect(self, status, url_from, url_to):
"""Implement some heuristics to detect valid redirections."""
# Check that status code is a redirection
if not 300 <= status < 400:
return False
        # Same resource name check (compares the path basenames, not the domain)
bn_from = os.path.basename(urllib.parse.urlparse(url_from).path)
bn_to = os.path.basename(urllib.parse.urlparse(url_to).path)
if bn_from != bn_to:
return False
# Ends in .html check
if not url_to.endswith('.html'):
return False
return True
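# Illustrative behaviour of the heuristic above (hypothetical URLs):
#     _valid_redirect(301, 'http://example.com/manga/title.html',
#                     'http://mirror.example.com/manga/title.html')  -> True
#     _valid_redirect(302, 'http://example.com/manga/title.html',
#                     'http://example.com/manga/title/')             -> False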
class VHost(object):
"""Middleware to replace the host name with the IP."""
def process_request(self, request, spider):
"""Replace the host name with the IP."""
if hasattr(spider, 'vhost_ip'):
for domain in spider.allowed_domains:
ip = spider.vhost_ip
url = re.sub(r'(www.)?%s' % domain, ip, request.url)
# During the second pass, both URL are the same (there
# is not replacement)
if request.url != url:
request = request.replace(url=url,
headers={'Host': domain})
return request
def process_response(self, request, response, spider):
"""Replace back the IP with the host name."""
if hasattr(spider, 'vhost_ip'):
headers = request.headers.to_unicode_dict()
domain = headers.get('Host', spider.allowed_domains[0])
ip = spider.vhost_ip
url = re.sub(ip, domain, response.url)
response = response.replace(url=url)
return response
class CloudFlare(object):
"""Middleware to bypass the CloudFlare protection."""
def process_response(self, request, response, spider):
"""Resolve the CloudFlare challenge."""
request_response = response
if hasattr(spider, 'cloudflare') and spider.cloudflare:
if response.status == 503 and response.headers['Server']:
logger.debug('CloudFlare challenge detected')
request_response = self._cloudflare(request, response, spider)
# We resolve it once per request
spider.cloudflare = False
return request_response
def _cloudflare(self, request, response, spider):
"""Resolve the CloudFlare challenge."""
# Extract the URL from the form
xp = '//form/@action'
url = response.xpath(xp).extract_first()
url = response.urljoin(url)
domain = spider.allowed_domains[0]
# Extract the parameters from the form
xp = '//form/input[@name="jschl_vc"]/@value'
jschl_vc = response.xpath(xp).extract_first()
xp = '//form/input[@name="pass"]/@value'
pass_ = response.xpath(xp).extract_first()
if jschl_vc and pass_:
# Extract the JavaScript snippets that can be evaluated
xp = '//script/text()'
init = response.xpath(xp).re_first(r'var s,t,o,p.*')
challenge = response.xpath(xp).re_first(r'(.*;)a.value')
variable = response.xpath(xp).re_first(r'\s+;(\w+\.\w+).=')
result = 'print((%s+%s).toFixed(10))' % (variable, len(domain))
code = (init, challenge)
proc = Spidermonkey(early_script_file='-', code=code)
stdout, stderr = proc.communicate(result)
jschl_answer = stdout.strip()
logger.debug('Challenge response: %s', jschl_answer)
# Generate the new request
formdata = {
'jschl_vc': jschl_vc,
'pass': pass_,
'jschl_answer': jschl_answer,
}
original_url = request.url
request = scrapy.FormRequest.from_response(
response, formdata=formdata)
request.headers['Referer'] = original_url
# XXX TODO - Is there a way to delay this single request?
time.sleep(4)
return request
else:
# The challenge changed and the code is outdated
logger.error('CloudFlare challenge changed. Please update')
return response
|
aplanas/kmanga
|
scraper/scraper/middlewares.py
|
Python
|
gpl-3.0
| 10,942
|
import os
import argparse
import datetime
import yaml
import api.src.common.initial_environment_config
from ..models.dense import create_model
from ..data_processing.data_generator import DataGenerator
from ..common.config import TrainingConfig, DataConfig, Config
from ..common.utils import print_info, ensure_dir
from .plot_trainings import get_description_string
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, LearningRateScheduler, EarlyStopping
RUNNING_TIME = datetime.datetime.now().strftime("%H_%M_%d_%m_%y")
def train(num_epochs, batch_size, input_size, num_workers):
if not Config.NO_SAVE:
ensure_dir(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME))
model = create_model((2592,))
callbacks = [
ModelCheckpoint(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'weights.h5'), save_best_only=True, monitor=TrainingConfig.callbacks_monitor),
CSVLogger(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'history.csv')),
LearningRateScheduler(TrainingConfig.schedule),
EarlyStopping(patience=5)
]if not Config.NO_SAVE else []
if not Config.NO_SAVE:
introduced_change = input("What new was introduced?: ")
with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'change.txt'), 'w') as f:
f.write(introduced_change)
with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'config.yml'), 'w') as f:
yaml.dump(list([TrainingConfig.get_config(), Config.get_config(), DataConfig.get_config()]), f, default_flow_style=False)
with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'model.txt'), 'w') as f:
f.write(get_description_string(model))
optimizer = TrainingConfig.optimizer
data_generator_train = DataGenerator(DataConfig.PATHS['TRAINING_PROCESSED_DATA'], batch_size, input_size, False, True)
data_generator_valid = DataGenerator(DataConfig.PATHS['VALID_PROCESSED_DATA'], batch_size, input_size, True, True)
model.compile(optimizer, TrainingConfig.loss, metrics=TrainingConfig.metrics)
model.fit_generator(data_generator_train, samples_per_epoch=data_generator_train.samples_per_epoch, nb_epoch=num_epochs,
validation_data=data_generator_valid, nb_val_samples=data_generator_valid.samples_per_epoch,
callbacks=callbacks)
def main(args):
print_info("Training")
train(args.num_epochs, args.batch_size, args.input_size, args.num_workers)
print_info("Finished")
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Script performing training')
argparser.add_argument('--num_epochs', default=TrainingConfig.NB_EPOCHS, type=int, help='Number of training epochs')
argparser.add_argument('--num_workers', type=int, default=TrainingConfig.NUM_WORKERS, help='Number of workers during training')
argparser.add_argument('--batch_size', type=int, default=TrainingConfig.BATCH_SIZE, help='Batch size')
argparser.add_argument('--input_size', type=int, default=Config.IMAGE_SIZE, help='Image size to input')
arguments = argparser.parse_args()
main(arguments)
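# Example invocation (hypothetical; the exact module path depends on how the
# package is laid out, since the script relies on relative imports):
#     python -m api.src.scripts.train_simple_network --num_epochs 50 --batch_size 32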
|
kacper1095/asl-translator
|
api/src/scripts/train_simple_network.py
|
Python
|
gpl-3.0
| 3,211
|
# -*- coding: utf-8 -*-
# -*- Channel PeliculasySeries -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'la': 'Latino', 'lat':'Latino', 'cas':'Castellano','es': 'Castellano', 'vs': 'VOSE', 'vos':'VOSE', 'vo':'VO',
'ori':'VO', 'so':'VOS', 'sor':'VOS'}
list_language = IDIOMAS.values()
list_quality = ['TS','Screener','DVDRip','HDRip', 'HDTV', 'micro720', 'micro1080']
list_servers = ['openload', 'rapidvideo', 'powvideo', 'gamovideo', 'streamplay', 'flashx', 'clipwatching', 'vidoza',
'thevideome']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasyseries')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasyseries')
host = 'https://peliculasyseries.org/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'series', action='list_all', type='tvshows',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'buscar/q/', thumbnail=get_thumb("search", auto=True),
extra='movie'))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data
def get_language(lang_data):
logger.info()
language = []
lang_data = lang_data.replace('language-ES', '').replace('medium', '').replace('serie', '').replace('-','')
if 'class' in lang_data:
lang_list = scrapertools.find_multiple_matches(lang_data, 'class=" ([^"]+)"')
else:
return lang_data.strip()
    for lang in lang_list:
        lang = IDIOMAS.get(lang, 'VOS')
        if lang not in language:
            language.append(lang)
return language
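# Illustrative examples for get_language() (made-up markup, not from the real site):
#     get_language('<i class=" lat"></i><i class=" vos"></i>')  ->  ['Latino', 'VOSE']
#     get_language(' lat ')                                     ->  'lat'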
def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host)
data = scrapertools.find_single_match(data, 'data-toggle="dropdown">Géneros.*?multi-column-dropdown">.*?"clearfix"')
if 'Genero' in item.title:
patron = '<li><a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
if title not in duplicados:
itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all',
type=item.type))
duplicados.append(title)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?'
patron += '<div class="calidad" >([^<]+)</div> <div class="audio-info">'
patron += '(.*?)<div class="w3l-action-icon">.*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, quality, lang_data, year in matches:
title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
if 'screener' in quality.lower():
quality = 'Screener'
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
language = get_language(lang_data)
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=language,
quality=quality,
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
context=filtertools.context(item, list_language, list_quality),
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination
url_next_page = scrapertools.find_single_match(data,"<a class='last' href='([^']+)'>»</a>")
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<a href="([^"]+)"><img class="thumb-item" src="([^"]+)" alt="[^"]+" >'
patron += '<div class="season-item">Temporada (\d+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedthumbnail, season in matches:
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
thumbnail=scrapedthumbnail, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron ='class="row-serie-item"><a href="([^"]+)">.*?<img class="episode-thumb-item" src="([^"]+)" alt="([^"]+)" >'
patron += '<divclass="audio-info-series">(.*?)<div class="episode-item">%s+x(\d+)</div>' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, scrapedepisode in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
language = get_language(lang_data)
title = '%sx%s - %s %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle, language)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos',
thumbnail=scrapedthumbnail, language=language, infoLabels=infoLabels))
itemlist = filtertools.get_links(itemlist, item, list_language)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
from lib import generictools
itemlist = []
data = get_source(item.url)
patron = '<div class="available-source" ><div class="([^"]+)">.*?'
patron += 'data-data="([^"]+)".*?<span class="quality-text">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for lang_data, scrapedurl, quality in matches:
lang = get_language(lang_data)
if 'screener' in quality.lower():
quality = 'Screener'
title = '%s [%s] [%s]'
url = base64.b64decode(scrapedurl[1:])
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.quality, x.language))
    # Required for link checking
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Required for AutoPlay
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_results(item)
else:
return []
def search_results(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = '<li class="search-results-item media-item" .*?<a href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="content" src="([^"]+)" .*?>(Pelicula|Serie) del año([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumb, content_type, year in matches:
title = scrapedtitle
if len(year)==0:
year = '-'
url = scrapedurl
thumbnail = scrapedthumb
if not '/serie' in url:
action = 'findvideos'
else:
action = 'seasons'
new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, action=action,
infoLabels={'year':year})
if new_item.action == 'findvideos':
new_item.contentTitle = new_item.title
else:
new_item.contentSerieName = new_item.title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host + 'movies'
elif categoria == 'infantiles':
item.url = host + 'genero/animation/'
item.type='movies'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
|
alfa-jor/addon
|
plugin.video.alfa/channels/peliculasyseries.py
|
Python
|
gpl-3.0
| 12,489
|
"""
Unit tests for utility functions.
"""
import numpy as np
import pytest
from thrifty import util
@pytest.mark.parametrize("num", [15, 16])
def test_fft_bin(num):
"""Validate fft_bin against numpy's fftfreq function."""
expected = np.fft.fftfreq(num, 1./num)
got = np.array([util.fft_bin(i, num) for i in range(num)])
np.testing.assert_array_equal(got, expected)
|
swkrueger/Thrifty
|
tests/test_util.py
|
Python
|
gpl-3.0
| 385
|
#
# Delete a device from a group
# Copyright © 2020 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
from commands.ServerCommand import ServerCommand
from database.action_group_devices import ActionGroupDevices
class DeleteActionGroupDevice(ServerCommand):
"""
    Command handler for deleting a device from a group
"""
def Execute(self, request):
device_id = request["args"]["device-id"]
group_id = request["args"]["group-id"]
agd = ActionGroupDevices()
result = agd.delete_device(group_id, device_id)
# Generate a successful response
r = self.CreateResponse(request["request"])
# The result is the number of devices deleted
if result >= 0:
r['result-code'] = 0
r['group-id'] = group_id
r['device_id'] = device_id
r['message'] = "Success"
else:
# Probably invalid device type
r['result-code'] = 1
r['error'] = 1
r['message'] = "Failure"
return r
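# Hypothetical request shape inferred from Execute() above (the "request" name is
# only an assumption; the argument keys match those actually read):
#     {
#         "request": "DeleteActionGroupDevice",
#         "args": {"group-id": 1, "device-id": 2}
#     }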
|
dhocker/athomepowerlineserver
|
commands/delete_group_device.py
|
Python
|
gpl-3.0
| 1,253
|
#!/usr/bin/python3
from pyrob.api import *
@task
def task_8_28():
if wall_is_above() != True:
while (wall_is_above() != True):
move_up()
while (wall_is_on_the_left() != True):
move_left()
while(wall_is_on_the_right() != True and wall_is_beneath() and wall_is_above()):
move_right()
if wall_is_above() != True:
while (wall_is_above() != True):
move_up()
while(wall_is_on_the_left() != True):
move_left()
while (wall_is_on_the_left() != True and wall_is_beneath() and wall_is_above()):
move_left()
if wall_is_above() != True:
while (wall_is_above() != True):
move_up()
while (wall_is_on_the_left() != True):
move_left()
if __name__ == '__main__':
run_tasks()
|
lesina/labs2016
|
Laba04/task_18.py
|
Python
|
gpl-3.0
| 862
|
from unidown.tools import unlink_dir_rec
class TestDeleteDirRec:
def test_non_existence(self, tmp_path):
no_folder = tmp_path.joinpath("./donotexist/")
assert not no_folder.exists()
unlink_dir_rec(no_folder)
assert not no_folder.exists()
def test_recursive(self, tmp_path):
for number in range(1, 4):
with tmp_path.joinpath(str(number)).open('w'):
pass
sub_folder = tmp_path.joinpath("sub")
sub_folder.mkdir(parents=True, exist_ok=True)
for number in range(1, 4):
with sub_folder.joinpath(str(number)).open('w'):
pass
tmp_path.joinpath("sub2").mkdir()
unlink_dir_rec(tmp_path)
assert not tmp_path.exists()
|
IceflowRE/MR-eBook-Downloader
|
tests/tools_test.py
|
Python
|
gpl-3.0
| 762
|
"""Constants used in Mackup."""
# Current version
VERSION = '0.8.7'
# Supported platforms
PLATFORM_DARWIN = 'Darwin'
PLATFORM_LINUX = 'Linux'
# Directory containing the application configs
APPS_DIR = 'applications'
# Mackup application name
MACKUP_APP_NAME = 'mackup'
# Default Mackup backup path where it stores its files in Dropbox
MACKUP_BACKUP_PATH = 'Mackup'
# Mackup config file
MACKUP_CONFIG_FILE = '.mackup.cfg'
# Directory that can contain user-defined app configs
CUSTOM_APPS_DIR = '.mackup'
# Supported engines
ENGINE_DROPBOX = 'dropbox'
ENGINE_GDRIVE = 'google_drive'
ENGINE_BOX = 'box'
ENGINE_COPY = 'copy'
ENGINE_ICLOUD = 'icloud'
ENGINE_FS = 'file_system'
|
Timidger/mackup
|
mackup/constants.py
|
Python
|
gpl-3.0
| 677
|
class Solution(object):
def isSelfCrossing(self, x):
"""
:type x: List[int]
:rtype: bool
"""
inf = float('inf')
n = len(x)
if n < 3:
return False
ruld = [0, 0, 0, 0] # right, up, left, down
next_max = inf
current = [-x[1], x[0]]
for i, elem in enumerate(x[2:], 2):
i %= 4
if elem >= next_max:
return True
xy = 1 if i in {0, 2} else 0
pn = 1 if i in {0, 3} else -1
new = current[xy] + pn * elem
if pn * new > pn * ruld[i - 3]:
next_max = inf
else:
if next_max is inf and pn * new >= pn * ruld[i - 1]:
ruld[i - 2] = ruld[i]
next_max = abs(ruld[i - 2] - current[xy ^ 1])
ruld[i - 1], current[xy] = current[xy], new
return False
assert Solution().isSelfCrossing([2, 1, 1, 2])
assert not Solution().isSelfCrossing([1, 2, 3, 4])
assert Solution().isSelfCrossing([1, 1, 1, 1])
assert not Solution().isSelfCrossing([3,3,4,2,2])
assert Solution().isSelfCrossing([1,1,2,1,1])
assert not Solution().isSelfCrossing([3,3,3,2,1,1])
|
wufangjie/leetcode
|
335. Self Crossing.py
|
Python
|
gpl-3.0
| 1,217
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/stuart/src/kdevelop projects/tennis/player_create.ui'
#
# Created: Mon Nov 3 22:00:23 2014
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_PlayerCreateWindow(object):
def setupUi(self, PlayerCreateWindow):
PlayerCreateWindow.setObjectName(_fromUtf8("PlayerCreateWindow"))
PlayerCreateWindow.resize(738, 379)
self.centralwidget = QtGui.QWidget(PlayerCreateWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetMinAndMaxSize)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.gridLayout_2 = QtGui.QGridLayout(self.tab)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.formLayout = QtGui.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)
self.formLayout.setLabelAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignVCenter)
self.formLayout.setFormAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.firstNameLabel = QtGui.QLabel(self.tab)
self.firstNameLabel.setObjectName(_fromUtf8("firstNameLabel"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.firstNameLabel)
self.LineEdit_firstname = QtGui.QLineEdit(self.tab)
self.LineEdit_firstname.setObjectName(_fromUtf8("LineEdit_firstname"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.LineEdit_firstname)
self.middleNameLabel = QtGui.QLabel(self.tab)
self.middleNameLabel.setObjectName(_fromUtf8("middleNameLabel"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.middleNameLabel)
self.LineEdit_middlename = QtGui.QLineEdit(self.tab)
self.LineEdit_middlename.setObjectName(_fromUtf8("LineEdit_middlename"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.LineEdit_middlename)
self.familyNameLabel = QtGui.QLabel(self.tab)
self.familyNameLabel.setObjectName(_fromUtf8("familyNameLabel"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.familyNameLabel)
self.LineEdit_familyname = QtGui.QLineEdit(self.tab)
self.LineEdit_familyname.setObjectName(_fromUtf8("LineEdit_familyname"))
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.LineEdit_familyname)
self.tickerNameLabel = QtGui.QLabel(self.tab)
self.tickerNameLabel.setObjectName(_fromUtf8("tickerNameLabel"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.tickerNameLabel)
self.LineEdit_tickername = QtGui.QLineEdit(self.tab)
self.LineEdit_tickername.setObjectName(_fromUtf8("LineEdit_tickername"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.LineEdit_tickername)
self.dateOfBirthLabel = QtGui.QLabel(self.tab)
self.dateOfBirthLabel.setObjectName(_fromUtf8("dateOfBirthLabel"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.dateOfBirthLabel)
self.DateEdit_dateofbirth = QtGui.QDateEdit(self.tab)
self.DateEdit_dateofbirth.setDate(QtCore.QDate(1980, 1, 1))
self.DateEdit_dateofbirth.setCalendarPopup(True)
self.DateEdit_dateofbirth.setObjectName(_fromUtf8("DateEdit_dateofbirth"))
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.DateEdit_dateofbirth)
self.frame = QtGui.QFrame(self.tab)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.formLayout.setWidget(8, QtGui.QFormLayout.FieldRole, self.frame)
self.comboBox_gender = QtGui.QComboBox(self.tab)
self.comboBox_gender.setObjectName(_fromUtf8("comboBox_gender"))
self.comboBox_gender.addItem(_fromUtf8(""))
self.comboBox_gender.setItemText(0, _fromUtf8(""))
self.comboBox_gender.addItem(_fromUtf8(""))
self.comboBox_gender.addItem(_fromUtf8(""))
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.comboBox_gender)
self.label = QtGui.QLabel(self.tab)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label)
self.gridLayout_2.addLayout(self.formLayout, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.formLayoutWidget_2 = QtGui.QWidget(self.tab_2)
self.formLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 501, 201))
self.formLayoutWidget_2.setObjectName(_fromUtf8("formLayoutWidget_2"))
self.formLayout_2 = QtGui.QFormLayout(self.formLayoutWidget_2)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)
self.formLayout_2.setLabelAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignVCenter)
self.formLayout_2.setFormAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing)
self.formLayout_2.setMargin(0)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.firstNameLabel_2 = QtGui.QLabel(self.formLayoutWidget_2)
self.firstNameLabel_2.setObjectName(_fromUtf8("firstNameLabel_2"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.firstNameLabel_2)
self.LineEdit_height = QtGui.QLineEdit(self.formLayoutWidget_2)
self.LineEdit_height.setObjectName(_fromUtf8("LineEdit_height"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.LineEdit_height)
self.middleNameLabel_2 = QtGui.QLabel(self.formLayoutWidget_2)
self.middleNameLabel_2.setObjectName(_fromUtf8("middleNameLabel_2"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.middleNameLabel_2)
self.LineEdit_birthplace = QtGui.QLineEdit(self.formLayoutWidget_2)
self.LineEdit_birthplace.setObjectName(_fromUtf8("LineEdit_birthplace"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.LineEdit_birthplace)
self.familyNameLabel_2 = QtGui.QLabel(self.formLayoutWidget_2)
self.familyNameLabel_2.setObjectName(_fromUtf8("familyNameLabel_2"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.familyNameLabel_2)
self.LineEdit_residence = QtGui.QLineEdit(self.formLayoutWidget_2)
self.LineEdit_residence.setObjectName(_fromUtf8("LineEdit_residence"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.LineEdit_residence)
self.tickerNameLabel_2 = QtGui.QLabel(self.formLayoutWidget_2)
self.tickerNameLabel_2.setObjectName(_fromUtf8("tickerNameLabel_2"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.tickerNameLabel_2)
self.LineEdit_rank = QtGui.QLineEdit(self.formLayoutWidget_2)
self.LineEdit_rank.setObjectName(_fromUtf8("LineEdit_rank"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.LineEdit_rank)
self.label_2 = QtGui.QLabel(self.formLayoutWidget_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_2)
self.comboBox_handedness = QtGui.QComboBox(self.formLayoutWidget_2)
self.comboBox_handedness.setObjectName(_fromUtf8("comboBox_handedness"))
self.comboBox_handedness.addItem(_fromUtf8(""))
self.comboBox_handedness.setItemText(0, _fromUtf8(""))
self.comboBox_handedness.addItem(_fromUtf8(""))
self.comboBox_handedness.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.comboBox_handedness)
self.frame_2 = QtGui.QFrame(self.formLayoutWidget_2)
self.frame_2.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtGui.QFrame.Raised)
self.frame_2.setObjectName(_fromUtf8("frame_2"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.FieldRole, self.frame_2)
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.verticalLayout.addWidget(self.tabWidget)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSizeConstraint(QtGui.QLayout.SetMinAndMaxSize)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.pushButton_DELETE_ALL_PLAYERS = QtGui.QPushButton(self.centralwidget)
self.pushButton_DELETE_ALL_PLAYERS.setMinimumSize(QtCore.QSize(0, 50))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(128, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(128, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(128, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.pushButton_DELETE_ALL_PLAYERS.setPalette(palette)
self.pushButton_DELETE_ALL_PLAYERS.setObjectName(_fromUtf8("pushButton_DELETE_ALL_PLAYERS"))
self.horizontalLayout.addWidget(self.pushButton_DELETE_ALL_PLAYERS)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.pushButton_EXIT = QtGui.QPushButton(self.centralwidget)
self.pushButton_EXIT.setMinimumSize(QtCore.QSize(10, 50))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(128, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(128, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(223, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(128, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(96, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(192, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.pushButton_EXIT.setPalette(palette)
self.pushButton_EXIT.setObjectName(_fromUtf8("pushButton_EXIT"))
self.horizontalLayout.addWidget(self.pushButton_EXIT)
self.pushButton_SAVE_EXIT = QtGui.QPushButton(self.centralwidget)
self.pushButton_SAVE_EXIT.setMinimumSize(QtCore.QSize(0, 50))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(16, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 223, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(16, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 223, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 255, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(16, 223, 16))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 128, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 96, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 192, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.pushButton_SAVE_EXIT.setPalette(palette)
self.pushButton_SAVE_EXIT.setObjectName(_fromUtf8("pushButton_SAVE_EXIT"))
self.horizontalLayout.addWidget(self.pushButton_SAVE_EXIT)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
PlayerCreateWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(PlayerCreateWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(PlayerCreateWindow)
def retranslateUi(self, PlayerCreateWindow):
PlayerCreateWindow.setWindowTitle(_translate("PlayerCreateWindow", "MainWindow", None))
self.firstNameLabel.setText(_translate("PlayerCreateWindow", "First Name", None))
self.middleNameLabel.setText(_translate("PlayerCreateWindow", "Middle Name", None))
self.familyNameLabel.setText(_translate("PlayerCreateWindow", "Family Name", None))
self.tickerNameLabel.setText(_translate("PlayerCreateWindow", "Ticker Name", None))
self.dateOfBirthLabel.setText(_translate("PlayerCreateWindow", "Date of Birth", None))
self.DateEdit_dateofbirth.setDisplayFormat(_translate("PlayerCreateWindow", "d/M/yyyy", None))
self.comboBox_gender.setItemText(1, _translate("PlayerCreateWindow", "Male", None))
self.comboBox_gender.setItemText(2, _translate("PlayerCreateWindow", "Female", None))
self.label.setText(_translate("PlayerCreateWindow", "Gender", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("PlayerCreateWindow", "Player details", None))
self.firstNameLabel_2.setText(_translate("PlayerCreateWindow", "Height", None))
self.middleNameLabel_2.setText(_translate("PlayerCreateWindow", "Birthplace", None))
self.familyNameLabel_2.setText(_translate("PlayerCreateWindow", "Residence", None))
self.tickerNameLabel_2.setText(_translate("PlayerCreateWindow", "Rank", None))
self.label_2.setText(_translate("PlayerCreateWindow", "Handedness", None))
self.comboBox_handedness.setItemText(1, _translate("PlayerCreateWindow", "Left handed", None))
self.comboBox_handedness.setItemText(2, _translate("PlayerCreateWindow", "Right handed", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("PlayerCreateWindow", "Profile info", None))
self.pushButton_DELETE_ALL_PLAYERS.setText(_translate("PlayerCreateWindow", "Delete ALL\n"
"Players", None))
self.pushButton_EXIT.setText(_translate("PlayerCreateWindow", "&EXIT", None))
self.pushButton_SAVE_EXIT.setText(_translate("PlayerCreateWindow", "&SAVE && EXIT", None))
|
stustustu123/tennis
|
Ui_player_create.py
|
Python
|
gpl-3.0
| 38,024
|
#!/usr/bin/env python
"""
This script plots various quantities.
"""
from __future__ import division, print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import os  # needed below to create the 'figures' directory when --save is passed
ylabels = {"cl": r"$C_l$", "cd": r"$C_d$", "cl/cd": r"$C_l/C_d$", "k": "$k$",
"omega": r"$\omega$", "epsilon": r"$\epsilon$"}
def plot_foil_perf(quantity="cl/cd", foil="0012", Re=2e5):
df = pd.read_csv("processed/NACA{}_{:.1e}.csv".format(foil, Re))
plt.figure()
if quantity == "cl/cd":
q = df.cl/df.cd
else:
q = df[quantity]
plt.plot(df.alpha_deg, q, "-o")
plt.xlabel(r"$\alpha$ (deg)")
plt.ylabel(ylabels[quantity])
plt.grid(True)
plt.tight_layout()
if __name__ == "__main__":
try:
import seaborn
seaborn.set(style="white", context="notebook", font_scale=1.5)
except ImportError:
print("Could not import seaborn for plot styling. Try")
print("\n conda install seaborn\n\nor")
print("\n pip install seaborn\n")
parser = argparse.ArgumentParser(description="Plotting results")
parser.add_argument("quantity", nargs="?", default="cl/cd",
help="Which quantity to plot",
choices=["cl", "cd", "cl/cd", "k", "omega", "epsilon"])
parser.add_argument("--foil", "-f", help="Foil", default="0012")
parser.add_argument("--Reynolds", "-R", help="Reynolds number", default=2e5)
parser.add_argument("--save", "-s", action="store_true", help="Save plots")
parser.add_argument("--noshow", action="store_true", default=False,
help="Do not show")
args = parser.parse_args()
plot_foil_perf(args.quantity, args.foil, float(args.Reynolds))
if args.save:
if not os.path.isdir("figures"):
os.mkdir("figures")
plt.savefig("figures/{}.pdf".format(args.quantity))
if not args.noshow:
plt.show()
|
karasinski/NACAFoil-OpenFOAM
|
plot.py
|
Python
|
gpl-3.0
| 1,969
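A minimal usage sketch for the plotting script above (illustrative only, not part of the repository): it assumes plot.py is importable as `plot` and that the processed CSV for the requested foil and Reynolds number already exists.
# Hypothetical driver for plot.py (names and paths are assumptions)
import matplotlib.pyplot as plt
from plot import plot_foil_perf  # assumes plot.py is on the import path
plot_foil_perf(quantity="cl/cd", foil="0012", Re=2e5)  # reads processed/NACA0012_2.0e+05.csv
plt.savefig("cl_cd_0012.pdf")
plt.show()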
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
from configparser import RawConfigParser
except ImportError:
from ConfigParser import RawConfigParser
import glob
import os
import shutil
import subprocess
import sys
config = RawConfigParser(allow_no_value=True)
config.read(os.path.join('gridsync', 'resources', 'config.txt'))
settings = {}
for section in config.sections():
if section not in settings:
settings[section] = {}
for option, value in config.items(section):
settings[section][option] = value
name = settings['application']['name']
name_lower = name.lower()
linux_icon = settings['build']['linux_icon']
appdir_usr = os.path.join('build', 'AppDir', 'usr')
appdir_bin = os.path.join(appdir_usr, 'bin')
try:
os.makedirs(appdir_usr)
except OSError:
pass
try:
shutil.copytree(os.path.join('dist', name), appdir_bin)
except OSError:
pass
_, ext = os.path.splitext(linux_icon)
icon_filepath = os.path.abspath(os.path.join('build', name_lower + ext))
shutil.copy2(linux_icon, icon_filepath)
desktop_filepath = os.path.join('build', '{}.desktop'.format(name))
with open(desktop_filepath, 'w') as f:
f.write('''[Desktop Entry]
Categories=Utility;
Type=Application
Name={0}
Exec={1}
Icon={1}
'''.format(name, name_lower)
)
os.environ['LD_LIBRARY_PATH'] = appdir_bin
os.environ['APPIMAGE_EXTRACT_AND_RUN'] = '1'
linuxdeploy_args = [
'linuxdeploy',
'--appdir=build/AppDir',
'--executable={}'.format(os.path.join(appdir_usr, 'bin', name_lower)),
'--icon-file={}'.format(icon_filepath),
'--desktop-file={}'.format(desktop_filepath),
]
try:
returncode = subprocess.call(linuxdeploy_args)
except OSError:
sys.exit(
'ERROR: `linuxdeploy` utility not found. Please ensure that it is '
'on your $PATH and executable as `linuxdeploy` and try again.\n'
'`linuxdeploy` can be downloaded from https://github.com/linuxdeploy/'
'linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage'
)
if returncode:
# XXX Ugly hack/workaround for "ERROR: Strip call failed: /tmp/.mount_linuxdns8a8k/usr/bin/strip: unable to copy file 'build/AppDir/usr/lib/libpython3.7m.so.1.0'; reason: Permission denied" observed on Travis-CI
os.chmod(glob.glob('build/AppDir/usr/lib/libpython*.so.*')[0], 0o755)
subprocess.call(linuxdeploy_args)
for file in sorted(os.listdir(appdir_bin)):
# The `linuxdeploy` utility adds a copy of each library to AppDir/usr/lib,
# however, the main PyInstaller-generated ("gridsync") executable expects
# these libraries to be located in the same directory as the ("gridsync")
# executable itself (resulting in *two* copies of each library and thus
    # wasted disk-space); removing the copies inserted by `linuxdeploy` -- and
    # replacing them with symlinks to the originals -- saves disk-space.
dst = 'build/AppDir/usr/lib/{}'.format(file)
if os.path.exists(dst):
try:
os.remove(dst)
except OSError:
print('WARNING: Could not remove file {}'.format(dst))
continue
src = '../bin/{}'.format(file)
print('Creating symlink: {} -> {}'.format(dst, src))
try:
os.symlink(src, dst)
except OSError:
print('WARNING: Could not create symlink for {}'.format(dst))
os.remove('build/AppDir/AppRun')
with open('build/AppDir/AppRun', 'w') as f:
f.write('''#!/bin/sh
exec "$(dirname "$(readlink -e "$0")")/usr/bin/{}" "$@"
'''.format(name_lower)
)
os.chmod('build/AppDir/AppRun', 0o755)
# Create the .DirIcon symlink here/now to prevent appimagetool from
# doing it later, thereby allowing the atime and mtime of the symlink
# to be overridden along with all of the other files in the AppDir.
try:
os.symlink(os.path.basename(icon_filepath), "build/AppDir/.DirIcon")
except OSError:
pass
subprocess.call(["python3", "scripts/update_permissions.py", "build/AppDir"])
subprocess.call(["python3", "scripts/update_timestamps.py", "build/AppDir"])
try:
os.mkdir('dist')
except OSError:
pass
try:
subprocess.call([
'appimagetool', 'build/AppDir', 'dist/{}.AppImage'.format(name)
])
except OSError:
sys.exit(
'ERROR: `appimagetool` utility not found. Please ensure that it is '
'on your $PATH and executable as `appimagetool` and try again.\n'
'`appimagetool` can be downloaded from https://github.com/AppImage/A'
'ppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage'
)
|
gridsync/gridsync
|
scripts/make_appimage.py
|
Python
|
gpl-3.0
| 4,585
|
#!/bin/false
# -*- coding: utf-8 -*-
from objects.orobject import OrObject
from objects.function import Function
from objects.number import Number
from objects.file import File
from objects.inheritdict import InheritDict
from objects.ordict import OrDict
from objects.orddict import ODict
import objects.console as console
import objects.exception as exception
import objects.orstring as orstring
import types
import libbuiltin
def expose(r, n=""):
v = OrObject.from_py(r)
if n:
v.name = n
return v
builtin = InheritDict()
builtin.update({
"int": expose(libbuiltin.toint),
"num": expose(Number),
"dict": expose(OrDict),
"odict": expose(ODict),
"set": expose(set),
"io": expose(console.io),
"file": expose(File),
"input": expose(console.input),
"output": expose(console.output),
"error": expose(console.error),
"endl": expose("\n"),
"repr": expose(repr),
"join": expose(libbuiltin.join),
"range": expose(range),
"type": expose(libbuiltin.typeof, "type"),
"dir": expose(libbuiltin.dirof, "dir"),
"attrs": expose(libbuiltin.attrsof, "attrs"),
"reverse": expose(reversed),
"sort": expose(sorted),
"chr": expose(unichr),
"Exception": expose(Exception),
"hasattr": expose(OrObject.has, "hasattr"),
"getattr": expose(OrObject.get, "getattr"),
"setattr": expose(OrObject.set, "setattr"),
})
stolen_builtins = [
'abs', 'all', 'any', 'bool', 'callable', #buffer
'cmp', #chr (not as unichr)
'dict', 'divmod', 'enumerate', #delattr
'exit', 'filter', # frozenset
'hash', 'id', #get/hasattr
'iter', 'len', 'list',
'map', 'max', 'min', 'ord', # object
'range', 'repr', #property
'round', 'set', 'slice', #setattr
'str', 'sum', 'unicode', #super
'zip'
]
for i in stolen_builtins:
builtin[i] = expose(__builtins__[i])
|
pavpanchekha/oranj
|
oranj/core/builtin.py
|
Python
|
gpl-3.0
| 1,879
|
#!/usr/bin/env python3
# Copyright 2020 The Wazo Authors (see the AUTHORS file)
# SPDX-License-Identifier: GPL-3.0+
import sys
from wazo_auth_client import Client as AuthClient
from wazo_confd_client import Client as ConfdClient
from xivo.chain_map import ChainMap
from xivo.config_helper import read_config_file_hierarchy, parse_config_file
_DEFAULT_CONFIG = {
'config_file': '/etc/wazo-upgrade/config.yml',
'auth': {
'key_file': '/var/lib/wazo-auth-keys/wazo-upgrade-key.yml'
}
}
def load_config():
file_config = read_config_file_hierarchy(_DEFAULT_CONFIG)
key_config = _load_key_file(ChainMap(file_config, _DEFAULT_CONFIG))
return ChainMap(key_config, file_config, _DEFAULT_CONFIG)
def _load_key_file(config):
key_file = parse_config_file(config['auth']['key_file'])
return {'auth': {'username': key_file['service_id'], 'password': key_file['service_key']}}
def list_broken_endpoints(confd_client):
endpoints = []
response = confd_client.lines.list()
for line in response['items']:
endpoint = line['endpoint_sip']
if not endpoint:
continue
name = endpoint.get('name')
auth_section_options = endpoint.get('auth_section_options', [])
username = None
for key, value in auth_section_options:
if key == 'username':
username = value
break
if name == username:
continue
endpoints.append(endpoint['uuid'])
return endpoints
def fix_endpoint(confd_client, endpoint_uuid):
endpoint = confd_client.endpoints_sip.get(endpoint_uuid)
name = endpoint['name']
auth_section_options = endpoint.get('auth_section_options', [])
for key, value in auth_section_options:
if key == 'username':
auth_section_options.remove(['username', value])
auth_section_options.append(['username', name])
confd_client.endpoints_sip.update(endpoint)
def main(tenant_uuid):
config = load_config()
auth_client = AuthClient(**config['auth'])
auth_client.set_tenant(tenant_uuid)
token_data = auth_client.token.new(expiration=300)
confd_client = ConfdClient(token=token_data['token'], **config['confd'])
confd_client.set_tenant(tenant_uuid)
endpoints_to_update = list_broken_endpoints(confd_client)
print('updating', len(endpoints_to_update), 'endpoints')
for endpoint_uuid in endpoints_to_update:
print('.', end='')
fix_endpoint(confd_client, endpoint_uuid)
print('done')
if __name__ == '__main__':
main(sys.argv[1])
|
wazo-pbx/xivo-tools
|
pjsip/sync-username-and-names.py
|
Python
|
gpl-3.0
| 2,588
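As a usage note (illustrative, not from the repository above): the script takes the tenant UUID as its only positional argument, so a hypothetical invocation, with a made-up UUID, would be:
# python3 sync-username-and-names.py 2f0d0c9e-0000-0000-0000-000000000000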
|
from server.util import ScriptManager
def objectClick2_2213(player, obId, obX, obY):
player.getPA().openUpBank()
def objectClick2_11758(player, obId, obX, obY):
player.getPA().openUpBank()
|
TheRealVestige/VestigeX-Server
|
Data/scripts/player/objects/objectclick2.py
|
Python
|
gpl-3.0
| 196
|
import pygame
def arrow_image(color):
img = pygame.Surface((7, 6))
img.fill((226, 59, 252))
img.set_colorkey((226, 59, 252), pygame.RLEACCEL)
pygame.draw.polygon(img, color, ((0, 0), (3, 3), (6, 0)))
return img
class Menu(object):
def __init__(self, font, options):
self.font = font
self.options = options
self.option = 0
self.height = len(self.options)*(self.font.get_height())+(len(self.options)-1)*3
self.width = 0
for o in self.options:
w = (len(o)+1)*self.font.get_width()
if w > self.width:
self.width = w
def draw(self, surface, pos, background=None, border=None):
ypos = pos[1]
i = 0
if background:
pygame.draw.rect(surface, background, (pos[0]-4, pos[1]-4,
self.width+8, self.height+6))
if border:
pygame.draw.rect(surface, border, (pos[0]-4, pos[1]-4,
self.width+8, self.height+8), 1)
for opt in self.options:
if i == self.option:
icon = ">"
else:
icon = " "
ren = self.font.render(icon + opt)
surface.blit(ren, (pos[0], ypos))
ypos += ren.get_height()+3
i += 1
def move_cursor(self, dir):
if dir > 0:
if self.option < len(self.options)-1:
self.option += 1
elif dir < 0:
if self.option > 0:
self.option -= 1
def get_option(self):
return self.option, self.options[self.option]
class DialogBox(object):
def __init__(self, size, background_color, border_color, font):
self.dialog = []
self.image = pygame.Surface(size)
self.font = font
self.size = size
self.background_color = background_color
self.border_color = border_color
self.update_box()
self.text_pos = 0
self.shown = False
self.scroll_delay = 1
self.frame = 0
self.down_arrow = arrow_image(font.color)
self.curr_dialog=0
def set_scrolldelay(self, delay):
self.scroll_delay = delay
def set_dialog(self, dialog_list):
self.page = 0
self.pages = len(dialog_list)
self.dialog = dialog_list
self.shown = True
self.text_pos = 0
def update_box(self):
self.image.fill(self.background_color)
pygame.draw.rect(self.image, self.border_color,
(0, 0, self.size[0]-1, self.size[1]-1), 1)
def progress(self):
if (self.curr_dialog==0):
return
if (self.text_pos >= len(self.curr_dialog)):
if self.page < self.pages-1:
self.page += 1
self.text_pos = 0
else:
self.shown = False
else:
self.text_pos = len(self.curr_dialog)
def draw(self, surface, pos):
if self.shown and self.page < self.pages:
self.update_box()
self.curr_dialog = self.dialog[self.page]
xpos = 4
ypos = 4
if self.text_pos < len(self.curr_dialog):
self.frame -= 1
if self.frame <= 0:
self.text_pos += 1
self.frame = self.scroll_delay
else:
self.image.blit(self.down_arrow,
(self.image.get_width()-12,
self.image.get_height()-8))
dialog = self.curr_dialog[:self.text_pos]
for word in dialog.split(" "):
ren = self.font.render(word + " ")
w = ren.get_width()
if xpos > self.image.get_width()-w:
ypos += ren.get_height()+3
xpos = 4
self.image.blit(ren, (xpos, ypos))
xpos += w
surface.blit(self.image, pos)
def over(self):
return self.shown != True
def close(self):
self.shown = False
self.page = self.pages
|
jmimu/pyNekketsu
|
retrogamelib/dialog.py
|
Python
|
gpl-3.0
| 4,110
|
#Idol Mech Logic
__author__="jim"
__date__ ="$Dec 22, 2010 3:01:06 PM$"
import procgame
import locale
import logging
from procgame import *
base_path = config.value_for_key_path('base_path')
game_path = base_path+"games/indyjones/"
speech_path = game_path +"speech/"
sound_path = game_path +"sound/"
music_path = game_path +"music/"
class Idol(game.Mode):
def __init__(self, game, priority):
super(Idol, self).__init__(game, priority)
self.log = logging.getLogger('ij.idol')
self.position = 0
self.balls_in_idol = 0
self.balls_in_play = 0
self.ball_max = 3 #make this a game setting
self.idol_state="initialise"
self.idol_moving = False
self.balls_waiting = False
self.release= False
self.lock_lit = False
self.next_posn_set=False
self.next_posn_num=0
self.game.sound.register_sound('ball_release', sound_path+"elephant.aiff")
def reset(self):
pass
def mode_started(self):
#self.test()
#self.empty()
pass
def mode_tick(self):
#self.move_to_posn(1)
self.idol_control()
#pass
def move_to_posn(self,posn_num,callback=None):
if self.position==posn_num and self.idol_moving:
self.game.coils.wheelMotor.disable()
self.idol_moving =False
#self.game.set_status("Position: "+str(self.position))
self.release=False
self.idol_state='idle'
self.next_posn_num=self.position
self.next_posn_set=False
#callback once at position
self.callback=callback
if self.callback:
self.callback()
self.log.info("Reached Idol Destination - Posn: "+str(self.position))
elif self.position!=posn_num and self.idol_moving==False:
self.game.coils.wheelMotor.pulse(0)
self.idol_moving=True
self.log.info("First Reported Idol Position was: "+str(self.position))
# if self.idol_moving:
# self.delay(name='mtp_repeat', event_type=None, delay=0, handler=self.move_to_posn, param=posn_num)
def idol_control(self):
if self.idol_state=='initialise':
self.move_to_posn(1)
elif self.idol_state=='empty':
self.move_to_posn(2)
if self.position==2 and self.idol_moving==False and self.release==False:
self.game.coils.idolRelease.pulse(150)
self.release = True
#print("coil fired at posn 2")
self.delay(name='move_posn', event_type=None, delay=3, handler=self.set_state, param='empty2')
#print("delay called")
elif self.idol_state=='empty2':
self.move_to_posn(4)
if self.position==4 and self.idol_moving==False and self.release==False:
self.game.coils.idolRelease.pulse(150)
self.release = True
self.delay(name='move_posn', event_type=None, delay=3, handler=self.set_state, param='empty3')
elif self.idol_state=='empty3':
self.move_to_posn(6)
if self.position==6 and self.idol_moving==False and self.release==False:
self.game.coils.idolRelease.pulse(150)
self.release = True
self.delay(name='move_posn', event_type=None, delay=3, handler=self.set_state, param='initialise')
elif self.idol_state=='no_lock':
# self.move_to_posn(4)
# if self.position==4 and self.idol_moving==False and self.release==False:
# self.game.coils.idolRelease.pulse(150)
# self.release = True
# self.delay(name='move_posn', event_type=None, delay=1.5, handler=self.set_state, param='initialise')
#for a 'no lock' state lock a ball then immediately release it
if self.balls_in_idol==1:
self.move_to_posn(3,self.lock_release)
elif self.balls_in_idol==2:
self.move_to_posn(5,self.lock_release)
elif self.balls_in_idol==3:
self.move_to_posn(1,self.lock_release)
elif self.idol_state=='lock':
self.next_lock_posn()
elif self.idol_state=='release':
self.next_posn()
if self.idol_moving==False and self.release==False:
self.game.coils.idolRelease.pulse(150)
self.release = True
#once ball is released, move idol to next posn
#self.delay(name='move_posn', event_type=None, delay=0.25, handler=self.next_posn)
elif self.idol_state=='hold':
self.next_lock_posn()
if self.idol_moving==False and self.release==False:
self.release = True
#set a delay to trigger release in preset time
self.delay(name='move_posn', event_type=None, delay=10, handler=self.lock_release)
def set_state(self,value):
self.idol_state=value
def empty(self):
self.set_state('empty')
def lock(self):
self.set_state('lock')
def no_lock(self):
self.set_state('no_lock')
def lock_release(self):
self.set_state('release')
def hold(self):
self.set_state('hold')
def home(self):
self.log.info("Moving To Home Position")
self.set_state('initialise')
def next_lock_posn(self):
# if self.position==1 or self.position==2:
# self.move_to_posn(3)
# elif self.position==3 or self.position==4:
# self.move_to_posn(5)
# elif self.position==5 or self.position==6:
# self.move_to_posn(1)
if self.balls_in_idol==1:
self.delay(name='move_posn', event_type=None, delay=1.5, handler=self.move_to_posn, param=3)
elif self.balls_in_idol==2:
self.delay(name='move_posn', event_type=None, delay=1.5, handler=self.move_to_posn, param=5)
elif self.balls_in_idol==3:
self.idol_state='idle'
# def next_release_posn(self):
# if self.position==1 or self.position==2:
# self.move_to_posn(4)
# elif self.position==3 or self.position==4:
# self.move_to_posn(6)
# elif self.position==5 or self.position==6:
# self.move_to_posn(2)
def next_posn(self):
if self.next_posn_set==False:
if self.position<6:
self.next_posn_num+=1
else:
self.next_posn_num=1
self.next_posn_set=True
self.move_to_posn(self.next_posn_num)
def sw_wheelPosition2_active(self, sw):
if self.game.switches.wheelPosition1.is_inactive() and self.game.switches.wheelPosition3.is_active():
self.position =1
self.log.info("Position: "+str(self.position))
def sw_wheelPosition2_inactive(self, sw):
if self.game.switches.wheelPosition1.is_active() and self.game.switches.wheelPosition3.is_inactive():
self.position =4
self.log.info("Position: "+str(self.position))
def sw_wheelPosition1_active(self, sw):
if self.game.switches.wheelPosition2.is_active() and self.game.switches.wheelPosition3.is_inactive():
self.position =3
self.log.info("Position: "+str(self.position))
def sw_wheelPosition1_inactive(self, sw):
if self.game.switches.wheelPosition2.is_inactive() and self.game.switches.wheelPosition3.is_active():
self.position =6
self.log.info("Position: "+str(self.position))
def sw_wheelPosition3_active(self, sw):
if self.game.switches.wheelPosition1.is_active() and self.game.switches.wheelPosition2.is_inactive():
self.position =5
self.log.info("Position: "+str(self.position))
def sw_wheelPosition3_inactive(self, sw):
if self.game.switches.wheelPosition1.is_inactive() and self.game.switches.wheelPosition2.is_active():
self.position =2
self.log.info("Position: "+str(self.position))
def check_popper(self):
if self.balls_in_idol<3 and self.game.switches.rightPopper.is_active():
self.game.coils.ballPopper.pulse(50)
elif self.balls_in_idol==3 and self.game.switches.rightPopper.is_active():
self.lock_release()
#idol upkicker
def sw_rightPopper_active_for_500ms(self, sw):
self.check_popper()
# def sw_rightPopper_inactive(self, sw):
# if self.game.switches.subwayLockup.is_active():
# self.game.coils.subwayRelease.pulse(30)
#subway
#subway entrance needs using
def sw_subwayLockup_active(self, sw):
#self.idol_state='lock'
if self.game.switches.rightPopper.is_inactive():
self.game.coils.subwayRelease.pulse(100)
# def sw_subwayLockup_inactive(self, sw):
# pass
def update_ball_tracking(self,num):
self.game.trough.num_balls_locked = self.balls_in_idol
self.game.trough.num_balls_in_play +=num
self.log.info("Balls in Idol: "+str(self.balls_in_idol))
#idol entrance
def sw_topIdolEnter_active(self, sw):
if self.game.ball>0:
self.balls_in_idol+=1
self.update_ball_tracking(-1)
# def sw_topIdolEnter_time_since_change_500ms(self, sw):
# self.balls_in_idol+=1
# if self.idol_state !='empty':
# if self.balls_in_idol>3:
# self.set_state('release_single')
# elif self.lock_lit==False:
# self.set_state('no_lock')
# else:
# self.set_state('lock')
# def sw_topIdolEnter_inactive(self, sw):
# if self.game.switches.subwayLockup.is_active():
# self.game.coils.subwayRelease.pulse(20)
def sw_exitIdol_active(self, sw):
if self.game.ball>0:
self.balls_in_idol-=1
self.update_ball_tracking(1)
self.check_popper()
self.game.sound.play("ball_release")
def sw_buyInButton_active_for_500ms(self, sw):
self.idol_state='empty'
|
mypinballs/indianajones
|
idol_bkup.py
|
Python
|
gpl-3.0
| 11,157
|
# Utility functions for OpenMORA scripts
#
# Part of OpenMora - https://github.com/OpenMORA
import os, sys, string
import platform
import yaml
def get_mora_paths():
""" Returns a list of paths with MORA modules, from the env var MORA_PATH
"""
if not 'MORA_PATH' in os.environ:
print('**ERROR** Environment variable MORA_PATH not set')
sys.exit(1)
sMoraPaths=os.environ['MORA_PATH'];
if platform.system()=="Windows":
sPathDelim = ";"
else:
sPathDelim = ":"
morabase_dir="";
return sMoraPaths.split(sPathDelim)
def get_morabase_dir():
""" Returns the path of "mora-base" pkg
"""
mora_paths = get_mora_paths() # Get env vars
for p in mora_paths:
tstPath = os.path.normpath(p + "/mora-base")
if os.path.exists(tstPath):
morabase_dir = tstPath
if (len(morabase_dir)==0) or (not os.path.exists(morabase_dir)):
print("Couldn't detect mora-base in MORA_PATH!!")
sys.exit(1)
return morabase_dir
import sys, math
def progress(percent):
''' source: http://gunslingerc0de.wordpress.com/2010/08/13/python-command-line-progress-bar/ '''
width = 74
marks = math.floor(width * (percent / 100.0))
spaces = math.floor(width - marks)
loader = '[' + ('=' * int(marks)) + (' ' * int(spaces)) + ']'
if percent >= 100:
percent = 100
sys.stdout.write("%s %d%%\r" % (loader, percent))
if percent >= 100:
pass
sys.stdout.write("\n")
sys.stdout.flush()
def get_pkgs_root():
'''Returns the path to the parent directory of mora-base'''
morabase_dir = get_morabase_dir()
pkgs_root = os.path.dirname(morabase_dir)
return pkgs_root
def read_distro_file():
'''Returns the yaml contents of the distro file'''
morabase_dir = get_morabase_dir()
pkgs_root = os.path.dirname(morabase_dir)
sDistroFile = os.path.normpath( morabase_dir + "/distro/openmora-pkgs.yaml")
assert os.path.exists(sDistroFile)
assert os.path.exists(pkgs_root + "/mora-base")
# Parse distro file:
fil = open(sDistroFile, 'r')
distro = yaml.load(fil)
fil.close()
#print distro
return distro
|
OpenMORA/mora-base
|
scripts/morautils.py
|
Python
|
gpl-3.0
| 2,018
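A quick illustration (not part of the file above) of how the progress() helper is meant to be driven, assuming morautils is importable from the scripts/ directory:
# Hypothetical loop exercising the progress bar in morautils.py
import time
from morautils import progress
for pct in range(0, 101, 10):
    progress(pct)
    time.sleep(0.1)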
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from kivy.lang import Builder
from kivy.uix.recycleview import RecycleView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.metrics import dp
from kivy.uix.label import Label
from kivy.properties import StringProperty
from kivy.properties import Property
from kivy.properties import BoundedNumericProperty
from kivy.properties import NumericProperty
from kivy.properties import AliasProperty
# noinspection PyProtectedMember
from kivy.properties import dpi2px
from kivy.graphics.opengl import GL_MAX_TEXTURE_SIZE
from ORCA.widgets.core.Label import cLabel
from ORCA.widgets.core.TouchRectangle import cTouchRectangle
from ORCA.utils.TypeConvert import ToUnicode
from ORCA.utils.TypeConvert import ToHex
from ORCA.utils.RemoveNoClassArgs import RemoveNoClassArgs
__all__ = ['cScrollableLabelLarge']
Builder.load_string('''
<cScrollableLabelLargeInner>:
RecycleBoxLayout:
default_size_hint: 1, None
size_hint: None,None
height: self.minimum_height
''')
# noinspection PyUnusedLocal
class cScrollableLabelLarge(Widget):
""" Main Widget to display a large text
        By default, x and y scrolling are enabled
        Horizontal scrolling can be disabled by passing
        noxscroll = True
Supports background color for the Label
As implementation, it is a Widget which contains a Background (if color is given)
and a customized RecycleView
"""
text = StringProperty('')
#font_size = Property('20sp')
def __init__(self, **kwargs):
kwargsInner={}
for k in kwargs:
if k not in ["size_hint","size","pos","pos_hint"]:
kwargsInner[k]=kwargs[k]
self.oScrollableLabelLargeInner=cScrollableLabelLargeInner(**kwargsInner)
super(self.__class__, self).__init__(**RemoveNoClassArgs(dInArgs=kwargs,oObject=Widget))
self.oBackGround = None
if "background_color" in kwargs:
self.oBackGround=cTouchRectangle(size=self.size,pos=self.pos, background_color=kwargs["background_color"])
self.add_widget(self.oBackGround)
del kwargs["background_color"]
self.oScrollableLabelLargeInner.size = self.size
self.oScrollableLabelLargeInner.pos = self.pos
self.add_widget(self.oScrollableLabelLargeInner)
self.bind(pos=self.update_graphics_pos,size=self.update_graphics_size)
def update_graphics_pos(self, instance, value):
""" Updates the child widget position (Backgrund and Recycleview) """
if self.oBackGround is not None:
self.oBackGround.pos = value
self.oScrollableLabelLargeInner.pos = value
def update_graphics_size(self, instance, value):
""" Updates the child widget size (Backgrund and Recycleview) """
if self.oBackGround is not None:
self.oBackGround.size = value
self.oScrollableLabelLargeInner.size = value
def IncreaseFontSize(self,*args):
""" Pass through function for the Recycleview """
self.oScrollableLabelLargeInner.IncreaseFontSize(args)
def DecreaseFontSize(self,*args):
""" Pass through function for the Recycleview """
self.oScrollableLabelLargeInner.DecreaseFontSize(args)
def on_text(self, instance, value):
""" Pass through function for the Recycleview """
self.oScrollableLabelLargeInner.text=value
def on_oOrcaWidget(self, instance, value):
""" Passes the OrcaWidget to the Childs """
if self.oBackGround is not None:
self.oBackGround.oOrcaWidget=value
self.oScrollableLabelLargeInner.oOrcaWidget=value
def _get_font_size(self):
"""Returns the Font Size """
return self.oScrollableLabelLargeInner.fFontSize
def _set_font_size(self, value):
"""Passes the change of font size """
self.oScrollableLabelLargeInner.font_size = value
def EnableWidget(self, *, bEnable:bool) -> bool:
if bEnable:
if self.oBackGround:
self.oBackGround.opacity = self.oScrollableLabelLargeInner.oOrcaWidget.fOrgOpacity
self.oScrollableLabelLargeInner.opacity = self.oScrollableLabelLargeInner.oOrcaWidget.fOrgOpacity
else:
if self.oBackGround:
self.oBackGround.opacity = 0.0
self.oScrollableLabelLargeInner.opacity = 0.0
font_size = AliasProperty(_get_font_size, _set_font_size)
# noinspection PyUnusedLocal
class cLineLayoutBase(BoxLayout):
""" embedded class to present a single line of text """
text = StringProperty("")
font_size = NumericProperty(0)
def __init__(self, **kwargs):
super(self.__class__,self).__init__(**RemoveNoClassArgs(dInArgs=kwargs,oObject=BoxLayout))
self.oLabel = cLabel(**self.oScrollableLabelLargeInner.kwFontArgs)
if self.oScrollableLabelLargeInner.oOrcaWidget is not None:
self.oLabel.oOrcaWidget = self.oScrollableLabelLargeInner.oOrcaWidget
self.add_widget(self.oLabel)
def on_size(self,*largs):
""" Updates the child widget size (label) """
self.oLabel.height = self.height
self.oLabel.text_size = self.size
def on_text(self,instance,value):
""" sets the text """
self.oLabel.text=value
def on_font_size(self,instance,value):
""" sets the font size """
self.oLabel.font_size=value
# noinspection PyProtectedMember,PyUnusedLocal
class cScrollableLabelLargeInner(RecycleView):
""" The "real' scrollable label (without background) """
# to have similar properties as a Label
font_size = Property('20sp')
text = StringProperty('')
oOrcaWidget = Property(None)
    # Internal Property which handles font resizing (not working as RecycleView can't manage change of cached widget)
fFontSize = BoundedNumericProperty(dpi2px(20,'sp'), min=4.0, max=96.0,errorhandler=lambda x: 96.0 if x > 96.0 else 4.0)
def __init__(self, **kwargs):
        # we create a new class on the fly to pass the font args to the creation process, as the view adapter creates without arguments
self.cLineLayout=type('cLineLayout', cLineLayoutBase.__bases__, dict(cLineLayoutBase.__dict__))
# passes myself to the embedded class. Not good style but Recycleview limits passing customized parameters
self.cLineLayout.oScrollableLabelLargeInner=self
self.oOrcaWidget = kwargs.get('ORCAWIDGET',None)
        # maximal len (in chars) of a single line of the given text
self.iMaxLen = 0
# Setting the scrolltypes / bars for the Recycleview
self.scroll_type = ['bars', 'content']
self.scroll_wheel_distance = dp(114)
self.bar_width = dp(10)
# The original passed Data array
self.aData = []
# Internal Flag to distinguish between first show and (re) setting text
self.bInit = False
# The maximum width of a char
self.iMaxCharwidth = 0
# The maximum characters per line
self.iMaxCharsPerLine = 0
if "font_size" in kwargs:
self.on_font_size(None,kwargs["font_size"])
        # Retrieving the genuine font properties of a label to pass only those arguments to the label (removing pos, hints, background colors, etc.)
self.aFontProperties = Label._font_properties+("background_color",)
# standard font args, if nothing is given
self.kwFontArgs = {"halign" : "left","valign": "top", "max_lines":1,"font_size":20}
# add / update the font args to be passed to the Label
for k in kwargs:
if k in self.aFontProperties:
self.kwFontArgs[k]=kwargs[k]
self.kwFontArgs["font_size"]=self.fFontSize
self.kwFontArgs.pop("text",None)
# Parameter Flag to disable horizontal scrolling
self.bNoXScroll = kwargs.get("noxscroll",False)
self.bMarkup = kwargs.get("markup", False)
        # A dummy label to get the width of the largest character
self.oLabel = Label(**RemoveNoClassArgs(dInArgs=self.kwFontArgs,oObject=Label))
super(self.__class__, self).__init__(**RemoveNoClassArgs(dInArgs=kwargs,oObject=RecycleView))
# This manages the distance between lines
self.layout_manager.default_size = (None,self.oLabel._label.get_extents('W')[1])
#self.layout_manager.default_size = (None, self.fFontSize*1.1)
self.layout_manager.orientation = 'vertical'
# we need to handle size changes
self.bind(size=self.update_size)
self.bind(text=self.on_textinner)
self.text = kwargs.get("text","")
def on_fFontSize(self, instance, value):
""" Will handle font size changes """
if self.layout_manager is not None:
self.kwFontArgs["font_size"]=self.fFontSize
self.oLabel.font_size = self.fFontSize
self.layout_manager.default_size = (None,self.oLabel._label.get_extents('W')[1])
self.SetData(self.aData)
def on_font_size(self, instance, value):
"""Helper function to manage strings with metrics passed as arguments (eg '12dp') """
try:
fValue=float(value)
except:
fValue=dpi2px(value[:-2],value[-2:])
self.fFontSize=fValue
def on_textinner(self, instance, value):
""" helper to have a Label like functionality to set the caption """
self.update_size(None,None)
def IncreaseFontSize(self,*args):
""" Increase the Font size """
self.fFontSize +=1.0
def DecreaseFontSize(self,*args):
""" Decrease the Font size """
self.fFontSize -=1.0
def SetData(self, aData):
""" Passes the data to the Recycle view and sets the layout manager size """
self.data = [{'text': ToUnicode(x),"font_size":self.fFontSize} for x in aData]
if self.bNoXScroll:
self.layout_manager.width=self.width
else:
self.layout_manager.width= self.iMaxCharwidth * self.iMaxCharsPerLine
self.viewclass = self.cLineLayout
self.refresh_from_data()
def update_size(self, instance, value):
""" Fits the text into layout_manager line.
            If noxscroll, all lines will be split up to fit to the widget size.
            If x scrolling is enabled, we check whether the maximum line length exceeds the TEXTURE SIZE.
In that case we split the lines as well and set the scrolling window size to the texture size.
if x scrolling is enabled, and all lines fit to the texture size, we pass the unchanged array """
if self.size==[100,100]:
return
aData = []
bDoLineBreak = False
self.iMaxCharwidth = self.oLabel._label.get_extents('W')[0]
self.iMaxCharsPerLine = int(self.width/self.iMaxCharwidth)
if not self.bNoXScroll:
self.aData = self.text.split('\n')
self.iMaxLen=len(max(self.aData,key=len))
if (self.iMaxCharwidth*self.iMaxLen)>GL_MAX_TEXTURE_SIZE:
self.iMaxCharsPerLine=int(GL_MAX_TEXTURE_SIZE/self.iMaxCharwidth)
bDoLineBreak = True
else:
self.iMaxCharsPerLine=self.iMaxLen
else:
bDoLineBreak = True
if bDoLineBreak:
if self.oLabel is not None:
if len(self.text)>10000:
aData = self.text.split('\n')
i=0
iEnd=len(aData)
while i<iEnd:
if len(aData[i])>self.iMaxCharsPerLine:
aData.insert(i+1,aData[i][self.iMaxCharsPerLine:])
aData[i]=aData[i][:self.iMaxCharsPerLine]
iEnd+=1
i+=1
else:
self.oLabel.size = self.size
self.oLabel.text_size = (self.width,None)
self.oLabel.text = self.text
self.oLabel._label.render()
aData=[]
for oLine in self.oLabel._label._cached_lines:
if len(oLine.words)>0:
uText= u''
for oWord in oLine.words:
if self.bMarkup:
uText+=self.AddMarkUps(oWord)
else:
uText+=oWord.text
aData.append(uText)
else:
aData.append(u'')
self.oLabel.text = ""
self.aData = aData
self.SetData(aData)
else:
self.SetData(self.aData)
def AddMarkUps(self,oWord):
uText=oWord.text
if oWord.options["bold"]:
uText=self.AddMarkUp(uText,"b")
if oWord.options["italic"]:
uText=self.AddMarkUp(uText,"i")
if oWord.options["underline"]:
uText=self.AddMarkUp(uText,"u")
if oWord.options["strikethrough"]:
uText=self.AddMarkUp(uText,"s")
if oWord.options["font_name"] != "Roboto":
uText=self.AddMarkUp(uText,"font",oWord.options["font_name"])
if oWord.options["font_size"] != self.fFontSize:
uText=self.AddMarkUp(uText,"size",ToUnicode(oWord.options["font_size"]))
if oWord.options["color"] != [1,1,1,1]:
uHexColor = u''
for iColor in oWord.options["color"]:
uHexColor+=ToHex(int(iColor*255))
uText=self.AddMarkUp(uText,"color",'#'+uHexColor)
return uText
# noinspection PyMethodMayBeStatic
def AddMarkUp(self,uText,uMarkUp,uValue=None):
if uValue is None:
return "[{1}]{0}[/{1}]".format(uText,uMarkUp)
else:
return "[{1}={2}]{0}[/{1}]".format(uText,uMarkUp,uValue)
|
thica/ORCA-Remote
|
src/ORCA/widgets/core/ScrollableLabelLarge.py
|
Python
|
gpl-3.0
| 15,619
|
#!/usr/bin/python
import xml.sax
import glob
from xml.etree import ElementTree
import os
debug = 0
currentPath = os.path.dirname(os.path.abspath(__file__))
def printl(string):
if debug:
print "[xml_parser:Debug] "+ str(string)
def main ():
p = getProviders()
print p[2].getIncomingServers()
def getProviderFromFilenames():
provs = []
printl("Current Path "+currentPath)
provs = os.listdir(currentPath+"/xml")
printl (provs)
return provs
class ProviderXMLHandler:
fullFilePath = None
domain = None
dom = None
services = []
def __init__(self, xmlfile):
self.fullFilePath = os.path.join(currentPath ,os.path.join('xml',xmlfile))
printl ("Getting "+self.fullFilePath)
self.dom = ElementTree.parse(self.fullFilePath)
self.domain = xmlfile
if self.dom != None:
printl("File geladen: "+str(self.dom))
self.getDisplayName()
self.getIncomingServers()
#self.getDomains()
def getDomain(self):
return self.domain
def getIncomingServers(self):
server = []
incomingServers = self.dom.findall('emailProvider/incomingServer')
printl ("incoming servers "+ str(len(incomingServers)))
for s in incomingServers:
type = s.attrib['type']
if type not in self.services:
self.services.append(type)
printl("Hostname: "+str(s.find('hostname').text))
printl("Hostname: "+str(s.find('port').text))
service = []
service.append(type)
service.append(s.find('hostname').text)
service.append(int(s.find('port').text))
if service not in server:
server.append(service)
printl("getIncomingServers: "+str(server))
return server
def canProviderIMAP(self):
if "imap" in self.services:
return True
return False
def canProviderPOP3(self):
if "pop3" in self.services:
return True
return False
def getDisplayName(self):
displayName = self.dom.findall('emailProvider/displayName')
if len(displayName) > 0:
displayName = displayName[0].text
printl ("Display name: " + displayName)
return displayName.encode('utf8')
else:
printl ("Display name: none")
return "None";
def getDomains(self):
domains = self.dom.findall('emailProvider/domain')
printl("\nGetting Domains")
for d in domains:
printl(d.text)
return domains
def getProviders():
providers = []
names = getProviderFromFilenames()
printl("files found: "+str(len(names)))
for p in names:
printl ("Provider: "+str(p))
providers.append(ProviderXMLHandler(p))
return providers
#main()
|
blu3f1r3/sMail-Checker
|
xml_parser.py
|
Python
|
gpl-3.0
| 2,892
|
from django.conf.urls import url
from . import views
app_name = 'persons'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^contact/$', views.contact, name='contact'),
url(r'^thanks/$', views.thanks, name='thanks'),
url(r'^upload/$', views.upload_file, name='upload_file'),
url(r'^success/$', views.success, name='success'),
url(r'^uploadImage/$', views.uploadImage, name='uploadImage'),
]
|
grodrigo/django_general
|
persons/urls.py
|
Python
|
gpl-3.0
| 427
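For orientation (illustrative, not from the repository above), these named routes are addressed through the `persons` namespace; assuming the app is mounted at the site root, URL reversal behaves roughly as follows:
# from django.core.urlresolvers import reverse   # or django.urls.reverse on newer Django
# reverse('persons:contact')  ->  '/contact/'
# reverse('persons:upload_file')  ->  '/upload/'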
|
"""Backwards compatible tunnel handler for
phpsploit v1 backdoors, aka:
<?php eval(base64_decode($_POST[%%PASSKEY%%])); ?>
"""
__all__ = ["Request_V1_x"]
from . import handler
from .exceptions import BuildError
class Request_V1_x(handler.Request):
def __init__(self):
"""Force default method to POST, because only this one
was supported on phpsploit v1 versions.
"""
super().__init__()
self.default_method = "POST"
def build_forwarder(self, method, decoder):
"""Assuming that phpsploit v1 uses POST data as payload container
without using an intermediate forwarder, this method shall
        always return an empty dictionary.
"""
return {}
def load_multipart(self):
raise BuildError("Can't send multi request in v1-compatible mode")
|
nil0x42/phpsploit
|
src/core/tunnel/compat_handler.py
|
Python
|
gpl-3.0
| 837
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os.path
from django import template
FMT = 'JPEG'
EXT = 'jpg'
QUAL = 75
register = template.Library()
def resized_path(path, size, method):
"Returns the path for the resized image."
dir, name = os.path.split(path)
image_name, ext = name.rsplit('.', 1)
return os.path.join(dir, '%s_%s_%s.%s' % (image_name, method, size, EXT))
def scale(imagefield, size, method='scale'):
"""
Template filter used to scale an image
that will fit inside the defined area.
Returns the url of the resized image.
{% load image_tags %}
{{ profile.picture|scale:"48x48" }}
"""
# imagefield can be a dict with "path" and "url" keys
if imagefield.__class__.__name__ == 'dict':
imagefield = type('imageobj', (object,), imagefield)
image_path = resized_path(imagefield.path, size, method)
if not os.path.exists(image_path):
try:
import Image
except ImportError:
try:
from PIL import Image
except ImportError:
raise ImportError('Cannot import the Python Image Library.')
image = Image.open(imagefield.path)
# normalize image mode
if image.mode != 'RGB':
image = image.convert('RGB')
# parse size string 'WIDTHxHEIGHT'
width, height = [int(i) for i in size.split('x')]
# use PIL methods to edit images
if method == 'scale':
image.thumbnail((width, height), Image.ANTIALIAS)
image.save(image_path, FMT)
elif method == 'crop':
try:
import ImageOps
except ImportError:
from PIL import ImageOps
ImageOps.fit(image, (width, height), Image.ANTIALIAS
).save(image_path, FMT)
return resized_path(imagefield.url, size, method)
def crop(imagefield, size):
"""
Template filter used to crop an image
to make it fill the defined area.
{% load image_tags %}
{{ profile.picture|crop:"48x48" }}
"""
return scale(imagefield, size, 'crop')
register.filter('scale', scale)
register.filter('crop', crop)
|
dw9694/django_blog
|
blog/templatetags/image_tags.py
|
Python
|
gpl-3.0
| 2,209
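For orientation (illustrative, not part of the repository above), the cached-file naming convention produced by resized_path() works like this:
# resized_path("media/avatars/jane.png", "48x48", "crop")
# -> "media/avatars/jane_crop_48x48.jpg"   (always saved as JPEG, per the FMT/EXT constants)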
|
from src import cron
from src.api import Api
api = Api()
def fetch_photo():
api.fetch_photo()
@cron.route('/worker', methods=['GET'])
def scheduler_worker():
fetch_photo()
return 'fetch photo...'
|
Assassinss/daily-artwork
|
src/worker.py
|
Python
|
gpl-3.0
| 229
|
'''
Copyright(C) 2016, Stamus Networks
Written by Laurent Defert <lds@stamus-networks.com>
This file is part of Scirius.
Scirius is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Scirius is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Scirius. If not, see <http://www.gnu.org/licenses/>.
'''
import os
from django.core.management.base import BaseCommand
from rules.es_data import ESData
class Command(BaseCommand, ESData):
help = 'Export Kibana dashboards.'
def __init__(self, *args, **kw):
BaseCommand.__init__(self, *args, **kw)
ESData.__init__(self)
def add_arguments(self, parser):
parser.add_argument(
'--full',
action='store_true',
dest='full',
default=False,
help='Save everything (SN dashboards and index)'
)
def handle(self, *args, **options):
tar_name, tar_file = self.kibana_export(options['full'])
os.rename(tar_file, tar_name)
self.stdout.write('Kibana dashboards saved to %s' % tar_name)
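# Hedged usage note (editorial addition): as a Django management command this
# is normally run through the project's manage.py, e.g.
#
#     python manage.py kibana_export           # export the SN dashboards
#     python manage.py kibana_export --full    # also include the Kibana index
#
# The name of the resulting tarball is printed on completion.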
|
StamusNetworks/scirius
|
rules/management/commands/kibana_export.py
|
Python
|
gpl-3.0
| 1,475
|
# Copyright (C) 2009-2013 Roman Zimbelmann <hut@lepus.uberspace.de>
# This software is distributed under the terms of the GNU GPL version 3.
from inspect import isfunction
from ranger.ext.signals import SignalDispatcher, Signal
from ranger.core.shared import FileManagerAware
from ranger.gui.colorscheme import _colorscheme_name_to_class
import re
import os.path
ALLOWED_SETTINGS = {
'automatically_count_files': bool,
'autosave_bookmarks': bool,
'autoupdate_cumulative_size': bool,
'cd_bookmarks': bool,
'collapse_preview': bool,
'colorscheme': str,
'column_ratios': (tuple, list),
'confirm_on_delete': str,
'dirname_in_tabs': bool,
'display_size_in_main_column': bool,
'display_size_in_status_bar': bool,
'display_tags_in_all_columns': bool,
'draw_borders': bool,
'draw_progress_bar_in_status_bar': bool,
'flushinput': bool,
'hidden_filter': str,
'idle_delay': int,
'max_console_history_size': (int, type(None)),
'max_history_size': (int, type(None)),
'mouse_enabled': bool,
'open_all_images': bool,
'padding_right': bool,
'preview_directories': bool,
'preview_files': bool,
'preview_images': bool,
'preview_max_size': int,
'preview_script': (str, type(None)),
'save_console_history': bool,
'scroll_offset': int,
'shorten_title': int,
'show_cursor': bool, # TODO: not working?
'show_selection_in_titlebar': bool,
'show_hidden_bookmarks': bool,
'show_hidden': bool,
'sort_case_insensitive': bool,
'sort_directories_first': bool,
'sort_reverse': bool,
'sort': str,
'status_bar_on_top': bool,
'tilde_in_titlebar': bool,
'unicode_ellipsis': bool,
'update_title': bool,
'update_tmux_title': bool,
'use_preview_script': bool,
'vcs_aware': bool,
'vcs_backend_bzr': str,
'vcs_backend_git': str,
'vcs_backend_hg': str,
'xterm_alt_key': bool,
}
DEFAULT_VALUES = {
bool: False,
type(None): None,
str: "",
int: 0,
list: [],
tuple: tuple([]),
}
class Settings(SignalDispatcher, FileManagerAware):
def __init__(self):
SignalDispatcher.__init__(self)
self.__dict__['_localsettings'] = dict()
self.__dict__['_localregexes'] = dict()
self.__dict__['_tagsettings'] = dict()
self.__dict__['_settings'] = dict()
for name in ALLOWED_SETTINGS:
self.signal_bind('setopt.'+name,
self._sanitize, priority=1.0)
self.signal_bind('setopt.'+name,
self._raw_set_with_signal, priority=0.2)
def _sanitize(self, signal):
name, value = signal.setting, signal.value
if name == 'column_ratios':
# TODO: cover more cases here
if isinstance(value, tuple):
signal.value = list(value)
if not isinstance(value, list) or len(value) < 2:
signal.value = [1, 1]
else:
signal.value = [int(i) if str(i).isdigit() else 1 \
for i in value]
elif name == 'colorscheme':
_colorscheme_name_to_class(signal)
elif name == 'preview_script':
if isinstance(value, str):
result = os.path.expanduser(value)
if os.path.exists(result):
signal.value = result
else:
signal.value = None
elif name == 'use_preview_script':
if self._settings['preview_script'] is None and value \
and self.fm.ui.is_on:
self.fm.notify("Preview script undefined or not found!",
bad=True)
def set(self, name, value, path=None, tags=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if name not in self._settings:
previous = None
else:
            previous = self._settings[name]
assert self._check_type(name, value)
assert not (tags and path), "Can't set a setting for path and tag " \
"at the same time!"
kws = dict(setting=name, value=value, previous=previous,
path=path, tags=tags, fm=self.fm)
self.signal_emit('setopt', **kws)
self.signal_emit('setopt.'+name, **kws)
def get(self, name, path=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if path:
localpath = path
else:
try:
localpath = self.fm.thisdir.path
except:
localpath = path
if localpath:
for pattern, regex in self._localregexes.items():
if name in self._localsettings[pattern] and\
regex.search(localpath):
return self._localsettings[pattern][name]
if self._tagsettings and path:
realpath = os.path.realpath(path)
if realpath in self.fm.tags:
tag = self.fm.tags.marker(realpath)
if tag in self._tagsettings and name in self._tagsettings[tag]:
return self._tagsettings[tag][name]
if name in self._settings:
return self._settings[name]
else:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._raw_set(name, value)
self.__setattr__(name, value)
return self._settings[name]
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self.set(name, value, None)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self.get(name, None)
def __iter__(self):
for x in self._settings:
yield x
def types_of(self, name):
try:
typ = ALLOWED_SETTINGS[name]
except KeyError:
return tuple()
else:
if isinstance(typ, tuple):
return typ
else:
return (typ, )
def _check_type(self, name, value):
typ = ALLOWED_SETTINGS[name]
if isfunction(typ):
assert typ(value), \
"Warning: The option `" + name + "' has an incorrect type!"
else:
assert isinstance(value, typ), \
"Warning: The option `" + name + "' has an incorrect type!"\
" Got " + str(type(value)) + ", expected " + str(typ) + "!" +\
" Please check if your commands.py is up to date." if not \
self.fm.ui.is_set_up else ""
return True
__getitem__ = __getattr__
__setitem__ = __setattr__
def _raw_set(self, name, value, path=None, tags=None):
if path:
            if path not in self._localsettings:
try:
regex = re.compile(path)
except:
# Bad regular expression
return
self._localregexes[path] = regex
self._localsettings[path] = dict()
self._localsettings[path][name] = value
# make sure name is in _settings, so __iter__ runs through
# local settings too.
            if name not in self._settings:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._settings[name] = value
elif tags:
for tag in tags:
if tag not in self._tagsettings:
self._tagsettings[tag] = dict()
self._tagsettings[tag][name] = value
else:
self._settings[name] = value
def _raw_set_with_signal(self, signal):
self._raw_set(signal.setting, signal.value, signal.path, signal.tags)
class LocalSettings():
def __init__(self, path, parent):
self.__dict__['_parent'] = parent
self.__dict__['_path'] = path
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self._parent.set(name, value, self._path)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self._parent.get(name, self._path)
def __iter__(self):
for x in self._parent._settings:
yield x
__getitem__ = __getattr__
__setitem__ = __setattr__
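# ---------------------------------------------------------------------------
# Hedged illustration (editorial addition, not part of ranger itself): how an
# option that was never set falls back to a default.  The first entry of its
# ALLOWED_SETTINGS type (or type tuple) is looked up in DEFAULT_VALUES, e.g.
#
#     ALLOWED_SETTINGS['column_ratios']   ->  (tuple, list)
#     DEFAULT_VALUES[tuple]               ->  ()
#     ALLOWED_SETTINGS['scroll_offset']   ->  int
#     DEFAULT_VALUES[int]                 ->  0
#
# so reading settings.column_ratios or settings.scroll_offset is well defined
# even before any configuration file has been evaluated.
# ---------------------------------------------------------------------------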
|
mullikine/ranger
|
ranger/container/settings.py
|
Python
|
gpl-3.0
| 8,531
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import dbus
import avahi
import socket
from folavirt.networking.NetworkInterface import NetworkInterface
"""
Class used to broadcast something via Avahi.
"""
class AvahiBroadcaster():
def __init__(self, name, avahiname, avahiport):
self._service_name = name
self.avahiname = avahiname
self.avahiport = avahiport
self.bus = dbus.SystemBus()
raw_server = self.bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER)
self.server = dbus.Interface(raw_server, avahi.DBUS_INTERFACE_SERVER)
self.group = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME, self.server.EntryGroupNew()), avahi.DBUS_INTERFACE_ENTRY_GROUP)
"""
    Publish
"""
def publish(self):
self.group.AddService(
NetworkInterface().getIndex(),
avahi.PROTO_INET,
0,
self._service_name,
self.avahiname,
'',
'',
self.avahiport,
''
)
self.group.Commit()
def unpublish(self):
self.group.Reset()
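# Hedged usage sketch (editorial addition): announce a hypothetical HTTP
# service and withdraw it on Enter.  This assumes a running Avahi daemon
# reachable over the system D-Bus and a resolvable local network interface;
# the service name, type and port below are made up for illustration.
if __name__ == '__main__':
    broadcaster = AvahiBroadcaster('folavirt demo', '_http._tcp', 8080)
    broadcaster.publish()
    raw_input('Service announced via Avahi; press Enter to withdraw it... ')
    broadcaster.unpublish()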
|
lokipl/folavirt
|
lib/folavirt/networking/AvahiBroadcaster.py
|
Python
|
gpl-3.0
| 1,356
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.independent.StateType import StateType
logger = logging.getLogger(__name__)
class EnvironmentVariable58StateElement(StateType):
MODEL_MAP = {
'tag_name': 'environmentvariable58_state',
'elements': [
{'tag_name': 'pid', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'min': 0, 'max': 1},
{'tag_name': 'name', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'min': 0, 'max': 1},
{'tag_name': 'value', 'class': 'scap.model.oval_5.defs.EntityStateType', 'min': 0, 'max': 1},
],
}
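# Editorial note (hedged): MODEL_MAP follows the framework convention of
# mapping each child element of <environmentvariable58_state> (pid, name,
# value) to the class that parses it, with 'min'/'max' giving the allowed
# number of occurrences; here every child is optional and appears at most once.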
|
cjaymes/pyscap
|
src/scap/model/oval_5/defs/independent/EnvironmentVariable58StateElement.py
|
Python
|
gpl-3.0
| 1,279
|
# Copyright 2014-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._strcmp95.
The strcmp95 algorithm variant of the Jaro-Winkler distance.
"""
from collections import defaultdict
from typing import Any, DefaultDict, Tuple
from ._distance import _Distance
__all__ = ['Strcmp95']
class Strcmp95(_Distance):
"""Strcmp95.
This is a Python translation of the C code for strcmp95:
http://web.archive.org/web/20110629121242/http://www.census.gov/geo/msb/stand/strcmp.c
:cite:`Winkler:1994`.
The above file is a US Government publication and, accordingly,
in the public domain.
This is based on the Jaro-Winkler distance, but also attempts to correct
for some common typos and frequently confused characters. It is also
limited to uppercase ASCII characters, so it is appropriate to American
names, but not much else.
.. versionadded:: 0.3.6
"""
_sp_mx = (
('A', 'E'),
('A', 'I'),
('A', 'O'),
('A', 'U'),
('B', 'V'),
('E', 'I'),
('E', 'O'),
('E', 'U'),
('I', 'O'),
('I', 'U'),
('O', 'U'),
('I', 'Y'),
('E', 'Y'),
('C', 'G'),
('E', 'F'),
('W', 'U'),
('W', 'V'),
('X', 'K'),
('S', 'Z'),
('X', 'S'),
('Q', 'C'),
('U', 'V'),
('M', 'N'),
('L', 'I'),
('Q', 'O'),
('P', 'R'),
('I', 'J'),
('2', 'Z'),
('5', 'S'),
('8', 'B'),
('1', 'I'),
('1', 'L'),
('0', 'O'),
('0', 'Q'),
('C', 'K'),
('G', 'J'),
)
def __init__(self, long_strings: bool = False, **kwargs: Any) -> None:
"""Initialize Strcmp95 instance.
Parameters
----------
long_strings : bool
Set to True to increase the probability of a match when the number
of matched characters is large. This option allows for a little
more tolerance when the strings are large. It is not an appropriate
test when comparing fixed length fields such as phone and social
security numbers.
**kwargs
Arbitrary keyword arguments
.. versionadded:: 0.4.0
"""
super(Strcmp95, self).__init__(**kwargs)
self._long_strings = long_strings
def sim(self, src: str, tar: str) -> float:
"""Return the strcmp95 similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Strcmp95 similarity
Examples
--------
>>> cmp = Strcmp95()
>>> cmp.sim('cat', 'hat')
0.7777777777777777
>>> cmp.sim('Niall', 'Neil')
0.8454999999999999
>>> cmp.sim('aluminum', 'Catalan')
0.6547619047619048
>>> cmp.sim('ATCG', 'TAGC')
0.8333333333333334
.. versionadded:: 0.1.0
.. versionchanged:: 0.3.6
Encapsulated in class
"""
def _in_range(char: str) -> bool:
"""Return True if char is in the range (0, 91).
Parameters
----------
char : str
The character to check
Returns
-------
bool
True if char is in the range (0, 91)
.. versionadded:: 0.1.0
"""
return 91 > ord(char) > 0
ying = src.strip().upper()
yang = tar.strip().upper()
if ying == yang:
return 1.0
# If either string is blank - return - added in Version 2
if not ying or not yang:
return 0.0
adjwt = defaultdict(int) # type: DefaultDict[Tuple[str, str], int]
# Initialize the adjwt array on the first call to the function only.
# The adjwt array is used to give partial credit for characters that
# may be errors due to known phonetic or character recognition errors.
# A typical example is to match the letter "O" with the number "0"
for tup in self._sp_mx:
adjwt[(tup[0], tup[1])] = 3
adjwt[(tup[1], tup[0])] = 3
if len(ying) > len(yang):
search_range = len(ying)
minv = len(yang)
else:
search_range = len(yang)
minv = len(ying)
# Blank out the flags
ying_flag = [0] * search_range
yang_flag = [0] * search_range
search_range = max(0, search_range // 2 - 1)
# Looking only within the search range,
# count and flag the matched pairs.
num_com = 0
yl1 = len(yang) - 1
for i in range(len(ying)):
low_lim = (i - search_range) if (i >= search_range) else 0
hi_lim = (i + search_range) if ((i + search_range) <= yl1) else yl1
for j in range(low_lim, hi_lim + 1):
if (yang_flag[j] == 0) and (yang[j] == ying[i]):
yang_flag[j] = 1
ying_flag[i] = 1
num_com += 1
break
# If no characters in common - return
if num_com == 0:
return 0.0
# Count the number of transpositions
k = n_trans = 0
for i in range(len(ying)):
if ying_flag[i] != 0:
j = 0
for j in range(k, len(yang)): # pragma: no branch
if yang_flag[j] != 0:
k = j + 1
break
if ying[i] != yang[j]:
n_trans += 1
n_trans //= 2
# Adjust for similarities in unmatched characters
n_simi = 0
if minv > num_com:
for i in range(len(ying)):
if ying_flag[i] == 0 and _in_range(ying[i]):
for j in range(len(yang)):
if yang_flag[j] == 0 and _in_range(yang[j]):
if (ying[i], yang[j]) in adjwt:
n_simi += adjwt[(ying[i], yang[j])]
yang_flag[j] = 2
break
num_sim = n_simi / 10.0 + num_com
# Main weight computation
weight = (
num_sim / len(ying)
+ num_sim / len(yang)
+ (num_com - n_trans) / num_com
)
weight /= 3.0
# Continue to boost the weight if the strings are similar
if weight > 0.7:
# Adjust for having up to the first 4 characters in common
j = 4 if (minv >= 4) else minv
i = 0
while (i < j) and (ying[i] == yang[i]) and (not ying[i].isdigit()):
i += 1
if i:
weight += i * 0.1 * (1.0 - weight)
# Optionally adjust for long strings.
# After agreeing beginning chars, at least two more must agree and
# the agreeing characters must be > .5 of remaining characters.
if (
self._long_strings
and (minv > 4)
and (num_com > i + 1)
and (2 * num_com >= minv + i)
):
if not ying[0].isdigit():
weight += (1.0 - weight) * (
(num_com - i - 1) / (len(ying) + len(yang) - i * 2 + 2)
)
return weight
if __name__ == '__main__':
import doctest
doctest.testmod()
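# Hedged illustration (editorial addition): the long_strings option can only
# raise the similarity, never lower it, because its adjustment term is
# non-negative.  For two long names that mostly agree, e.g.
#
#     Strcmp95().sim('Christopher', 'Cristopher')
#     Strcmp95(long_strings=True).sim('Christopher', 'Cristopher')
#
# the second value is greater than or equal to the first.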
|
chrislit/abydos
|
abydos/distance/_strcmp95.py
|
Python
|
gpl-3.0
| 8,223
|
# Copyright 2011 Sebastien Maccagnoni-Munch
#
# This file is part of Omoma.
#
# Omoma is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# Omoma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Omoma. If not, see <http://www.gnu.org/licenses/>.
"""
Django template tags for Omoma
"""
|
TheGU/omoma
|
omoma/omoma_web/templatetags/__init__.py
|
Python
|
gpl-3.0
| 666
|