code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
import sys
import codecs
import re
# Romaji-to-kana conversion table.
# NOTE(review): the kana string literals below are mojibake in this copy of
# the file, and several values contain stray line breaks inside the quotes
# (extraction damage).  The code is kept byte-for-byte; restore the original
# UTF-8 hiragana from upstream before trusting the mapped values.
kana_table = {
'a': 'ใ', 'i': 'ใ', 'u': 'ใ', 'e': 'ใ', 'o': 'ใ',
'ka': 'ใ', 'ki': 'ใ', 'ku': 'ใ', 'ke': 'ใ', 'ko': 'ใ',
'sa': 'ใ', 'si': 'ใ', 'su': 'ใ', 'se': 'ใ', 'so': 'ใ',
'ta': 'ใ', 'ti': 'ใก', 'tu': 'ใค', 'te': 'ใฆ', 'to': 'ใจ',
'na': 'ใช', 'ni': 'ใซ', 'nu': 'ใฌ', 'ne': 'ใญ', 'no': 'ใฎ',
'ha': 'ใฏ', 'hi': 'ใฒ', 'hu': 'ใต', 'he': 'ใธ', 'ho': 'ใป',
'ma': 'ใพ', 'mi': 'ใฟ', 'mu': 'ใ', 'me': 'ใ', 'mo': 'ใ',
'ya': 'ใ', 'yu': 'ใ', 'yo': 'ใ',
'ra': 'ใ', 'ri': 'ใ', 'ru': 'ใ', 're': 'ใ', 'ro': 'ใ',
'wa': 'ใ',
'ga': 'ใ', 'gi': 'ใ', 'gu': 'ใ', 'ge': 'ใ', 'go': 'ใ',
'za': 'ใ', 'zi': 'ใ', 'zu': 'ใ', 'ze': 'ใ', 'zo': 'ใ',
'da': 'ใ ', 'di': 'ใข', 'du': 'ใฅ', 'de': 'ใง', 'do': 'ใฉ',
'ba': 'ใฐ', 'bi': 'ใณ', 'bu': 'ใถ', 'be': 'ใน', 'bo': 'ใผ',
'pa': 'ใฑ', 'pi': 'ใด', 'pu': 'ใท', 'pe': 'ใบ', 'po': 'ใฝ',
'kya': 'ใใ', 'kyu': 'ใใ
', 'kyo': 'ใใ',
'sya': 'ใใ', 'syu': 'ใใ
', 'syo': 'ใใ',
'tya': 'ใกใ', 'tyu': 'ใกใ
', 'tyo': 'ใกใ',
'nya': 'ใซใ', 'nyu': 'ใซใ
', 'nyo': 'ใซใ',
'hya': 'ใฒใ', 'hyu': 'ใฒใ
', 'hyo': 'ใฒใ',
'mya': 'ใฟใ', 'myu': 'ใฟใ
', 'myo': 'ใฟใ',
'rya': 'ใใ', 'ryu': 'ใใ
', 'ryo': 'ใใ',
'gya': 'ใใ', 'gyu': 'ใใ
', 'gyo': 'ใใ',
'zya': 'ใใ', 'zyu': 'ใใ
', 'zyo': 'ใใ',
'jya': 'ใใ', 'jyu': 'ใใ
', 'jyo': 'ใใ',
'bya': 'ใณใ', 'byu': 'ใณใ
', 'byo': 'ใณใ',
'pya': 'ใดใ', 'pyu': 'ใดใ
', 'pyo': 'ใดใ',
'sha': 'ใใ', 'shi': 'ใ', 'shu': 'ใใ
', 'sho': 'ใใ',
'tsu': 'ใค',
'cha': 'ใกใ', 'chi': 'ใก', 'chu': 'ใกใ
', 'cho': 'ใกใ',
'fu':'ใต',
'ja':'ใใ', 'ji':'ใ', 'ju':'ใใ
', 'jo':'ใใ',
'dya':'ใขใ', 'dyu':'ใขใ
', 'dyo':'ใขใ',
'kwa':'ใใ',
'gwa':'ใใ', 'wo':'ใ',
'nn' : 'ใ',
'-':'ใผ', ',':'ใ', '.':'ใ',
}
# Vowels -- used to decide whether a pending 'n' becomes the syllabic n.
boin_list = {
'a', 'i', 'u', 'e', 'o'
}
# Romaji letters whose conversion depends on the character that follows
# ('n' may become syllabic n, 't' may become the small geminate tsu).
pend_list = {
't','n'
}
def searchRomajiStartsWith(romaji):
    """Return True if any key of kana_table starts with *romaji*.

    Used to decide whether the accumulating romaji buffer can still grow
    into a convertible sequence (prefix match against the table keys).
    """
    # Idiom fix: replaced a manual loop with stray ``pass`` statements by
    # the equivalent short-circuiting any().
    return any(key.startswith(romaji) for key in kana_table)
def convertKana(roma_chars):
    """Return the kana mapped to *roma_chars*, or False when there is no entry.

    False (rather than None) is the sentinel the callers test against.
    """
    # Idiom fix: dict.get with a default replaces the explicit membership test.
    return kana_table.get(roma_chars, False)
def convertPendingChars(lastConverted, pendingChar, newChar):
    """Resolve a pending ambiguous romaji character ('n' or 't').

    Parameters:
        lastConverted: whether the previous conversion attempt succeeded.
        pendingChar: the buffered character awaiting resolution.
        newChar: the next input character, which settles the ambiguity.

    Returns the kana for the pending character when it can now be decided,
    otherwise False.  (The returned literals are mojibake in this copy of
    the file -- see the note on kana_table.)
    """
    if pendingChar == 'n':
        # 'n' followed by a non-vowel other than another 'n' becomes syllabic n.
        if newChar not in boin_list and newChar != 'n' and lastConverted:
            return 'ใ'
    elif pendingChar == 't':
        # 't' followed by another 't' becomes the small (geminate) tsu.
        if newChar == 't' and lastConverted:
            return 'ใฃ'
    return False
def convertRomaJiFile(in_filepath, out_filepath) -> None:
    """Convert a romaji text file to kana and write the result.

    Reads *in_filepath* (UTF-8) line by line, converts recognised romaji
    sequences via kana_table and writes the converted text to
    *out_filepath* (UTF-8).  Characters that cannot be converted are
    passed through unchanged.
    """
    kanastring = ''
    # Fix: both file handles are now closed deterministically (the original
    # left the reader and the writer to the garbage collector).
    with codecs.open(in_filepath, 'r', 'utf_8') as infile:
        for line in infile:
            # Process everything in lower case.
            line = line.lower()
            out_line = ''
            romaji = ''            # characters still being matched
            lastConverted = False  # did the last conversion attempt succeed?
            for char in line:
                # A pending 'n'/'t' may be resolved by the next character;
                # if so, emit its kana and restart the buffer with `char`.
                pend_char = convertPendingChars(lastConverted, romaji, char)
                if pend_char is not False:
                    out_line += pend_char
                    romaji = char
                    continue
                # Grow the buffer and prefix-match it against the table.
                romaji += char
                if not searchRomajiStartsWith(romaji):
                    # No table key starts with the buffer: flush it verbatim.
                    if romaji != '':
                        out_line += romaji
                        romaji = ''
                    lastConverted = False
                    continue
                if romaji in pend_list and lastConverted:
                    # Lone 'n'/'t' after a success: wait for the next char.
                    lastConverted = True
                else:
                    kana = convertKana(romaji)
                    if kana is not False:
                        # Exact match: emit the kana and clear the buffer.
                        out_line += kana
                        romaji = ''
                        lastConverted = True
                    else:
                        lastConverted = False
                        # Three or more unmatched characters: give up on the
                        # buffer and emit it as-is.  With one or two, keep
                        # accumulating -- a longer key may still match.
                        if len(romaji) >= 3:
                            out_line += romaji
                            romaji = ''
            # Flush whatever is still pending at end of line.
            if romaji:
                out_line += romaji
            print(out_line)  # debug output, kept from the original
            kanastring += out_line
    with codecs.open(out_filepath, 'w', 'utf_8') as out:
        out.write(kanastring)
# Script entry point: validate the command line and run the conversion.
# Fix: guarded with ``__name__`` so importing the module has no side effects.
if __name__ == '__main__':
    param = sys.argv
    # Exactly two positional arguments (input and output file) are required.
    if len(param) != 3:
        print('Usage: kanaconvert.py [input_file] [output_file]')
    else:
        convertRomaJiFile(param[1], param[2])
| chikuwayamada/kanaconvert | kanaconvert.py | Python | mit | 6,510 |
from django.conf.urls import patterns, include, url
# URL routes for the demo app: the site root and a per-account page.
# NOTE(review): django.conf.urls.patterns() with string view names was
# deprecated in Django 1.8 and removed in 1.10; modern Django requires
# urlpatterns to be a plain list of url()/path() entries with callables.
urlpatterns = patterns('app.views',
    (r'^$', 'home'),
    (r'^account/(?P<account_id>.+)$', 'account'),
)
| cheddarfinancial/cheddar-oauth-demo | app/urls.py | Python | mit | 164 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2011, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010-2011, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
LDAP Authentication module.
.. moduleauthor:: Gerson Galang <gerson.galang@versi.edu.au>
.. moduleauthor:: Russell Sim <russell.sim@monash.edu>
'''
import ldap
import logging
from django.conf import settings
from tardis.tardis_portal.auth.interfaces import AuthProvider, \
GroupProvider, UserProvider
from tardis.tardis_portal.models import UserAuthentication
logger = logging.getLogger(__name__)
auth_key = u'ldap'
auth_display_name = u'LDAP'
class LDAPBackend(AuthProvider, UserProvider, GroupProvider):
    """LDAP-backed authentication, user-lookup and group-lookup provider.

    NOTE(review): this class uses Python 2 syntax (``except E, e`` clauses
    and ``print`` statements in searchGroups) and will not run on Python 3
    as written.
    """

    def __init__(self, name, url, base, login_attr, user_base,
            user_attr_map, group_id_attr, group_base,
            group_attr_map, admin_user='', admin_pass=''):
        # Authentication-method name used to look up UserAuthentication rows.
        self.name = name
        # Basic info
        self._url = url
        self._base = base
        # Authenticated bind (optional service account for searches)
        self._admin_user = admin_user
        self._admin_pass = admin_pass
        # Login attribute
        self._login_attr = login_attr
        # User Search
        self._user_base = user_base
        self._user_attr_map = user_attr_map
        # the login attribute maps onto the provider-level "id" field
        self._user_attr_map[self._login_attr] = "id"
        # Group Search
        self._group_id = group_id_attr
        self._group_base = group_base
        self._group_attr_map = group_attr_map
        self._group_attr_map[self._group_id] = "id"

    def _query(self, base, filterstr, attrlist):
        """Safely query LDAP

        Binds (as the admin account when configured, anonymously otherwise),
        runs a subtree search and returns the result data, or None on any
        LDAP error.  The connection is always unbound before returning.
        """
        l = None
        searchScope = ldap.SCOPE_SUBTREE
        try:
            l = ldap.initialize(self._url)
        except ldap.LDAPError, e:
            # NOTE(review): logger.error takes a format string plus %-args;
            # these extra positional arguments are not rendered as written.
            logger.error(e.message['desc'], ": ", self._url)
            return None
        l.protocol_version = ldap.VERSION3
        try:
            if self._admin_user and self._admin_pass:
                l.simple_bind(self._admin_user, self._admin_pass)
            else:
                l.simple_bind()
        except ldap.LDAPError, e:
            logger.error(e.args[0]['desc'])
            if l:
                l.unbind_s()
            return None
        try:
            ldap_result_id = l.search(base, searchScope,
                                      filterstr, attrlist)
            result_type, result_data = l.result(ldap_result_id, 1)
            return result_data
        except ldap.LDAPError, e:
            logger.error(e.message['desc'])
        finally:
            # unbind whether the search succeeded or not
            l and l.unbind_s()
        return None

    #
    # AuthProvider
    #
    def authenticate(self, request):
        """Bind to LDAP with the POSTed credentials.

        Returns a dict with 'display', 'id' and 'email' keys on success,
        None on failure.
        """
        username = request.POST['username']
        password = request.POST['password']

        if not username or not password:
            return None

        l = None

        try:
            retrieveAttributes = self._user_attr_map.keys() + \
                [self._login_attr]
            userRDN = self._login_attr + '=' + username
            l = ldap.initialize(self._url)
            l.protocol_version = ldap.VERSION3
            # first bind as the user relative to the base DN, then re-bind
            # with the DN actually found by the search
            l.simple_bind(userRDN + ',' + self._base, password)
            ldap_result = l.search_s(self._user_base, ldap.SCOPE_SUBTREE,
                                     userRDN, retrieveAttributes)
            bind_dn = ldap_result[0][0]
            l.simple_bind_s(bind_dn, password)
            if ldap_result[0][1]['uid'][0] == username:
                # check if the given username in combination with the LDAP
                # auth method is already in the UserAuthentication table
                user = ldap_result[0][1]
                return {'display': user['givenName'][0],
                        "id": user['uid'][0],
                        "email": user['mail'][0]}
            return None
        except ldap.LDAPError:
            logger.exception("ldap error")
            return None
        except IndexError:
            logger.exception("index error")
            return None
        finally:
            if l:
                l.unbind_s()

    def get_user(self, user_id):
        # NOTE(review): ``NotImplemented`` is a constant, not an exception
        # type; this was presumably meant to be ``NotImplementedError``.
        raise NotImplemented()

    #
    # User Provider
    #
    def getUserById(self, id):
        """
        return the user dictionary in the format of::

            {"id": 123,
            "display": "John Smith",
            "email": "john@example.com"}
        """
        result = self._query(self._user_base,
                             '(%s=%s)' % (self._login_attr, id),
                             self._user_attr_map.keys() + [self._login_attr])
        user = {}
        if not result:
            return None
        # translate LDAP attribute names to provider field names
        for k, v in result[0][1].items():
            user[self._user_attr_map[k]] = v[0]
        return user

    def getUsernameByEmail(self, email):
        """Return the uid of the user whose mail (or alternate mail) matches,
        or None when not found or on error."""
        l = None
        try:
            retrieveAttributes = ["uid"]
            l = ldap.initialize(self._url)
            l.protocol_version = ldap.VERSION3
            searchFilter = '(|(mail=%s)(mailalternateaddress=%s))' % (email,
                                                                     email)
            ldap_result = l.search_s(self._user_base, ldap.SCOPE_SUBTREE,
                                     searchFilter, retrieveAttributes)
            if ldap_result[0][1]['uid'][0]:
                return ldap_result[0][1]['uid'][0]
            else:
                return None
        except ldap.LDAPError:
            logger.exception("ldap error")
            return None
        except IndexError:
            logger.exception("index error")
            return None
        finally:
            if l:
                l.unbind_s()

    #
    # Group Provider
    #
    def getGroups(self, request):
        """return an iteration of the available groups.
        """
        try:
            # check if a user exists that can authenticate using the VBL
            # auth method
            userAuth = UserAuthentication.objects.get(
                userProfile__user=request.user,
                authenticationMethod=self.name)
        except UserAuthentication.DoesNotExist:
            return
        result = self._query(self._group_base,
                             "(&(objectClass=posixGroup)(%s=%s))" % \
                             ("memberUid", userAuth.username),
                             self._group_attr_map.keys())
        if not result:
            return
        for g, a in result:
            yield a[self._group_id][0]

    def getGroupById(self, id):
        """return the group associated with the id::

            {"id": 123,
            "display": "Group Name",}
        """
        result = self._query(self._group_base,
                             "(&(objectClass=posixGroup)(%s=%s))" % \
                             (self._group_id, id),
                             self._group_attr_map.keys())
        if not result:
            return None
        group = {}
        for k, v in result[0][1].items():
            group[self._group_attr_map[k]] = v[0]
        return group

    def searchGroups(self, **filter):
        """Yield group dicts (including a 'members' list) matching the
        keyword filters, which are given in provider field names."""
        reverse_attr = {}
        # invert the attribute map: provider field name -> LDAP attribute
        for k, v in self._group_attr_map.items():
            reverse_attr[v] = k
        qstr = ""
        for k, v in filter.items():
            qstr += "(%s=%s)" % (reverse_attr[k], v)
        result = self._query(self._group_base,
                             "(&(objectClass=posixGroup)%s)" % qstr,
                             self._group_attr_map.keys() + ["memberUid"])
        # NOTE(review): leftover debug output (Python 2 print statements).
        print result
        print "(&(objectClass=posixGroup)%s)" % qstr
        if not result:
            return
        for g, a in result:
            group = {}
            for k, v in a.items():
                if k in self._group_attr_map:
                    group[self._group_attr_map[k]] = v[0]
            group["members"] = a["memberUid"]
            yield group

    def getGroupsForEntity(self, id):
        """return a list of groups associated with a particular entity id
        """
        result = self._query(self._group_base,
                             "(&(objectClass=posixGroup)(%s=%s))" % \
                             ("memberUid", id),
                             self._group_attr_map.keys())
        if not result:
            return
        for g, a in result:
            group = {}
            for k, v in a.items():
                group[self._group_attr_map[k]] = v[0]
            yield group
# Module-level cache: the backend is constructed once per process.
_ldap_auth = None


def ldap_auth():
    """Return an initialised LDAP backend.

    Reads the LDAP_* settings; mandatory ones raise ValueError when absent,
    while LDAP_ADMIN_USER / LDAP_ADMIN_PASSWORD default to ''.  The
    constructed backend is cached in the module global.
    """
    global _ldap_auth
    if _ldap_auth:
        return _ldap_auth

    def _required(setting_name):
        # Fix: the original used bare ``except:`` clauses, which swallowed
        # *every* exception.  Only a missing attribute should be translated
        # into a configuration error.
        try:
            return getattr(settings, setting_name)
        except AttributeError:
            raise ValueError('%s must be specified in settings.py'
                             % setting_name)

    base = _required('LDAP_BASE')
    url = _required('LDAP_URL')
    # optional service-account credentials
    admin_user = getattr(settings, 'LDAP_ADMIN_USER', '')
    admin_password = getattr(settings, 'LDAP_ADMIN_PASSWORD', '')
    user_login_attr = _required('LDAP_USER_LOGIN_ATTR')
    user_base = _required('LDAP_USER_BASE')
    user_attr_map = _required('LDAP_USER_ATTR_MAP')
    group_id_attr = _required('LDAP_GROUP_ID_ATTR')
    group_base = _required('LDAP_GROUP_BASE')
    group_attr_map = _required('LDAP_GROUP_ATTR_MAP')

    _ldap_auth = LDAPBackend("ldap", url, base, user_login_attr,
                             user_base, user_attr_map, group_id_attr,
                             group_base, group_attr_map, admin_user,
                             admin_password)
    return _ldap_auth
| aaryani/CoreTardisTemp | tardis/tardis_portal/auth/ldap_auth.py | Python | bsd-3-clause | 11,778 |
from django import forms
from common.forms import C2Form
from common.methods import generate_string_from_template_for_server
from utilities.logger import ThreadLogger
from utilities.forms import ConnectionInfoForm
logger = ThreadLogger(__name__)
class TintriEndpointForm(ConnectionInfoForm):
    """Connection-info form for a Tintri appliance endpoint.

    Hides the name field (fixed value), defaults new endpoints to HTTPS on
    port 443, drops the SSH/header fields that Tintri does not use, and
    verifies connectivity during validation.
    """
    protocol = forms.ChoiceField(
        choices=[('http', 'HTTP'), ('https', 'HTTPS')], label='Protocol')

    def __init__(self, *args, **kwargs):
        super(TintriEndpointForm, self).__init__(*args, **kwargs)
        self.fields["name"].widget = forms.HiddenInput()
        self.fields["name"].initial = "Tintri Appliance Endpoint"

        # only default protocol/port for brand-new endpoints
        if not self.initial_instance:
            self.fields["port"].initial = 443
            self.fields["protocol"].initial = "https"

        # ConnectionInfo has support for ssh key which we don't need for Tintri
        del self.fields['ssh_key']
        del self.fields['use_auth_headers']
        del self.fields['headers']

        # mark all fields as required
        for field in list(self.fields.values()):
            field.required = True
        # except the labels field
        self.fields["labels"].required = False

    def clean(self):
        """Validate the endpoint by attempting a live connection."""
        try:
            from xui.tintri.tintri_api import Tintri
            tintri = Tintri()
            tintri.verify_connection(
                self.cleaned_data.get('protocol'),
                self.cleaned_data.get('ip'),
                self.cleaned_data.get('port'),
                self.cleaned_data.get('username'),
                self.cleaned_data.get('password'),
            )
        # Fix: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; narrow it to Exception.
        except Exception:
            raise forms.ValidationError(
                "Unable to connect to Tintri Appliance's Endpoint using the parameters provided "
            )
        return self.cleaned_data

    def save(self, *args, **kwargs):
        endpoint = super(TintriEndpointForm, self).save(*args, **kwargs)
        return endpoint
class TintriSnapshotForm(C2Form):
    """Form selecting how long a Tintri VM snapshot should be retained."""
    # choices are minutes encoded as strings; '-1' means the snapshot
    # never expires
    duration = forms.ChoiceField(
        choices=[
            ('60', 'One Hour'),
            ('180', "3 Hours"),
            ('1440', "One Day"),
            ('10080', 'One Week'),
            ('-1', 'Never Expires')
        ], label='Duration (in minutes)')

    def __init__(self, *args, **kwargs):
        # the server the snapshot applies to is injected by the caller
        self.server = kwargs.pop("server")
        super().__init__(*args, **kwargs)

    def save(self):
        """Return the chosen duration as an int, keyed as callers expect."""
        return {'snapshot_duration': int(self.cleaned_data['duration'])}
class TintriCloneSnapshotForm(C2Form):
    """Form asking for the name of a VM cloned from a Tintri snapshot."""
    new_vm_name = forms.CharField(label="New VM Name")

    def __init__(self, *args, **kwargs):
        # the source server is injected by the caller
        self.server = kwargs.pop("server")
        super().__init__(*args, **kwargs)
        # pre-fill the field with '<hostname>-tintriclone-00X'
        name_template = "{{ server.hostname }}-tintriclone-00X"
        new_name = generate_string_from_template_for_server(
            name_template, self.server
        )
        self.fields['new_vm_name'].initial = new_name

    def save(self):
        """Return the chosen name in the shape callers expect."""
        return {'new_vm_name': self.cleaned_data['new_vm_name']}
| CloudBoltSoftware/cloudbolt-forge | ui_extensions/tintri/forms.py | Python | apache-2.0 | 2,981 |
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2008 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from molmod.io.output_parsers import FileParser, MultiLineParser
from molmod.data.periodic import periodic
from molmod.molecules import Molecule
import re, numpy
class ScfEnergiesParser(FileParser):
    """Collect every 'total scf energy' value from an MPQC .out file."""
    filename = ".out"
    extension = True

    def __init__(self, label='scf_energies', condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"total scf energy\s+=\s+(?P<energy>\S+)")

    def reset(self):
        # fresh list for each parsed file
        self.energies = []

    def parse(self, line):
        match = self.re.search(line)
        if match is not None:  # idiom fix: identity test (was ``!= None``)
            self.energies.append(float(match.group("energy")))

    def result(self):
        return self.energies
class MolecularEnergiesParser(FileParser):
    """Collect every 'Value of the MolecularEnergy' from an MPQC .out file."""
    filename = ".out"
    extension = True

    def __init__(self, label='molecular_energies', condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"Value of the MolecularEnergy:\s+(?P<energy>\S+)")

    def reset(self):
        # fresh list for each parsed file
        self.energies = []

    def parse(self, line):
        match = self.re.search(line)
        if match is not None:  # idiom fix: identity test (was ``!= None``)
            self.energies.append(float(match.group("energy")))

    def result(self):
        return self.energies
class EnergyAccuracyParser(FileParser):
    """Extract the first 'value_accuracy' reported in an MPQC .out file."""
    filename = ".out"
    extension = True

    def __init__(self, label='energy_accuracy', condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"value_accuracy\s+=\s+(?P<energy_accuracy>\S+)")

    def reset(self):
        self.energy_accuracy = None

    def parse(self, line):
        # only the first occurrence is kept
        if self.energy_accuracy is None:  # idiom fix (was ``== None``)
            match = self.re.search(line)
            if match is not None:
                self.energy_accuracy = float(match.group("energy_accuracy"))

    def result(self):
        return self.energy_accuracy
class WarningParser(FileParser):
    """Report whether any 'WARNING:' line occurs in an MPQC .out file."""
    filename = ".out"
    extension = True

    def __init__(self, label='warnings', condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"WARNING:")

    def reset(self):
        self.warnings = False

    def parse(self, line):
        # stop searching once the first warning has been seen
        if not self.warnings:
            match = self.re.search(line)
            if match is not None:  # idiom fix (was ``!= None``)
                self.warnings = True

    def result(self):
        return self.warnings
class OptimizationConvergedParser(FileParser):
    """Report whether the geometry optimization converged."""
    filename = ".out"
    extension = True

    def __init__(self, label='optimization_converged', condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"The optimization has converged.")

    def reset(self):
        self.converged = False

    def parse(self, line):
        # stop searching once convergence has been seen
        if not self.converged:
            match = self.re.search(line)
            if match is not None:  # idiom fix (was ``!= None``)
                self.converged = True

    def result(self):
        return self.converged
class OutputMoleculesParser(MultiLineParser):
    """Extract molecule geometries printed in an MPQC .out file."""
    filename = ".out"
    extension = True

    def __init__(self, label='output_molecules', condition=None):
        # a geometry block starts at 'n atoms geometry' and ends at '}'
        activator = re.compile(r"n\s+atoms\s+geometry")
        deactivator = re.compile(r"}$")
        MultiLineParser.__init__(self, label, activator, deactivator, condition)
        # element symbol followed by bracketed x, y, z coordinates
        self.re = re.compile(r"(?P<symbol>\S+)\s*\[\s*(?P<x>\S+)\s*(?P<y>\S+)\s*(?P<z>\S+)\s*\]")

    def reset(self):
        MultiLineParser.reset(self)
        self.molecules = []

    def start_collecting(self):
        # atom rows of the geometry currently being read
        self.current_atoms = []

    def collect(self, line):
        # NOTE(review): assumes every collected line matches the pattern; a
        # non-matching line would raise AttributeError on match.group().
        match = self.re.search(line)
        self.current_atoms.append([
            periodic[match.group("symbol")].number,
            float(match.group("x")),
            float(match.group("y")),
            float(match.group("z"))
        ])

    def stop_collecting(self):
        self.molecules.append(Molecule(self.current_atoms))
        del self.current_atoms

    def result(self):
        return self.molecules
class GradientsParser(MultiLineParser):
    """Collect each 'Total Gradient' block as an (N, 3) numpy array."""
    filename = ".out"
    extension = True

    def __init__(self, label='gradients', condition=None):
        # a gradient block starts at 'Total Gradient' and ends at a blank line
        activator = re.compile(r"Total Gradient")
        deactivator = re.compile(r"^$")
        MultiLineParser.__init__(self, label, activator, deactivator, condition)
        self.re = re.compile(r"\d+\s+\S+\s+(?P<gradient_x>\S+)\s+(?P<gradient_y>\S+)\s+(?P<gradient_z>\S+)")

    def reset(self):
        MultiLineParser.reset(self)
        self.gradients = []

    def start_collecting(self):
        # rows of the gradient currently being read
        self.current_gradient = []

    def collect(self, line):
        match = self.re.search(line)
        if match is not None:  # idiom fix: identity test (was ``!= None``)
            self.current_gradient.append([
                float(match.group("gradient_x")),
                float(match.group("gradient_y")),
                float(match.group("gradient_z"))
            ])

    def stop_collecting(self):
        gradient = numpy.array(self.current_gradient, float)
        self.gradients.append(gradient)
        del self.current_gradient

    def result(self):
        return self.gradients
class GradientAccuracyParser(FileParser):
    """Extract the first 'gradient_accuracy' reported in an MPQC .out file."""
    filename = ".out"
    extension = True

    def __init__(self, label='gradient_accuracy', condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"gradient_accuracy\s+=\s+(?P<gradient_accuracy>\S+)")

    def reset(self):
        self.gradient_accuracy = None

    def parse(self, line):
        # only the first occurrence is kept
        if self.gradient_accuracy is None:  # idiom fix (was ``== None``)
            match = self.re.search(line)
            if match is not None:
                self.gradient_accuracy = float(match.group("gradient_accuracy"))

    def result(self):
        return self.gradient_accuracy
class HessianParser(FileParser):
    """Parse the lower-triangular Hessian matrix from a .hess file.

    NOTE(review): Python 2 only -- uses ``xrange`` and relies on integer
    ``/`` division in the element-count arithmetic below.
    """
    filename = ".hess"
    extension = True

    def __init__(self, label='hessian', condition=None):
        FileParser.__init__(self, label, condition)
        self.re_num_atoms = re.compile(r"(?P<num_atoms>\d+)\s+atoms")

    def reset(self):
        self.num_atoms = None
        self.begin_line = None
        self.end_line = None
        self.current_line = 0
        self.hessian_elements = []

    def parse(self, line):
        if self.num_atoms == None:
            match = self.re_num_atoms.search(line)
            if match != None:
                self.num_atoms = int(match.group("num_atoms"))
                # elements in the lower triangle, diagonal included
                num_elements = self.num_atoms*3 * (self.num_atoms*3 + 1) / 2
                # elements are printed five per line
                num_lines = num_elements / 5
                if num_elements % 5 > 0: num_lines += 1
                # the matrix section starts after the atom list
                self.begin_line = self.num_atoms + 2
                self.end_line = self.begin_line + num_lines
        elif (self.current_line >= self.begin_line) and (self.current_line < self.end_line):
            self.hessian_elements.extend(float(word) for word in line.split())
            #print line
            #print [float(word) for word in line.split()]
        self.current_line += 1

    def result(self):
        # Rebuild the full symmetric matrix from the stored triangle,
        # walking the elements in the same row-major triangular order in
        # which they were read.
        if self.num_atoms != None:
            result = numpy.zeros((self.num_atoms*3, self.num_atoms*3), float)
            counter = 0
            for i in xrange(self.num_atoms*3):
                for j in xrange(0, i):
                    result[i,j] = self.hessian_elements[counter]
                    result[j,i] = self.hessian_elements[counter]
                    counter += 1
                result[i,i] = self.hessian_elements[counter]
                counter += 1
            return result
class RawGridParser(FileParser):
    """Parse a '# Number of records:' grid dump of (x, y, z, value) rows.

    Fix: comparisons with None now use identity tests.  In particular
    ``self.grid == None`` broadcasts element-wise once the grid is a numpy
    array, which is ambiguous in boolean context on modern numpy; ``is
    None`` is both correct and equivalent to the original intent.
    """
    filename = ".out"
    extension = True

    def __init__(self, label='grid', trigger=None, condition=None):
        # optional literal line that must be seen before the grid header
        self.trigger = trigger
        FileParser.__init__(self, label, condition)

    def reset(self):
        self.grid = None
        # with no trigger, the header may appear anywhere from line one
        self.active = (self.trigger is None)
        self.read_nrecords = (self.trigger is None)

    def parse(self, line):
        if self.active:
            words = line.split()
            if len(words) == 4:
                try:
                    self.grid[self.counter, 0] = float(words[0])
                    self.grid[self.counter, 1] = float(words[1])
                    self.grid[self.counter, 2] = float(words[2])
                    self.grid[self.counter, 3] = float(words[3])
                except ValueError:
                    # non-numeric row: the grid section has ended
                    self.active = False
            else:
                self.active = False
            self.counter += 1
            if self.counter >= self.nrecords:
                self.active = False
        elif self.read_nrecords:
            # the line right after the trigger announces the record count
            assert line.startswith("# Number of records:")
            words = line.split()
            self.nrecords = int(words[-1])
            self.grid = numpy.zeros((self.nrecords, 4), float)
            self.counter = 0
            self.active = True
            self.read_nrecords = False
        elif self.grid is None:
            if line == self.trigger: self.read_nrecords = True

    def result(self):
        return self.grid
class TotalChargeParser(FileParser):
    """Extract the last 'total charge' value reported in a .out file."""
    filename = ".out"
    extension = True

    def __init__(self, label='total_charge', condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"total charge =\s+(?P<total_charge>\S+)")

    def reset(self):
        self.total_charge = None

    def parse(self, line):
        match = self.re.search(line)
        if match is not None:  # idiom fix: identity test (was ``!= None``)
            self.total_charge = float(match.group("total_charge"))

    def result(self):
        return self.total_charge
class NPAParser(MultiLineParser):
    """Collect charges from a 'Natural Population Analysis:' section."""
    filename = ".out"
    extension = True

    def __init__(self, label='npa_charges', condition=None):
        # the section runs from its title line to the next blank line
        activator = re.compile(r"Natural Population Analysis:")
        deactivator = re.compile(r"^$")
        MultiLineParser.__init__(self, label, activator, deactivator, condition)
        self.re = re.compile(r"^\s+\d+\s+\S+\s+(?P<npa_charge>\S+)")

    def start_collecting(self):
        self.npa_charges = []

    def collect(self, line):
        match = self.re.search(line)
        if match is not None:  # idiom fix: identity test (was ``!= None``)
            self.npa_charges.append(float(match.group("npa_charge")))

    def stop_collecting(self):
        pass

    def result(self):
        return numpy.array(self.npa_charges)
class MOParser(FileParser):
    """Base parser for '<TYPE> is <level> <spin> = <energy>' orbital lines."""
    filename = ".out"
    extension = True

    def __init__(self, label, mo_type, condition=None):
        FileParser.__init__(self, label, condition)
        self.re = re.compile(r"%s is\s+(?P<level>\d+)\s+(?P<spin>\S)\s+=\s+(?P<energy>\S+)" % mo_type)

    def reset(self):
        self.level = None
        self.spin = None
        self.energy = None

    def parse(self, line):
        # the last matching line in the file wins
        match = self.re.search(line)
        if match is not None:  # idiom fix: identity test (was ``!= None``)
            self.level = int(match.group("level"))
            self.spin = match.group("spin")
            self.energy = float(match.group("energy"))

    def result(self):
        return self.level, self.spin, self.energy
class LUMOParser(MOParser):
    """Parse the LUMO (level, spin, energy) line from a .out file."""
    def __init__(self, label="lumo", condition=None):
        MOParser.__init__(self, label, "LUMO", condition)
class HOMOParser(MOParser):
    """Parse the HOMO (level, spin, energy) line from a .out file."""
    def __init__(self, label="homo", condition=None):
        MOParser.__init__(self, label, "HOMO", condition)
| woutersmet/Molmodsummer | lib/molmod/io/mpqc/file_parsers.py | Python | gpl-3.0 | 11,970 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
# Read the package version straight out of kafkatest/__init__.py so the
# package does not have to be importable (and its deps installed) at
# sdist/install time.
version = ''
with open('kafkatest/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to py.test."""
    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # extra command-line arguments forwarded verbatim to py.test
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        print(self.pytest_args)
        # propagate py.test's exit status to the shell
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)
# Note: when changing the version of ducktape, also revise tests/docker/Dockerfile
# Package metadata for the Kafka system-test helpers; the custom cmdclass
# wires ``python setup.py test`` to py.test via the PyTest command above.
setup(name="kafkatest",
      version=version,
      description="Apache Kafka System Tests",
      author="Apache Kafka",
      platforms=["any"],
      license="apache2.0",
      packages=find_packages(),
      include_package_data=True,
      install_requires=["ducktape==0.8.8", "requests==2.24.0"],
      tests_require=["pytest", "mock"],
      cmdclass={'test': PyTest},
      zip_safe=False
      )
| guozhangwang/kafka | tests/setup.py | Python | apache-2.0 | 2,114 |
import csv, json, io, sys
from collections import OrderedDict
def write_dict_to_json(data):
    """This function pretty prints an input dictionary and returns
    its string representation i.e. a json string

    The sub-dictionaries (the values of *data*) are emitted as a list
    under the top-level "world_indices" key, in insertion order.
    """
    # list(data.values()) is the idiomatic form of [data[k] for k in data]
    jsonstring = json.dumps({"world_indices": list(data.values())}, indent=4)
    return jsonstring
def write_dict_to_csv(data):
    """Render a dict of uniform sub-dictionaries as CSV text.

    The first sub-dictionary supplies the header row (its keys); every
    sub-dictionary then contributes one row of its values.  Rows end with
    a newline and non-numeric cells are quoted.  Assumes all
    sub-dictionaries share the same keys.
    """
    # Python 3 csv writes text, Python 2 csv writes bytes.
    if sys.version_info[0] > 2:
        buffer = io.StringIO()
    else:
        buffer = io.BytesIO()
    csv_writer = csv.writer(buffer, quoting=csv.QUOTE_NONNUMERIC,
                            lineterminator='\n')
    wrote_header = False
    for outer_key in data:
        row = data[outer_key]
        if not wrote_header:
            # header comes from the first sub-dictionary's keys
            csv_writer.writerow(list(row))
            wrote_header = True
        csv_writer.writerow([row[column] for column in row])
    return buffer.getvalue()
| howsunjow/YahooFinance | yahoo_finance/utils.py | Python | mit | 1,081 |
"""
Mica permissions
"""
import mica.core
# Subject kinds a permission can be granted to.
SUBJECT_TYPES = ('USER', 'GROUP')
# Roles accepted by the --permission argument (server-side upper-case names).
PERMISSIONS = ('READER', 'EDITOR', 'REVIEWER')
def add_permission_arguments(parser):
    """
    Add permission arguments

    Exactly one of --add/--delete is expected, but that constraint is
    enforced later by validate_args(), not by argparse itself.
    """
    parser.add_argument('--add', '-a', action='store_true', help='Add a permission')
    parser.add_argument('--delete', '-d', action='store_true', required=False, help='Delete a permission')
    parser.add_argument('--permission', '-pe', help="Permission to apply: %s" % ', '.join(PERMISSIONS).lower())
    parser.add_argument('--subject', '-s', required=True, help='Subject name to which the permission will be granted')
    parser.add_argument('--type', '-ty', required=False, help='Subject type: user or group')
def map_permission(permission):
    """
    Map permission argument to permission query parameter

    Returns the upper-cased role name, or None when it is not one of
    PERMISSIONS.
    """
    role = permission.upper()  # compute once (the original called .upper() twice)
    if role not in PERMISSIONS:
        return None
    return role
def validate_args(args):
    """
    Validate action, permission and subject type

    Raises Exception with a user-facing message on the first violation.
    """
    if not args.add and not args.delete:
        # Fix: the delete short option is '-d' (see add_permission_arguments),
        # the message previously said '-de'.
        raise Exception("You must specify a permission operation: [--add|-a] or [--delete|-d]")
    if args.add:
        if not args.permission:
            raise Exception("A permission name is required: %s" % ', '.join(PERMISSIONS).lower())
        if map_permission(args.permission) is None:
            raise Exception("Valid permissions are: %s" % ', '.join(PERMISSIONS).lower())
        if not args.type or args.type.upper() not in SUBJECT_TYPES:
            raise Exception("Valid subject types are: %s" % ', '.join(SUBJECT_TYPES).lower())
def do_ws(args, path):
    """
    Build the web service resource path

    --add sends type, role and principal; --delete omits the role.
    """
    if args.add:
        return mica.core.UriBuilder(path) \
            .query('type', args.type.upper()) \
            .query('role', map_permission(args.permission)) \
            .query('principal', args.subject) \
            .build()
    if args.delete:
        return mica.core.UriBuilder(path) \
            .query('type', args.type.upper()) \
            .query('principal', args.subject) \
            .build()
    # NOTE(review): falls through to None when neither flag is set; callers
    # are expected to have run validate_args() first.
| Rima-B/mica2 | mica-python-client/src/main/python/mica/perm.py | Python | gpl-3.0 | 1,976 |
import threading
total = 0
lock = threading.Lock()
def actualizar_total(cantidad):
    """Add *cantidad* to the shared global total under the lock and print
    the resulting value.

    Fix: the original printed ``total`` *outside* the lock, so the printed
    number could already include updates from other threads (read race).
    The new value is captured inside the critical section and printed
    afterwards, keeping the print itself out of the lock.
    """
    global total
    with lock:
        total += cantidad
        nuevo_total = total
    print(nuevo_total)
if __name__ == '__main__':
    # Fix: keep references to the worker threads and join them, so the
    # program's completion is explicit (the original started ten threads
    # and never joined any of them).
    hilos = []
    for x in range(10):
        mi_hilo = threading.Thread(target=actualizar_total, args=(10,))
        mi_hilo.start()
        hilos.append(mi_hilo)
    for hilo in hilos:
        hilo.join()
| ampotty/uip-pc4 | 05.Hilos/Ejemplo/app/hilo4.py | Python | mit | 323 |
# -*- coding: utf-8 -*-
# template 18
"""
Various tools at your fingertips.
The available tools are:
* cvt_csv_2_rst.py: convert csv file into rst file
* cvt_csv_2_xml.py: convert csv file into xml file
* cvt_script: parse bash script and convert to meet company standard
* gen_readme.py: generate documentation files, mainly README.rst
* odoo_dependency.py: show odoo depencies and/or Odoo module tree
* odoo_translation.py: manage Odoo translation
* pep8: parse source .py file to meet pep8 and convert across Odoo versions
* please: developer shell
* wget_odoo_repositories.py: get repository names from github.com
"""
import os
import sys
import pkg_resources
import gzip
import shutil
__version__ = '1.0.7.1'
def fake_setup(**kwargs):
    """Capture the keyword arguments of a setup() call into module globals.

    Stand-in for setuptools.setup when read_setup() execs a setup script.
    """
    globals()['setup_args'] = kwargs
def read_setup():
    """Load the package's setup metadata.

    Reads ./setup.info if present, otherwise falls back to ../setup.py,
    and executes it with ``setup(`` rewritten to ``fake_setup(`` so the
    keyword arguments land in the module global ``setup_args``.  The
    'name' and 'version' keys are then overridden from the installed
    distribution when pkg_resources can resolve it.

    Returns the dict of setup() keyword arguments, plus a 'setup' key
    holding the path of the metadata file that was used.
    """
    setup_info = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'setup.info'))
    if not os.path.isfile(setup_info):
        # Source checkout: metadata lives in ../setup.py instead.
        setup_info = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'setup.py'))
    setup_args = {}
    if os.path.isfile(setup_info):
        with open(setup_info, 'r') as fd:
            # Execute the setup script with setup() redirected to
            # fake_setup(), which stores the kwargs in globals().
            exec(fd.read().replace('setup(', 'fake_setup('))
        setup_args = globals()['setup_args']
    else:
        print('Not internal configuration file found!')
    setup_args['setup'] = setup_info
    try:
        # Prefer the metadata of the installed distribution, if any.
        pkg = pkg_resources.get_distribution(__package__.split('.')[0])
        setup_args['name'] = pkg.key
        setup_args['version'] = pkg.version
    except BaseException:
        # Best-effort: fall back to whatever the setup file declared.
        pass
    return setup_args
def get_pypi_paths():
    """Locate the package directory and its venv bin/ and lib/ directories.

    Walks up from this file's parent directory looking for a directory
    named 'lib' (stopping at '/' or $HOME); when found, bin/ is assumed
    to be its sibling.  If nothing is found, falls back to scanning
    sys.path for the conventional '/devel/venv/' layout.

    Returns (pkgpath, bin_path, lib_path); bin_path and lib_path may be
    empty strings when no matching layout is detected.
    """
    local_venv = '/devel/venv/'
    pkgpath = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..'))
    bin_path = lib_path = ''
    path = pkgpath
    # Climb towards the filesystem root looking for a .../lib directory.
    while not bin_path and path != '/' and path != os.environ['HOME']:
        path = os.path.dirname(path)
        if os.path.isdir(path) and os.path.basename(path) == 'lib':
            bin_path = os.path.join(os.path.dirname(path), 'bin')
            lib_path = path
    if not bin_path and local_venv:
        # Fallback: derive the venv root from any sys.path entry that
        # contains the '/devel/venv/' marker.
        for path in sys.path:
            if local_venv in path:
                bin_path = os.path.join(
                    path[:path.find(local_venv)],
                    *[x for x in local_venv.split('/') if x][:-1])
                break
    return pkgpath, bin_path, lib_path
def copy_pkg_data(setup_args, verbose):
    """Install the files listed in setup_args['package_data'].

    For each declared data file: '*.man' files are gzipped into the man8
    directory; other files are copied into lib/ and symlinked from bin/.
    A legacy ~/devel directory is also refreshed ("compatibility mode").
    When `verbose` is true, an equivalent shell command is printed for
    each copy/symlink performed.
    """
    if setup_args.get('package_data'):
        pkgpath, bin_path, lib_path = get_pypi_paths()
        if bin_path:
            # TODO> compatibility mode
            bin2_path = os.path.join(os.environ['HOME'], 'devel')
            if not os.path.isdir(bin2_path):
                bin2_path = ''
            man_path = os.path.join(bin_path, 'man', 'man8')
            if not os.path.isdir(man_path):
                man_path = ''
            for pkg in setup_args['package_data'].keys():
                for fn in setup_args['package_data'][pkg]:
                    base = os.path.basename(fn)
                    # Skip the metadata file itself and glob patterns.
                    if base in ('setup.info', '*'):
                        continue
                    full_fn = os.path.abspath(os.path.join(pkgpath, fn))
                    if base.endswith('.man') and man_path:
                        # Man pages: gzip into man8 as '<name>.8.gz'.
                        with open(full_fn, 'r') as fd:
                            help_text = fd.read()
                        tgt_fn = os.path.join(man_path, '%s.8.gz' % base[:-4])
                        with gzip.open(tgt_fn, 'w') as fd:
                            if sys.version_info[0] == 3:
                                # gzip in binary mode needs bytes on py3.
                                fd.write(help_text.encode('utf-8'))
                            else:
                                fd.write(help_text)
                        continue
                    if lib_path:
                        # Copy the data file into lib/ ...
                        tgt_fn = os.path.join(lib_path, base)
                        if verbose:
                            print('$ cp %s %s' % (full_fn, tgt_fn))
                        shutil.copy(full_fn, tgt_fn)
                        # TODO> compatibility mode
                        # ... and replace any stale bin/ entry with a symlink.
                        tgt_fn = os.path.join(bin_path, base)
                        if os.path.isfile(tgt_fn):
                            os.unlink(tgt_fn)
                        if not os.path.exists(tgt_fn):
                            if verbose:
                                print('$ ln -s %s %s' % (full_fn, tgt_fn))
                            os.symlink(full_fn, tgt_fn)
                        if bin2_path:
                            # Legacy location: only remove the stale copy.
                            tgt_fn = os.path.join(bin2_path, base)
                            if os.path.isfile(tgt_fn):
                                os.unlink(tgt_fn)
                            # if not os.path.exists(tgt_fn):
                            #     if verbose:
                            #         print('$ ln -s %s %s' % (full_fn, tgt_fn))
                            #     os.symlink(full_fn, tgt_fn)
        # TODO> compatibility mode to remove early
        # NOTE(review): bin2_path is only assigned inside the `if bin_path:`
        # branch above; if bin_path is empty while lib_path is set, this
        # reference would raise NameError — confirm intended.
        if lib_path and bin2_path:
            for base in ('z0librc', 'odoorc', 'travisrc'):
                full_fn = os.path.join(bin2_path, base)
                tgt_fn = os.path.join(bin_path, base)
                if os.path.exists(full_fn) and not os.path.exists(tgt_fn):
                    if verbose:
                        print('$ cp %s %s' % (full_fn, tgt_fn))
                    shutil.copy(full_fn, tgt_fn)
def main(cli_args=None):
    """Command line entry point.

    Options (last action flag wins):
      -h                    short usage line
      -H / --help           print the module docstring
      -V / --version        print the package version (flags mismatches)
      -C / --copy-pkg-data  install the package data files
      -v                    verbose mode

    Returns 0 on completion.
    """
    if not cli_args:
        cli_args = sys.argv[1:]
    action = '-H'
    verbose = False
    for arg in cli_args:
        # Bug fix: '-C' was missing from this tuple, so the documented
        # short form of --copy-pkg-data was silently ignored.
        if arg in ('-h', '-H', '--help', '-V', '--version',
                   '-C', '--copy-pkg-data'):
            action = arg
        elif arg == '-v':
            verbose = True
    setup_args = read_setup()
    if action == '-h':
        print('%s [-h][-H][--help][-V][--version][-C][--copy-pkg-data]' %
              setup_args['name'])
    elif action in ('-V', '--version'):
        if setup_args['version'] == __version__:
            print(setup_args['version'])
        else:
            print('Version mismatch %s/%s' % (setup_args['version'],
                                              __version__))
    elif action in ('-H', '--help'):
        for text in __doc__.split('\n'):
            print(text)
    elif action in ('-C', '--copy-pkg-data'):
        copy_pkg_data(setup_args, verbose)
    return 0
# Copyright 2014, 2015, Nik Kinkel and David Johnston
# See LICENSE for licensing information
class NotEnoughBytes(Exception):
    """Raised when fewer bytes are available than required."""
    pass
class UnknownCellCommand(Exception):
    """Raised for a cell command value that is not recognized."""
    pass
class BadCellPayloadLength(Exception):
    """Raised for a cell payload length that is not valid."""
    pass
class BadPayloadData(Exception):
    """Raised for payload data that fails validation."""
    pass
class BadLinkSpecifier(Exception):
    """Raised for a link specifier that is not valid."""
    pass
class BadCellHeader(Exception):
    """Raised for a cell header that is not valid."""
    pass
class BadRelayCellHeader(Exception):
    """Raised for a relay cell header that is not valid."""
    pass
| nskinkel/oppy | oppy/cell/exceptions.py | Python | bsd-3-clause | 416 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.utils import timeutils
import six
from keystone import assignment
from keystone.common import controller
from keystone.common import dependency
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
def _trustor_trustee_only(trust, user_id):
if (user_id != trust.get('trustee_user_id') and
user_id != trust.get('trustor_user_id')):
raise exception.Forbidden()
def _admin_trustor_only(context, trust, user_id):
if user_id != trust.get('trustor_user_id') and not context['is_admin']:
raise exception.Forbidden()
@dependency.requires('assignment_api', 'identity_api', 'trust_api',
                     'token_api')
class TrustV3(controller.V3Controller):
    """V3 controller for the OS-TRUST extension (trust CRUD and roles)."""
    collection_name = "trusts"
    member_name = "trust"
    @classmethod
    def base_url(cls, context, path=None):
        """Construct a path and pass it to V3Controller.base_url method."""
        # NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that
        # V3Controller.base_url handles setting the self link correctly.
        path = '/OS-TRUST/' + cls.collection_name
        return super(TrustV3, cls).base_url(context, path=path)
    def _get_user_id(self, context):
        """Return the user id from the request's token, or None if absent."""
        if 'token_id' in context:
            token_id = context['token_id']
            token = self.token_api.get_token(token_id)
            user_id = token['user']['id']
            return user_id
        return None
    def get_trust(self, context, trust_id):
        """Return one trust; only its trustor or trustee may read it."""
        user_id = self._get_user_id(context)
        trust = self.trust_api.get_trust(trust_id)
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        _trustor_trustee_only(trust, user_id)
        self._fill_in_roles(context, trust,
                            self.assignment_api.list_roles())
        return TrustV3.wrap_member(context, trust)
    def _fill_in_roles(self, context, trust, all_roles):
        """Expand the trust's role ids into full role refs, in place.

        Also normalizes 'expires_at' to an ISO-8601 string and adds the
        'roles_links' pagination stanza.
        """
        if trust.get('expires_at') is not None:
            trust['expires_at'] = (timeutils.isotime
                                   (trust['expires_at'],
                                    subsecond=True))
        if 'roles' not in trust:
            trust['roles'] = []
        trust_full_roles = []
        for trust_role in trust['roles']:
            # Roles may arrive as bare id strings; normalize to dicts.
            if isinstance(trust_role, six.string_types):
                trust_role = {'id': trust_role}
            matching_roles = [x for x in all_roles
                              if x['id'] == trust_role['id']]
            if matching_roles:
                full_role = assignment.controllers.RoleV3.wrap_member(
                    context, matching_roles[0])['role']
                trust_full_roles.append(full_role)
        trust['roles'] = trust_full_roles
        trust['roles_links'] = {
            'self': (self.base_url(context) + "/%s/roles" % trust['id']),
            'next': None,
            'previous': None}
    def _clean_role_list(self, context, trust, all_roles):
        """Normalize requested roles to {'id': ...} refs, resolving names.

        Raises RoleNotFound for an unknown role name and ValidationError
        when a role entry has neither 'id' nor 'name'.
        """
        trust_roles = []
        all_role_names = dict((r['name'], r) for r in all_roles)
        for role in trust.get('roles', []):
            if 'id' in role:
                trust_roles.append({'id': role['id']})
            elif 'name' in role:
                rolename = role['name']
                if rolename in all_role_names:
                    trust_roles.append({'id':
                                        all_role_names[rolename]['id']})
                else:
                    raise exception.RoleNotFound("role %s is not defined" %
                                                 rolename)
            else:
                raise exception.ValidationError(attribute='id or name',
                                                target='roles')
        return trust_roles
    @controller.protected()
    def create_trust(self, context, trust=None):
        """Create a new trust.
        The user creating the trust must be the trustor.
        """
        # Explicitly prevent a trust token from creating a new trust.
        auth_context = context.get('environment',
                                   {}).get('KEYSTONE_AUTH_CONTEXT', {})
        if auth_context.get('is_delegated_auth'):
            raise exception.Forbidden(
                _('Cannot create a trust'
                  ' with a token issued via delegation.'))
        if not trust:
            raise exception.ValidationError(attribute='trust',
                                            target='request')
        self._require_attributes(trust, ['impersonation', 'trustee_user_id',
                                         'trustor_user_id'])
        # Project-scoped trusts must delegate at least one role.
        if trust.get('project_id'):
            self._require_role(trust)
        self._require_user_is_trustor(context, trust)
        self._require_trustee_exists(trust['trustee_user_id'])
        all_roles = self.assignment_api.list_roles()
        clean_roles = self._clean_role_list(context, trust, all_roles)
        # A trustor can only delegate roles they actually hold.
        self._require_trustor_has_role_in_project(trust, clean_roles)
        trust['expires_at'] = self._parse_expiration_date(
            trust.get('expires_at'))
        trust_id = uuid.uuid4().hex
        new_trust = self.trust_api.create_trust(trust_id, trust, clean_roles)
        self._fill_in_roles(context, new_trust, all_roles)
        return TrustV3.wrap_member(context, new_trust)
    def _require_trustee_exists(self, trustee_user_id):
        """Raise if the trustee user id does not resolve to a user."""
        self.identity_api.get_user(trustee_user_id)
    def _require_user_is_trustor(self, context, trust):
        """Raise Forbidden unless the caller is the trust's trustor."""
        user_id = self._get_user_id(context)
        if user_id != trust.get('trustor_user_id'):
            raise exception.Forbidden(
                _("The authenticated user should match the trustor."))
    def _require_role(self, trust):
        """Raise Forbidden when the trust delegates no roles."""
        if not trust.get('roles'):
            raise exception.Forbidden(
                _('At least one role should be specified.'))
    def _get_user_role(self, trust):
        """Return the trustor's role ids on the trust's project, if any."""
        if not self._attribute_is_empty(trust, 'project_id'):
            return self.assignment_api.get_roles_for_user_and_project(
                trust['trustor_user_id'], trust['project_id'])
        else:
            return []
    def _require_trustor_has_role_in_project(self, trust, clean_roles):
        """Raise RoleNotFound for any requested role the trustor lacks."""
        user_roles = self._get_user_role(trust)
        for trust_role in clean_roles:
            matching_roles = [x for x in user_roles
                              if x == trust_role['id']]
            if not matching_roles:
                raise exception.RoleNotFound(role_id=trust_role['id'])
    def _parse_expiration_date(self, expiration_date):
        """Parse an ISO-8601 expiration string (UTC 'Z' appended if missing)."""
        if expiration_date is None:
            return None
        if not expiration_date.endswith('Z'):
            expiration_date += 'Z'
        try:
            return timeutils.parse_isotime(expiration_date)
        except ValueError:
            raise exception.ValidationTimeStampError()
    @controller.protected()
    def list_trusts(self, context):
        """List trusts; an unfiltered listing is admin-only.

        Callers may filter by trustor_user_id or trustee_user_id, but
        only on their own user id.
        """
        query = context['query_string']
        trusts = []
        if not query:
            self.assert_admin(context)
            trusts += self.trust_api.list_trusts()
        if 'trustor_user_id' in query:
            user_id = query['trustor_user_id']
            calling_user_id = self._get_user_id(context)
            if user_id != calling_user_id:
                raise exception.Forbidden()
            trusts += (self.trust_api.
                       list_trusts_for_trustor(user_id))
        if 'trustee_user_id' in query:
            user_id = query['trustee_user_id']
            calling_user_id = self._get_user_id(context)
            if user_id != calling_user_id:
                raise exception.Forbidden()
            trusts += self.trust_api.list_trusts_for_trustee(user_id)
        for trust in trusts:
            # get_trust returns roles, list_trusts does not
            # It seems in some circumstances, roles does not
            # exist in the query response, so check first
            if 'roles' in trust:
                del trust['roles']
            if trust.get('expires_at') is not None:
                trust['expires_at'] = (timeutils.isotime
                                       (trust['expires_at'],
                                        subsecond=True))
        return TrustV3.wrap_collection(context, trusts)
    @controller.protected()
    def delete_trust(self, context, trust_id):
        """Delete a trust (trustor or admin only) and revoke its tokens."""
        trust = self.trust_api.get_trust(trust_id)
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        user_id = self._get_user_id(context)
        _admin_trustor_only(context, trust, user_id)
        self.trust_api.delete_trust(trust_id)
        userid = trust['trustor_user_id']
        # Invalidate any tokens that were issued via this trust.
        self.token_api.delete_tokens(userid, trust_id=trust_id)
    @controller.protected()
    def list_roles_for_trust(self, context, trust_id):
        """Return the roles delegated by a trust (trustor/trustee only)."""
        trust = self.get_trust(context, trust_id)['trust']
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        user_id = self._get_user_id(context)
        _trustor_trustee_only(trust, user_id)
        return {'roles': trust['roles'],
                'links': trust['roles_links']}
    @controller.protected()
    def check_role_for_trust(self, context, trust_id, role_id):
        """Checks if a role has been assigned to a trust."""
        trust = self.trust_api.get_trust(trust_id)
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        user_id = self._get_user_id(context)
        _trustor_trustee_only(trust, user_id)
        if not any(role['id'] == role_id for role in trust['roles']):
            raise exception.RoleNotFound(role_id=role_id)
    @controller.protected()
    def get_role_for_trust(self, context, trust_id, role_id):
        """Get a role that has been assigned to a trust."""
        self.check_role_for_trust(context, trust_id, role_id)
        role = self.assignment_api.get_role(role_id)
        return assignment.controllers.RoleV3.wrap_member(context, role)
| rodrigods/keystone | keystone/trust/controllers.py | Python | apache-2.0 | 10,680 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration for the 'hr' app.

    Creates the Application, Recommendation, Audit and Blacklist tables;
    the frozen ORM state in `models` was generated by South and must not
    be edited by hand.
    """
    def forwards(self, orm):
        """Create the four hr tables."""
        # Adding model 'Application'
        db.create_table('hr_application', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('character', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['eve_api.EVEPlayerCharacter'])),
            ('corporation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['eve_api.EVEPlayerCorporation'])),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal('hr', ['Application'])
        # Adding model 'Recommendation'
        db.create_table('hr_recommendation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('user_character', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['eve_api.EVEPlayerCharacter'])),
            ('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hr.Application'])),
        ))
        db.send_create_signal('hr', ['Recommendation'])
        # Adding model 'Audit'
        db.create_table('hr_audit', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hr.Application'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], blank=True)),
            ('event', self.gf('django.db.models.fields.IntegerField')()),
            ('text', self.gf('django.db.models.fields.TextField')()),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('hr', ['Audit'])
        # Adding model 'Blacklist'
        db.create_table('hr_blacklist', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.IntegerField')()),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('reason', self.gf('django.db.models.fields.TextField')()),
            ('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ))
        db.send_create_signal('hr', ['Blacklist'])
    def backwards(self, orm):
        """Drop the four hr tables (reverse of forwards)."""
        # Deleting model 'Application'
        db.delete_table('hr_application')
        # Deleting model 'Recommendation'
        db.delete_table('hr_recommendation')
        # Deleting model 'Audit'
        db.delete_table('hr_audit')
        # Deleting model 'Blacklist'
        db.delete_table('hr_blacklist')
    # South's frozen ORM snapshot (auto-generated; do not edit by hand).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'eve_api.eveplayeralliance': {
            'Meta': {'object_name': 'EVEPlayerAlliance'},
            'api_last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_founded': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'ticker': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
        },
        'eve_api.eveplayercharacter': {
            'Meta': {'object_name': 'EVEPlayerCharacter'},
            'api_last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'attrib_charisma': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'attrib_intelligence': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'attrib_memory': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'attrib_perception': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'attrib_willpower': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'balance': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_api.EVEPlayerCorporation']", 'null': 'True', 'blank': 'True'}),
            'current_location_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'director_update': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_logoff': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'race': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'total_sp': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'eve_api.eveplayercorporation': {
            'Meta': {'object_name': 'EVEPlayerCorporation'},
            'alliance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_api.EVEPlayerAlliance']", 'null': 'True', 'blank': 'True'}),
            'alliance_join_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'api_last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'applications': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'ceo_character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_api.EVEPlayerCharacter']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo_color1': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'logo_color2': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'logo_color3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'logo_graphic_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'logo_shape1': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'logo_shape2': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'logo_shape3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'member_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'shares': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tax_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'ticker': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'hr.application': {
            'Meta': {'object_name': 'Application'},
            'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_api.EVEPlayerCharacter']"}),
            'corporation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_api.EVEPlayerCorporation']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'hr.audit': {
            'Meta': {'object_name': 'Audit'},
            'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hr.Application']"}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'event': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'})
        },
        'hr.blacklist': {
            'Meta': {'object_name': 'Blacklist'},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reason': ('django.db.models.fields.TextField', [], {}),
            'type': ('django.db.models.fields.IntegerField', [], {}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'hr.recommendation': {
            'Meta': {'object_name': 'Recommendation'},
            'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hr.Application']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'user_character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eve_api.EVEPlayerCharacter']"})
        }
    }
    complete_apps = ['hr']
| nikdoof/test-auth | app/hr/migrations/0001_initial.py | Python | bsd-3-clause | 13,994 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.forms import *
from django.forms.extras import SelectDateWidget
from django.forms.utils import ErrorList
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils import translation
from django.utils.dates import MONTHS_AP
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from .test_error_messages import AssertFormErrorsMixin
class GetDate(Form):
    # Test fixture: required DateField rendered with SelectDateWidget.
    mydate = DateField(widget=SelectDateWidget)
class GetNotRequiredDate(Form):
    # Test fixture: optional (required=False) DateField with SelectDateWidget.
    mydate = DateField(widget=SelectDateWidget, required=False)
class GetDateShowHiddenInitial(Form):
    # Test fixture: DateField with SelectDateWidget and a hidden initial input.
    mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
class FormsExtraTestCase(TestCase, AssertFormErrorsMixin):
###############
# Extra stuff #
###############
# The forms library comes with some extra, higher-level Field and Widget
def test_selectdate(self):
w = SelectDateWidget(years=('2007','2008','2009','2010','2011','2012','2013','2014','2015','2016'))
# Rendering the default state.
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering the None or '' values should yield the same output.
self.assertHTMLEqual(w.render('mydate', None), w.render('mydate', ''))
# Rendering a string value.
self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering a datetime value.
self.assertHTMLEqual(w.render('mydate', datetime.date(2010, 4, 15)), w.render('mydate', '2010-04-15'))
# Invalid dates should still render the failed date.
self.assertHTMLEqual(w.render('mydate', '2010-02-31'), """<select name="mydate_month" id="id_mydate_month">
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Rendering with a custom months dict.
w = SelectDateWidget(months=MONTHS_AP, years=('2013',))
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>""")
# Using a SelectDateWidget in a form.
w = SelectDateWidget(years=('2007','2008','2009','2010','2011','2012','2013','2014','2015','2016'), required=False)
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
a = GetDate({'mydate_month':'4', 'mydate_day':'1', 'mydate_year':'2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict,
# we must be prepared to accept the input from the "as_hidden"
# rendering as well.
self.assertHTMLEqual(a['mydate'].as_hidden(), '<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />')
b = GetDate({'mydate':'2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month':'2', 'mydate_day':'31', 'mydate_year':'2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month':'1', 'mydate_day':'1', 'mydate_year':'2010'})
self.assertTrue('<label for="id_mydate_month">' in d.as_p())
def test_multiwidget(self):
    """MultiWidget + MultiValueField round trip: rendering, cleaning,
    change detection, and use of the pair inside a Form."""
    # MultiWidget and MultiValueField #############################################
    # MultiWidgets are widgets composed of other widgets. They are usually
    # combined with MultiValueFields - a field that is composed of other fields.
    # MultiWidgets can themselves be composed of other MultiWidgets.
    # SplitDateTimeWidget is one example of a MultiWidget.
    class ComplexMultiWidget(MultiWidget):
        def __init__(self, attrs=None):
            widgets = (
                TextInput(),
                SelectMultiple(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                SplitDateTimeWidget(),
            )
            super(ComplexMultiWidget, self).__init__(widgets, attrs)
        def decompress(self, value):
            # 'text,JP,2007-04-25 06:24:00' -> [text, list of choice letters, datetime]
            if value:
                data = value.split(',')
                return [data[0], list(data[1]), datetime.datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S")]
            return [None, None, None]
        def format_output(self, rendered_widgets):
            return '\n'.join(rendered_widgets)
    w = ComplexMultiWidget()
    self.assertHTMLEqual(w.render('name', 'some text,JP,2007-04-25 06:24:00'), """<input type="text" name="name_0" value="some text" />
<select multiple="multiple" name="name_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25" /><input type="text" name="name_2_1" value="06:24:00" />""")
    class ComplexField(MultiValueField):
        def __init__(self, required=True, widget=None, label=None, initial=None):
            fields = (
                CharField(),
                MultipleChoiceField(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
                SplitDateTimeField()
            )
            super(ComplexField, self).__init__(fields, required, widget, label, initial)
        def compress(self, data_list):
            # Join the subfield values back into the single storage string.
            if data_list:
                return '%s,%s,%s' % (data_list[0],''.join(data_list[1]),data_list[2])
            return None
    f = ComplexField(widget=w)
    self.assertEqual(f.clean(['some text', ['J','P'], ['2007-04-25','6:24:00']]), 'some text,JP,2007-04-25 06:24:00')
    self.assertFormErrors(['Select a valid choice. X is not one of the available choices.'], f.clean, ['some text',['X'], ['2007-04-25','6:24:00']])
    # If insufficient data is provided, None is substituted
    self.assertFormErrors(['This field is required.'], f.clean, ['some text',['JP']])
    # test with no initial data
    self.assertTrue(f._has_changed(None, ['some text', ['J','P'], ['2007-04-25','6:24:00']]))
    # test when the data is the same as initial
    self.assertFalse(f._has_changed('some text,JP,2007-04-25 06:24:00',
        ['some text', ['J','P'], ['2007-04-25','6:24:00']]))
    # test when the first widget's data has changed
    self.assertTrue(f._has_changed('some text,JP,2007-04-25 06:24:00',
        ['other text', ['J','P'], ['2007-04-25','6:24:00']]))
    # test when the last widget's data has changed. this ensures that it is not
    # short circuiting while testing the widgets.
    self.assertTrue(f._has_changed('some text,JP,2007-04-25 06:24:00',
        ['some text', ['J','P'], ['2009-04-25','11:44:00']]))
    class ComplexFieldForm(Form):
        field1 = ComplexField(widget=w)
    f = ComplexFieldForm()
    self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" /><input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>""")
    f = ComplexFieldForm({'field1_0':'some text','field1_1':['J','P'], 'field1_2_0':'2007-04-25', 'field1_2_1':'06:24:00'})
    self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" /><input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>""")
    self.assertEqual(f.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00')
def test_ipaddress(self):
    """IPAddressField: the required and optional variants both validate
    IPv4 input; only the empty-value handling differs."""
    field = IPAddressField()
    self.assertFormErrors(['This field is required.'], field.clean, '')
    self.assertFormErrors(['This field is required.'], field.clean, None)
    self.assertEqual(field.clean(' 127.0.0.1'), '127.0.0.1')
    for invalid in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv4 address.'], field.clean, invalid)
    field = IPAddressField(required=False)
    self.assertEqual(field.clean(''), '')
    self.assertEqual(field.clean(None), '')
    self.assertEqual(field.clean(' 127.0.0.1'), '127.0.0.1')
    for invalid in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv4 address.'], field.clean, invalid)
def test_generic_ipaddress_invalid_arguments(self):
    """Constructing GenericIPAddressField with bogus options must fail."""
    with self.assertRaises(ValueError):
        GenericIPAddressField(protocol="hamster")
    with self.assertRaises(ValueError):
        GenericIPAddressField(protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
    """Default GenericIPAddressField accepts both IPv4 and IPv6."""
    # The edge cases of the IPv6 validation code are not deeply tested
    # here, they are covered in the tests for django.utils.ipv6
    field = GenericIPAddressField()
    self.assertFormErrors(['This field is required.'], field.clean, '')
    self.assertFormErrors(['This field is required.'], field.clean, None)
    self.assertEqual(field.clean(' 127.0.0.1 '), '127.0.0.1')
    for invalid in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], field.clean, invalid)
    self.assertEqual(field.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
    self.assertEqual(field.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
    for invalid in ('12345:2:3:4', '1::2:3::4', 'foo::223:6cff:fe8a:2e8a',
                    '1::2:3:4:5:6:7:8', '1:2'):
        self.assertFormErrors(['This is not a valid IPv6 address.'], field.clean, invalid)
def test_generic_ipaddress_as_ipv4_only(self):
    """protocol="IPv4" rejects IPv6 addresses and every malformed string."""
    field = GenericIPAddressField(protocol="IPv4")
    self.assertFormErrors(['This field is required.'], field.clean, '')
    self.assertFormErrors(['This field is required.'], field.clean, None)
    self.assertEqual(field.clean(' 127.0.0.1 '), '127.0.0.1')
    for invalid in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5',
                    'fe80::223:6cff:fe8a:2e8a', '2a02::223:6cff:fe8a:2e8a'):
        self.assertFormErrors(['Enter a valid IPv4 address.'], field.clean, invalid)
def test_generic_ipaddress_as_ipv6_only(self):
    """protocol="IPv6" rejects IPv4 and malformed input but accepts IPv6."""
    field = GenericIPAddressField(protocol="IPv6")
    self.assertFormErrors(['This field is required.'], field.clean, '')
    self.assertFormErrors(['This field is required.'], field.clean, None)
    for invalid in ('127.0.0.1', 'foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv6 address.'], field.clean, invalid)
    self.assertEqual(field.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
    self.assertEqual(field.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
    for invalid in ('12345:2:3:4', '1::2:3::4', 'foo::223:6cff:fe8a:2e8a',
                    '1::2:3:4:5:6:7:8', '1:2'):
        self.assertFormErrors(['This is not a valid IPv6 address.'], field.clean, invalid)
def test_generic_ipaddress_as_generic_not_required(self):
    """required=False maps empty input to '' but still validates non-empty."""
    field = GenericIPAddressField(required=False)
    self.assertEqual(field.clean(''), '')
    self.assertEqual(field.clean(None), '')
    self.assertEqual(field.clean('127.0.0.1'), '127.0.0.1')
    for invalid in ('foo', '127.0.0.', '1.2.3.4.5', '256.125.1.5'):
        self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], field.clean, invalid)
    self.assertEqual(field.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
    self.assertEqual(field.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
    for invalid in ('12345:2:3:4', '1::2:3::4', 'foo::223:6cff:fe8a:2e8a',
                    '1::2:3:4:5:6:7:8', '1:2'):
        self.assertFormErrors(['This is not a valid IPv6 address.'], field.clean, invalid)
def test_generic_ipaddress_normalization(self):
    """Cleaned addresses come back whitespace-stripped and normalized;
    unpack_ipv4=True unwraps IPv4-mapped IPv6 addresses."""
    field = GenericIPAddressField()
    self.assertEqual(field.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
    self.assertEqual(field.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
    self.assertEqual(field.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
    self.assertEqual(field.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
    field = GenericIPAddressField(unpack_ipv4=True)
    self.assertEqual(field.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
def test_slugfield_normalization(self):
    """SlugField strips surrounding whitespace while cleaning."""
    field = SlugField()
    self.assertEqual(field.clean(' aa-bb-cc '), 'aa-bb-cc')
def test_urlfield_normalization(self):
    """URLField strips trailing whitespace while cleaning."""
    field = URLField()
    self.assertEqual(field.clean('http://example.com/ '), 'http://example.com/')
def test_smart_text(self):
    """smart_text() decodes an object's textual representation to unicode
    on both Python 2 (__unicode__ / UTF-8 __str__) and Python 3 (__str__)."""
    class Test:
        # Text-only object: __str__ returns unicode on PY3, UTF-8 bytes on PY2.
        if six.PY3:
            def __str__(self):
                return 'ŠĐĆŽćžšđ'
        else:
            def __str__(self):
                return 'ŠĐĆŽćžšđ'.encode('utf-8')
    class TestU:
        # Object with distinct bytes and text representations; smart_text
        # must prefer the text one (__str__ on PY3, __unicode__ on PY2).
        if six.PY3:
            def __str__(self):
                return 'ŠĐĆŽćžšđ'
            def __bytes__(self):
                return b'Foo'
        else:
            def __str__(self):
                return b'Foo'
            def __unicode__(self):
                return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'
    self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
    self.assertEqual(smart_text(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
    self.assertEqual(smart_text(1), '1')
    self.assertEqual(smart_text('foo'), 'foo')
def test_accessing_clean(self):
    """Form.clean() may read and rewrite self.cleaned_data and return it."""
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)
        def clean(self):
            cleaned = self.cleaned_data
            if not self.errors:
                cleaned['username'] = cleaned['username'].lower()
            return cleaned
    form = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_nothing_returned(self):
    """A clean() that mutates self.cleaned_data and returns None is honoured."""
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)
        def clean(self):
            # Mutate in place; deliberately return nothing.
            self.cleaned_data['username'] = self.cleaned_data['username'].lower()
    form = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_in_clean(self):
    """A dict returned from clean() replaces cleaned_data wholesale."""
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)
        def clean(self):
            current = self.cleaned_data
            # Hand back a brand-new dict; self.cleaned_data is not touched here.
            return {
                'username': current['username'].lower(),
                'password': 'this_is_not_a_secret',
            }
    form = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data['username'], 'sirrobin')
def test_overriding_errorlist(self):
    """A custom error_class is used when rendering a form's errors."""
    @python_2_unicode_compatible
    class DivErrorList(ErrorList):
        def __str__(self):
            return self.as_divs()
        def as_divs(self):
            # Render each error in its own <div> instead of the default <ul>.
            if not self: return ''
            return '<div class="errorlist">%s</div>' % ''.join('<div class="error">%s</div>' % force_text(e) for e in self)
    class CommentForm(Form):
        name = CharField(max_length=50, required=False)
        email = EmailField()
        comment = CharField()
    data = dict(email='invalid')
    f = CommentForm(data, auto_id=False, error_class=DivErrorList)
    self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_multipart_encoded_form(self):
    """is_multipart() is True exactly when a form declares a file field."""
    class FormWithoutFile(Form):
        username = CharField()
    class FormWithFile(Form):
        username = CharField()
        file = FileField()
    class FormWithImage(Form):
        image = ImageField()
    plain_form = FormWithoutFile()
    file_form = FormWithFile()
    image_form = FormWithImage()
    self.assertFalse(plain_form.is_multipart())
    self.assertTrue(file_form.is_multipart())
    self.assertTrue(image_form.is_multipart())
def test_field_not_required(self):
    """An entirely-empty optional SelectDateWidget date counts as unchanged."""
    form = GetNotRequiredDate({'mydate_year': '', 'mydate_month': '', 'mydate_day': ''})
    self.assertFalse(form.has_changed())
@override_settings(USE_L10N=True)
class FormsExtraL10NTestCase(TestCase):
    """SelectDateWidget behaviour with localization active (Dutch locale)."""
    def setUp(self):
        super(FormsExtraL10NTestCase, self).setUp()
        # All tests in this case run with the Dutch ('nl') locale active.
        translation.activate('nl')
    def tearDown(self):
        translation.deactivate()
        super(FormsExtraL10NTestCase, self).tearDown()
    def test_l10n(self):
        """Dates are parsed and re-rendered in the localized day-month-year
        order, and month names come out translated."""
        w = SelectDateWidget(years=('2007','2008','2009','2010','2011','2012','2013','2014','2015','2016'), required=False)
        self.assertEqual(w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010')
        self.assertHTMLEqual(w.render('date', '13-08-2010'), """<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
        # Years before 1900 work
        w = SelectDateWidget(years=('1899',))
        self.assertEqual(w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899')
    def test_l10n_date_changed(self):
        """
        Ensure that DateField._has_changed() with SelectDateWidget works
        correctly with a localized date format.

        Refs #17165.
        """
        # With Field.show_hidden_initial=False -----------------------
        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())
        b = GetDate({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '2',
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())
        # With Field.show_hidden_initial=True ------------------------
        # Change detection now compares the submitted value against the
        # hidden 'initial-mydate' input, not against the form's initial dict
        # (demonstrated by the last two cases below).
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '1',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertTrue(b.has_changed())
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
        }, initial={'mydate': datetime.date(2008, 4, 22)})
        self.assertTrue(b.has_changed())
        b = GetDateShowHiddenInitial({
            'mydate_year': '2008',
            'mydate_month': '4',
            'mydate_day': '22',
            'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 22))
        }, initial={'mydate': datetime.date(2008, 4, 1)})
        self.assertFalse(b.has_changed())
    def test_l10n_invalid_date_in(self):
        """Localized input of an impossible date is rejected with the
        locale's error message."""
        # Invalid dates shouldn't be allowed
        a = GetDate({'mydate_month':'2', 'mydate_day':'31', 'mydate_year':'2010'})
        self.assertFalse(a.is_valid())
        # 'Geef een geldige datum op.' = 'Enter a valid date.'
        self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})
    def test_form_label_association(self):
        """The <label> points at the first rendered sub-select (the day
        dropdown under the localized d-m-Y ordering)."""
        # label tag is correctly associated with first rendered dropdown
        a = GetDate({'mydate_month':'1', 'mydate_day':'1', 'mydate_year':'2010'})
        self.assertTrue('<label for="id_mydate_day">' in a.as_p())
| ZhaoCJ/django | tests/forms_tests/tests/test_extra.py | Python | bsd-3-clause | 37,066 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grouping dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import math_ops
def group_by_reducer(key_func, reducer):
  """A transformation that groups elements and performs a reduction.

  Each input element is mapped to a scalar `tf.int64` key by `key_func`, and
  elements sharing a key form a group. The `reducer` processes each group:
  its `init_func` seeds the group's state when the group is created, its
  `reduce_func` folds every matching element into that state, and its
  `finalize_func` maps the final state to the group's output value.

  Args:
    key_func: A function mapping a nested structure of tensors
      (having shapes and types defined by `self.output_shapes` and
      `self.output_types`) to a scalar `tf.int64` tensor.
    reducer: An instance of `Reducer`, which captures the reduction logic using
      the `init_func`, `reduce_func`, and `finalize_func` functions.

  Returns:
    A `Dataset` transformation function, which can be passed to
    @{tf.data.Dataset.apply}.
  """

  def _transform(dataset):
    """Applies the grouping/reduction transformation to `dataset`."""
    return _GroupByReducerDataset(dataset, key_func, reducer)

  return _transform
def group_by_window(key_func,
                    reduce_func,
                    window_size=None,
                    window_size_func=None):
  """A transformation that groups windows of elements by key and reduces them.

  Each consecutive element is mapped to a scalar `tf.int64` key by `key_func`
  and grouped by that key. `reduce_func` is then applied to at most
  `window_size_func(key)` elements sharing a key; every window except possibly
  the last one for each key holds exactly that many elements.

  The window size is given either as the constant `window_size` or as a
  per-key function `window_size_func` -- exactly one of the two.

  Args:
    key_func: A function mapping a nested structure of tensors
      (having shapes and types defined by `self.output_shapes` and
      `self.output_types`) to a scalar `tf.int64` tensor.
    reduce_func: A function mapping a key and a dataset of up to `window_size`
      consecutive elements matching that key to another dataset.
    window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
      consecutive elements matching the same key to combine in a single
      batch, which will be passed to `reduce_func`. Mutually exclusive with
      `window_size_func`.
    window_size_func: A function mapping a key to a `tf.int64` scalar
      `tf.Tensor`, representing the number of consecutive elements matching
      the same key to combine in a single batch, which will be passed to
      `reduce_func`. Mutually exclusive with `window_size`.

  Returns:
    A `Dataset` transformation function, which can be passed to
    @{tf.data.Dataset.apply}.

  Raises:
    ValueError: if neither or both of {`window_size`, `window_size_func`} are
      passed.
  """
  size_given = window_size is not None
  func_given = bool(window_size_func)
  # Exactly one of the two ways of specifying the window size is allowed.
  if size_given == func_given:
    raise ValueError("Must pass either window_size or window_size_func.")

  if size_given:
    # Wrap the constant in a per-key function so the dataset op only ever
    # deals with the functional form.
    def _constant_window_size(unused_key):
      return ops.convert_to_tensor(window_size, dtype=dtypes.int64)
    window_size_func = _constant_window_size

  def _transform(dataset):
    """Applies the grouping/windowing transformation to `dataset`."""
    return _GroupByWindowDataset(dataset, key_func, reduce_func,
                                 window_size_func)

  return _transform
def bucket_by_sequence_length(element_length_func,
                              bucket_boundaries,
                              bucket_batch_sizes,
                              padded_shapes=None,
                              padding_values=None,
                              pad_to_bucket_boundary=False):
  """A transformation that buckets elements in a `Dataset` by length.
  Elements of the `Dataset` are grouped together by length and then are padded
  and batched.
  This is useful for sequence tasks in which the elements have variable length.
  Grouping together elements that have similar lengths reduces the total
  fraction of padding in a batch which increases training step efficiency.
  Args:
    element_length_func: function from element in `Dataset` to `tf.int32`,
      determines the length of the element, which will determine the bucket it
      goes into.
    bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
    bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
      `len(bucket_boundaries) + 1`.
    padded_shapes: Nested structure of `tf.TensorShape` to pass to
      @{tf.data.Dataset.padded_batch}. If not provided, will use
      `dataset.output_shapes`, which will result in variable length dimensions
      being padded out to the maximum length in each batch.
    padding_values: Values to pad with, passed to
      @{tf.data.Dataset.padded_batch}. Defaults to padding with 0.
    pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
      size to maximum length in batch. If `True`, will pad dimensions with
      unknown size to bucket boundary, and caller must ensure that the source
      `Dataset` does not contain any elements with length longer than
      `max(bucket_boundaries)`.
  Returns:
    A `Dataset` transformation function, which can be passed to
    @{tf.data.Dataset.apply}.
  Raises:
    ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
  """
  with ops.name_scope("bucket_by_seq_length"):
    # N boundaries partition the length axis into N+1 buckets, so exactly
    # N+1 batch sizes are required.
    if len(bucket_batch_sizes) != (len(bucket_boundaries) + 1):
      raise ValueError(
          "len(bucket_batch_sizes) must equal len(bucket_boundaries) + 1")
    batch_sizes = constant_op.constant(bucket_batch_sizes, dtype=dtypes.int64)
    def element_to_bucket_id(*args):
      """Return int64 id of the length bucket for this element."""
      seq_length = element_length_func(*args)
      boundaries = list(bucket_boundaries)
      # Bucket i covers lengths in [buckets_min[i], buckets_max[i]); the
      # int32 min/max sentinels make the first and last buckets unbounded.
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = math_ops.logical_and(
          math_ops.less_equal(buckets_min, seq_length),
          math_ops.less(seq_length, buckets_max))
      # Exactly one interval matches; reduce_min(where(...)) extracts its
      # index as the bucket id.
      bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))
      return bucket_id
    def window_size_fn(bucket_id):
      # The window size is set to the batch size for this bucket
      window_size = batch_sizes[bucket_id]
      return window_size
    def make_padded_shapes(shapes, none_filler=None):
      """Replace unknown (None) dimensions in `shapes` with `none_filler`."""
      padded = []
      for shape in nest.flatten(shapes):
        shape = tensor_shape.TensorShape(shape)
        shape = [
            none_filler if d.value is None else d
            for d in shape
        ]
        padded.append(shape)
      return nest.pack_sequence_as(shapes, padded)
    def batching_fn(bucket_id, grouped_dataset):
      """Batch elements in dataset."""
      batch_size = batch_sizes[bucket_id]
      none_filler = None
      if pad_to_bucket_boundary:
        # The overflow bucket (id == len(bucket_boundaries)) has no upper
        # boundary to pad to, so reject such elements at runtime.
        err_msg = ("When pad_to_bucket_boundary=True, elements must have "
                   "length <= max(bucket_boundaries).")
        check = check_ops.assert_less(
            bucket_id,
            constant_op.constant(len(bucket_batch_sizes) - 1,
                                 dtype=dtypes.int64),
            message=err_msg)
        with ops.control_dependencies([check]):
          boundaries = constant_op.constant(bucket_boundaries,
                                            dtype=dtypes.int64)
          bucket_boundary = boundaries[bucket_id]
          none_filler = bucket_boundary
      shapes = make_padded_shapes(
          padded_shapes or grouped_dataset.output_shapes,
          none_filler=none_filler)
      return grouped_dataset.padded_batch(batch_size, shapes, padding_values)
    def _apply_fn(dataset):
      # Route each element to its bucket and batch each bucket's window.
      return dataset.apply(
          group_by_window(element_to_bucket_id, batching_fn,
                          window_size_func=window_size_fn))
    return _apply_fn
class _GroupByReducerDataset(dataset_ops.Dataset):
  """A `Dataset` that groups its input and performs a reduction.

  Wraps `key_func` and the three `Reducer` functions in Defuns and passes
  them to the `group_by_reducer_dataset` op.
  """
  def __init__(self, input_dataset, key_func, reducer):
    """See `group_by_reducer()` for details."""
    super(_GroupByReducerDataset, self).__init__()
    self._input_dataset = input_dataset
    self._make_key_func(key_func, input_dataset)
    self._make_init_func(reducer.init_func)
    self._make_reduce_func(reducer.reduce_func, input_dataset)
    self._make_finalize_func(reducer.finalize_func)
  def _make_key_func(self, key_func, input_dataset):
    """Make wrapping Defun for key_func."""
    wrapped_func = dataset_ops.StructuredFunctionWrapper(
        key_func, "tf.contrib.data.group_by_reducer()", input_dataset)
    # The grouping op requires a scalar int64 key per element.
    if not (
        wrapped_func.output_types == dtypes.int64 and
        wrapped_func.output_shapes.is_compatible_with(tensor_shape.scalar())):
      raise ValueError(
          "`key_func` must return a single tf.int64 tensor. "
          "Got type=%s and shape=%s"
          % (wrapped_func.output_types, wrapped_func.output_shapes))
    self._key_func = wrapped_func.function
  def _make_init_func(self, init_func):
    """Make wrapping Defun for init_func.

    `init_func` maps a scalar int64 key to the initial reduction state; the
    structure it returns defines the state classes/shapes/types below.
    """
    wrapped_func = dataset_ops.StructuredFunctionWrapper(
        init_func, "tf.contrib.data.group_by_reducer()",
        input_classes=ops.Tensor, input_shapes=tensor_shape.scalar(),
        input_types=dtypes.int64)
    self._init_func = wrapped_func.function
    self._state_classes = wrapped_func.output_classes
    self._state_shapes = wrapped_func.output_shapes
    self._state_types = wrapped_func.output_types
  def _make_reduce_func(self, reduce_func, input_dataset):
    """Make wrapping Defun for reduce_func."""
    # Iteratively rerun the reduce function until reaching a fixed point on
    # `self._state_shapes`.
    need_to_rerun = True
    while need_to_rerun:
      # Trace with add_to_graph=False: only the final (fixed-point) version
      # of the function is added to the graph, after the loop.
      wrapped_func = dataset_ops.StructuredFunctionWrapper(
          reduce_func, "tf.contrib.data.group_by_reducer()",
          input_classes=(self._state_classes, input_dataset.output_classes),
          input_shapes=(self._state_shapes, input_dataset.output_shapes),
          input_types=(self._state_types, input_dataset.output_types),
          add_to_graph=False)
      # Extract and validate class information from the returned values.
      for new_state_class, state_class in zip(
          nest.flatten(wrapped_func.output_classes),
          nest.flatten(self._state_classes)):
        if not issubclass(new_state_class, state_class):
          raise TypeError(
              "The element classes for the new state must match the initial "
              "state. Expected %s; got %s." %
              (self._state_classes, wrapped_func.output_classes))
      # Extract and validate type information from the returned values.
      for new_state_type, state_type in zip(
          nest.flatten(wrapped_func.output_types),
          nest.flatten(self._state_types)):
        if new_state_type != state_type:
          raise TypeError(
              "The element types for the new state must match the initial "
              "state. Expected %s; got %s." %
              (self._state_types, wrapped_func.output_types))
      # Extract shape information from the returned values.
      flat_state_shapes = nest.flatten(self._state_shapes)
      flat_new_state_shapes = nest.flatten(wrapped_func.output_shapes)
      weakened_state_shapes = [
          original.most_specific_compatible_shape(new)
          for original, new in zip(flat_state_shapes, flat_new_state_shapes)
      ]
      need_to_rerun = False
      # Rerun whenever weakening changed any shape, i.e. the traced function
      # returned shapes less specific than the ones it was traced with.
      for original_shape, weakened_shape in zip(flat_state_shapes,
                                                weakened_state_shapes):
        if original_shape.ndims is not None and (
            weakened_shape.ndims is None or
            original_shape.as_list() != weakened_shape.as_list()):
          need_to_rerun = True
          break
      if need_to_rerun:
        self._state_shapes = nest.pack_sequence_as(self._state_shapes,
                                                   weakened_state_shapes)
    self._reduce_func = wrapped_func.function
    self._reduce_func.add_to_graph(ops.get_default_graph())
  def _make_finalize_func(self, finalize_func):
    """Make wrapping Defun for finalize_func.

    `finalize_func` maps the final state to the dataset's output structure.
    """
    wrapped_func = dataset_ops.StructuredFunctionWrapper(
        finalize_func, "tf.contrib.data.group_by_reducer()",
        input_classes=self._state_classes, input_shapes=self._state_shapes,
        input_types=self._state_types)
    self._finalize_func = wrapped_func.function
    self._output_classes = wrapped_func.output_classes
    self._output_shapes = wrapped_func.output_shapes
    self._output_types = wrapped_func.output_types
  @property
  def output_classes(self):
    return self._output_classes
  @property
  def output_shapes(self):
    return self._output_shapes
  @property
  def output_types(self):
    return self._output_types
  def _as_variant_tensor(self):
    return gen_dataset_ops.group_by_reducer_dataset(
        self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
        self._key_func.captured_inputs,
        self._init_func.captured_inputs,
        self._reduce_func.captured_inputs,
        self._finalize_func.captured_inputs,
        key_func=self._key_func,
        init_func=self._init_func,
        reduce_func=self._reduce_func,
        finalize_func=self._finalize_func,
        **dataset_ops.flat_structure(self))
class _GroupByWindowDataset(dataset_ops.Dataset):
  """A `Dataset` that groups its input and performs a windowed reduction.

  Wraps `key_func`, `reduce_func` and `window_size_func` in Defuns and
  passes them to the `group_by_window_dataset` op.
  """
  def __init__(self, input_dataset, key_func, reduce_func, window_size_func):
    """See `group_by_window()` for details."""
    super(_GroupByWindowDataset, self).__init__()
    self._input_dataset = input_dataset
    self._make_key_func(key_func, input_dataset)
    self._make_reduce_func(reduce_func, input_dataset)
    self._make_window_size_func(window_size_func)
  def _make_window_size_func(self, window_size_func):
    """Make wrapping Defun for window_size_func."""
    def window_size_func_wrapper(key):
      # Coerce python ints (and other convertibles) into int64 tensors.
      return ops.convert_to_tensor(window_size_func(key), dtype=dtypes.int64)
    wrapped_func = dataset_ops.StructuredFunctionWrapper(
        window_size_func_wrapper, "tf.contrib.data.group_by_window()",
        input_classes=ops.Tensor, input_shapes=tensor_shape.scalar(),
        input_types=dtypes.int64)
    if not (
        wrapped_func.output_types == dtypes.int64 and
        wrapped_func.output_shapes.is_compatible_with(tensor_shape.scalar())):
      raise ValueError(
          "`window_size_func` must return a single tf.int64 scalar tensor.")
    self._window_size_func = wrapped_func.function
  def _make_key_func(self, key_func, input_dataset):
    """Make wrapping Defun for key_func."""
    def key_func_wrapper(*args):
      # Coerce python ints (and other convertibles) into int64 tensors.
      return ops.convert_to_tensor(key_func(*args), dtype=dtypes.int64)
    wrapped_func = dataset_ops.StructuredFunctionWrapper(
        key_func_wrapper, "tf.contrib.data.group_by_window()", input_dataset)
    if not (
        wrapped_func.output_types == dtypes.int64 and
        wrapped_func.output_shapes.is_compatible_with(tensor_shape.scalar())):
      raise ValueError(
          "`key_func` must return a single tf.int64 scalar tensor.")
    self._key_func = wrapped_func.function
  def _make_reduce_func(self, reduce_func, input_dataset):
    """Make wrapping Defun for reduce_func.

    `reduce_func` receives `(key, window_dataset)` and must return a
    `Dataset`; the nested-dataset component carries the input structure.
    """
    nested_dataset = dataset_ops._NestedDatasetComponent(input_dataset) # pylint: disable=protected-access
    wrapped_func = dataset_ops.StructuredFunctionWrapper(
        reduce_func, "tf.contrib.data.reduce_by_window()",
        input_classes=(ops.Tensor, nested_dataset),
        input_shapes=(tensor_shape.scalar(), nested_dataset),
        input_types=(dtypes.int64, nested_dataset),
        experimental_nested_dataset_support=True)
    if not isinstance(
        wrapped_func.output_classes, dataset_ops._NestedDatasetComponent): # pylint: disable=protected-access
      raise TypeError("`reduce_func` must return a `Dataset` object.")
    # This dataset's output structure is that of the Dataset returned by
    # reduce_func.
    self._output_classes = wrapped_func.output_classes.output_classes
    self._output_types = wrapped_func.output_types.output_types
    self._output_shapes = wrapped_func.output_shapes.output_shapes
    self._reduce_func = wrapped_func.function
  @property
  def output_classes(self):
    return self._output_classes
  @property
  def output_shapes(self):
    return self._output_shapes
  @property
  def output_types(self):
    return self._output_types
  def _as_variant_tensor(self):
    return gen_dataset_ops.group_by_window_dataset(
        self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
        self._key_func.captured_inputs,
        self._reduce_func.captured_inputs,
        self._window_size_func.captured_inputs,
        key_func=self._key_func,
        reduce_func=self._reduce_func,
        window_size_func=self._window_size_func,
        **dataset_ops.flat_structure(self))
class Reducer(object):
  """Represents a reduction over a group of elements.

  A `Reducer` bundles the three callables that define a reduction:

  1) `init_func`: maps a key to the initial reduction state.
  2) `reduce_func`: folds one input element into the current state.
  3) `finalize_func`: converts the final state into the result.
  """

  def __init__(self, init_func, reduce_func, finalize_func):
    # Keep private references; callers access them via the read-only
    # properties below.
    self._init = init_func
    self._reduce = reduce_func
    self._finalize = finalize_func

  @property
  def init_func(self):
    """Function mapping a key to the initial reduction state."""
    return self._init

  @property
  def reduce_func(self):
    """Function folding `(old_state, input)` into the new state."""
    return self._reduce

  @property
  def finalize_func(self):
    """Function mapping the final state to the reduction result."""
    return self._finalize
| meteorcloudy/tensorflow | tensorflow/contrib/data/python/ops/grouping.py | Python | apache-2.0 | 19,270 |
"""
smq/smq/experiment.py: library components for the experiment shell
(c) 2016 Oswald Berthold
"""
import argparse
# from robots import ...
# available robots: pointmass, simple arm, two-wheeled differential, ...
try:
import rospy
except Exception, e:
print "import rospy failed", e
from smq.utils import get_items, get_items2
from smq.worlds import RobotWorld2
import smq.logging as log
################################################################################
# from im/im_quadrotor_controller
def get_args():
    """Parse command line arguments for the experiment shell.

    Returns:
        argparse.Namespace with attributes `conf` (str, path to the python
        configuration file) and `numsteps` (int or None; None means "use
        the value from the configuration file").
    """
    # defaults, kept in locals so the help strings can echo them
    default_conf = "conf/default.py"
    default_numsteps = None # 10
    # create parser (argparse is imported at module level; the previous
    # function-local re-import was redundant)
    parser = argparse.ArgumentParser()
    # add required arguments
    parser.add_argument("-c", "--conf", type=str, default=default_conf, help="Configuration file [%s]" % default_conf)
    parser.add_argument("-n", "--numsteps", type=int, default=default_numsteps, help="Number of outer loop steps [%s]" % default_numsteps)
    # parse and return arguments
    return parser.parse_args()
################################################################################
# from im/im_experiment.py
def get_config_raw(conf):
    """Load a configuration dict from a python source file.

    The file must assign a dictionary to a variable named ``conf``.

    Args:
        conf: path to the configuration file.

    Returns:
        The dictionary bound to ``conf`` inside the file.
    """
    # open and read config file containing a dictionary spec; use a context
    # manager so the file handle is closed again (the old code leaked it)
    with open(conf, "r") as f:
        s = f.read()
    # compile/exec instead of a plain eval() so the config file may contain
    # arbitrary statements (imports, helper variables, ...)
    code = compile(s, "<string>", "exec")
    global_vars = {}
    local_vars = {}
    exec(code, global_vars, local_vars)
    return local_vars["conf"]
def set_config_defaults(conf):
    """Try and set some reasonable defaults in case of inconsistent configurations.

    Fills missing brain parameters (per brain class), task defaults and
    analysis defaults. Modifies `conf` in place and also returns it.
    """
    # robots
    for robot in conf["robots"]:
        # brains
        for brain in robot["brains"]:
            # brain items
            print(brain["class"])
            # kinesis brain
            if brain["class"].__name__ == "KinesisBrain": # FIXME
                brain_items = {
                    "variant": "default",
                    "continuous_gain": 1.5,
                    "binary_threshold": 0.005,
                    "binary_high_range": 1.5,
                    "binary_low_range": 0.01
                }
            # taxis brain
            elif brain["class"].__name__.startswith("TaxisBrain"): # FIXME
                brain_items = {
                    "gain": 1.0
                }
            else:
                brain_items = {}
            # copy from default dict to real dict; `k in d` instead of the
            # python-2-only dict.has_key() so this also runs on python 3
            for k, v in brain_items.items():
                if k not in brain:
                    brain[k] = v
            # tasks
            for task in brain["tasks"]:
                if "goaldim" not in task:
                    task["goaldim"] = 1
                if "intero_index" not in task:
                    task["intero_index"] = 1
    for analysis in conf["analyses"]:
        # print analysis
        if "type" not in analysis:
            analysis["type"] = "seaborn"
    return conf
def set_config_defaults2(conf):
    """Try and set some reasonable defaults in case of inconsistent configurations.

    Variant for the flat config layout (brains at top level instead of per
    robot). Modifies `conf` in place and also returns it.
    """
    # brains
    for brain in conf["brains"]:
        # kinesis brain
        if brain["class"].__name__ == "KinesisBrain": # FIXME
            brain_items = {
                "variant": "default",
                "continuous_gain": 1.5,
                "binary_threshold": 0.005,
                "binary_high_range": 1.5,
                "binary_low_range": 0.01
            }
        # taxis brain
        elif brain["class"].__name__.startswith("TaxisBrain"): # FIXME
            brain_items = {
                "gain": 1.0
            }
        else:
            brain_items = {}
        # copy from default dict to real dict; `k in d` instead of the
        # python-2-only dict.has_key() so this also runs on python 3
        for k, v in brain_items.items():
            if k not in brain:
                brain[k] = v
        # tasks
        for task in brain["tasks"]:
            if "goaldim" not in task:
                task["goaldim"] = 1
            if "intero_index" not in task:
                task["intero_index"] = 1
    # robots: no defaults yet
    for robot in conf["robots"]:
        pass
    for analysis in conf["analyses"]:
        # print analysis
        if "type" not in analysis:
            analysis["type"] = "seaborn"
    return conf
def make_expr_sig(args = None):
    """Create an experiment signature string from the current timestamp.

    `args` is currently unused; it is kept for interface compatibility with
    earlier signature schemes that encoded argument values into the string.
    """
    import time
    # YYYYmmdd-HHMMSS, e.g. "20160101-120000"
    return time.strftime("%Y%m%d-%H%M%S")
class Experiment(object):
    """Experiment shell, variant 1 (per-robot brains).

    Loads a python config file, applies defaults, builds robots, worlds and
    analyses from it, runs the outer loop for `numsteps` steps, stores the
    logs and finally runs the analyses.

    NOTE: python 2 code (print statements, xrange).
    """
    def __init__(self, args):
        # args: namespace with `conf` and `numsteps`, see get_args()
        self.configfile = args.conf
        self.conf = get_config_raw(self.configfile)
        self.conf = set_config_defaults(self.conf)
        # precendence: conf, args overrides that
        self.numsteps = self.conf["numsteps"]
        if args.numsteps is not None:
            self.numsteps = args.numsteps
        # print "self.conf", self.conf
        self.brains = []
        # self.loss = []
        # self.task = []
        self.robots = []
        self.worlds = [] # index 0 convention, we will have _one_ world for a beginning
        self.analyses = []
        # initialize global logging
        log.init_log2(self.conf)
        log.init_log3(self.conf)
        # initialize parts from config
        self.prepare()
        # experiment signature
        # self.conf["signature"] = make_expr_sig(self.conf)
    def prepare(self):
        """prepare the experiment: construct everything we need from the config"""
        # get brain
        # self.brains = get_items(self.conf["brains"])
        # get task
        # self.tasks = get_items(self.conf["tasks"])
        # print "self.tasks", self.tasks
        # get loss
        # append loss to task
        # get robot
        self.robots = get_items(self.conf["robots"])
        # append task to robot
        # get world (the world needs to know the step count)
        print "self.conf[\"worlds\"][0]", self.conf["worlds"][0]
        self.conf["worlds"][0]["numsteps"] = self.numsteps
        self.worlds = get_items(self.conf["worlds"])
        self.worlds[0].add(self.robots)
        # append robot to world
        # append analyses
        self.analyses = get_items(self.conf["analyses"])
        # finito
    def run(self):
        """experiment run method: a set of nested loops capturing optimization over different
        time scales, agents, parameters, ..."""
        for i in xrange(self.numsteps):
            print "# % 10d " % (i) + "#" * 80
            # 1. get sensors
            # 2. do brain
            # 3. do motors
            # 4. do world
            self.worlds[0].step()
            # 5. log
            # 6. repeat
        # TODO: realtime mode: delay next iteration for realtime plotting and visualization
        # store logs, FIXME incrementally
        for k,v in log.log_lognodes.items():
            print "k", k, "v", type(v)
            log.log_store[k] = v
        # run analyses
        self.analyse()
    def analyse(self):
        """Run all configured analyses over the robots (no-op without robots)."""
        # print "%s.analyse(): implement me" % (self.__class__.__name__)
        if len(self.robots) < 1:
            return
        for a in self.analyses:
            a.run(self.robots)
################################################################################
#
class Experiment2(object):
    """Experiment shell, variant 2 (flat brains list, optional ROS).

    Like `Experiment`, but uses the flat config layout (brains next to
    robots instead of inside them), requires one brain per robot, and can
    optionally register itself as a ROS node.

    NOTE: python 2 code (print statements, xrange, dict.has_key).
    """
    def __init__(self, args):
        # args: namespace with `conf` and `numsteps`, see get_args()
        self.configfile = args.conf
        self.conf = get_config_raw(self.configfile)
        self.conf = set_config_defaults2(self.conf)
        # precendence: conf, args overrides that
        self.numsteps = self.conf["numsteps"]
        if args.numsteps is not None:
            self.numsteps = args.numsteps
        self.brains = []
        self.robots = []
        self.worlds = [] # index 0 convention, we will have _one_ world for a beginning
        self.analyses = []
        # initialize global logging
        log.init_log3(self.conf)
        # ROS: default to disabled, otherwise register a node named conf["id"]
        if not self.conf.has_key("ros"): self.conf["ros"] = False
        if self.conf["ros"]:
            rospy.init_node(self.conf["id"])
        # initialize parts from config
        self.prepare()
        # experiment signature
        # self.conf["signature"] = make_expr_sig(self.conf)
    def prepare(self):
        """prepare the experiment: construct everything we need from the config"""
        # get brains
        self.brains = get_items2(self.conf, "brains")
        # get robot bodies
        self.robots = get_items2(self.conf, "robots")
        # check for zombies: each robot needs exactly one brain
        assert(len(self.brains) == len(self.robots))
        # # add interfaces
        # for i in range(len(self.brains)):
        #     self.brains
        # get world (the world needs to know the step count)
        self.conf["worlds"][0]["numsteps"] = self.numsteps
        self.worlds = get_items(self.conf["worlds"])
        self.worlds[0].add(self.brains)
        self.worlds[0].add(self.robots)
        # print self.worlds[0]
        # get analyses
        self.analyses = get_items(self.conf["analyses"])
        # finito
    def run(self):
        """experiment run method: a set of nested loops capturing optimization over different
        time scales, agents, parameters, ..."""
        for i in xrange(self.numsteps):
            # mark
            print "# % 10d " % (i) + "#" * 80
            # handle ROS: stop looping when the node was shut down
            if self.conf["ros"]:
                if rospy.is_shutdown():
                    break
            # do world
            self.worlds[0].step()
        # TODO: realtime mode: delay next iteration for realtime plotting and visualization
        # store logs, FIXME incrementally
        for k,v in log.log_lognodes.items():
            print "k", k, "v", type(v)
            log.log_store[k] = v
        # run analyses
        self.analyse()
    def analyse(self):
        """Run all analyses; honor an analysis' `plotitems` attribute if set."""
        # print "%s.analyse(): implement me" % (self.__class__.__name__)
        if len(self.robots) < 1:
            return
        for a in self.analyses:
            if hasattr(a, "plotitems"):
                # analysis declares which experiment attributes to plot
                for plotitem in getattr(a, "plotitems"):
                    print "plotitem", plotitem
                    a.run(getattr(self, plotitem))
            else:
                a.run(self.robots)
| x75/smq | smq/experiments.py | Python | mit | 10,734 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright ยฉ 2013, W. van Ham, Radboud University Nijmegen
This file is part of Sleelab.
Sleelab is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Sleelab is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Sleelab. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import print_function
import logging, signal, numpy as np
import OpenGL
OpenGL.ERROR_ON_COPY = True # make sure we do not accidentally send other structures than numpy arrays
# PyQt (package python-qt4-gl on Ubuntu)
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtOpenGL import *
# project files
from testField import *
import utils
class Main(QMainWindow):
    """Top-level application window hosting the test Field widget."""

    def __init__(self):
        super(Main, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the central widget, the actions, the menus and the status bar."""
        # central drawing widget
        self.field = Field(self)
        self.setCentralWidget(self.field)

        # quit action
        quit_action = QAction(QIcon('icon/quit.png'), '&Exit', self)
        quit_action.setShortcut('Ctrl+Q')
        quit_action.setStatusTip('Quit application')
        quit_action.triggered.connect(qApp.quit)

        # full-screen toggle action
        self.fullIcon = QIcon('icon/full.png')
        self.fullAction = QAction(self.fullIcon, '&Full Screen', self)
        self.fullAction.setShortcut('ctrl+F')
        self.fullAction.setStatusTip('Toggle Full Screen')
        self.fullAction.triggered.connect(self.toggleFullScreen)

        # populate the menu bar
        menubar = self.menuBar()
        file_menu = menubar.addMenu('&File')
        file_menu.addAction(quit_action)
        view_menu = menubar.addMenu('&View')
        view_menu.addAction(self.fullAction)

        # also register the actions on the window itself so the shortcuts
        # keep working while the menu bar is hidden (full screen)
        self.addAction(self.fullAction)
        self.addAction(quit_action)

        self.statusBar().showMessage('Ready')
        self.setWindowTitle('testImage')
        self.show()

    def toggleFullScreen(self, event=None):
        """Switch between full-screen and windowed mode."""
        if self.isFullScreen():
            # back to windowed: restore menu bar, status bar and cursor
            self.showNormal()
            self.menuBar().setVisible(True)
            self.statusBar().setVisible(True)
            self.setCursor(QCursor(Qt.ArrowCursor))
        else:
            # full screen: hide the window chrome
            self.showFullScreen()
            self.menuBar().setVisible(False)
            self.statusBar().setVisible(False)
            #self.setCursor(QCursor(Qt.BlankCursor))
def main():
    """Create the Qt application and the main window, then enter the event loop."""
    app = QApplication(sys.argv)
    window = Main()  # keep a reference so the window is not garbage collected
    app.lastWindowClosed.connect(app.quit) # make upper right cross work
    # enter the main loop; the underscore avoids the python keyword 'exec'
    sys.exit(app.exec_())
# script entry point: enable debug logging, then start the GUI
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    main()
| wilberth/Rudolph | testImage.py | Python | gpl-3.0 | 2,798 |
# Copy a file byte-for-byte. Reading in 64 KiB chunks instead of one byte
# at a time produces an identical copy but is orders of magnitude faster.
with open('/home/matheus/Imagens/imagem.png', 'rb') as fonte:
    with open('/home/matheus/Imagens/imagem3.png', 'wb') as destino:
        chunk = fonte.read(65536)
        while chunk != b'':
            destino.write(chunk)
            chunk = fonte.read(65536)
import time
import os.path
from twisted.trial import unittest
from twisted.application import service
from twisted.internet import defer
from foolscap.api import eventually, fireEventually
from allmydata.util import fileutil, hashutil, pollmixin
from allmydata.storage.server import StorageServer, si_b2a
from allmydata.storage.crawler import ShareCrawler, TimeSliceExceeded
from allmydata.test.test_storage import FakeCanary
from allmydata.test.common_util import StallMixin
class BucketEnumeratingCrawler(ShareCrawler):
    """Test crawler that records every bucket (storage index) it visits.

    Fires `finished_d` (via eventually()) once a full cycle completes.
    """
    cpu_slice = 500 # make sure it can complete in a single slice
    slow_start = 0

    def __init__(self, *args, **kwargs):
        ShareCrawler.__init__(self, *args, **kwargs)
        self.finished_d = defer.Deferred()
        self.all_buckets = []

    def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32):
        # just remember which buckets we saw, in visit order
        self.all_buckets.append(storage_index_b32)

    def finished_cycle(self, cycle):
        # fire from the reactor rather than from inside the crawler's timer
        eventually(self.finished_d.callback, None)
class PacedCrawler(ShareCrawler):
    """Test crawler that forces a TimeSliceExceeded after six buckets.

    `countdown` counts processed buckets; when it hits zero, `cpu_slice`
    is made negative so the next time check aborts the slice. yielding()
    restores the slice and invokes `yield_cb` when one is installed.
    """
    cpu_slice = 500 # make sure it can complete in a single slice
    slow_start = 0

    def __init__(self, *args, **kwargs):
        ShareCrawler.__init__(self, *args, **kwargs)
        self.countdown = 6
        self.all_buckets = []
        self.finished_d = defer.Deferred()
        self.yield_cb = None

    def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32):
        self.all_buckets.append(storage_index_b32)
        self.countdown -= 1
        if self.countdown == 0:
            # force a timeout. We restore it in yielding()
            self.cpu_slice = -1.0

    def yielding(self, sleep_time):
        self.cpu_slice = 500
        cb = self.yield_cb
        if cb:
            cb()

    def finished_cycle(self, cycle):
        eventually(self.finished_d.callback, None)
class ConsumingCrawler(ShareCrawler):
    """Test crawler that burns ~50ms of wallclock time per bucket.

    Tracks total time spent in process_bucket (`accumulated`), time spent
    since the last yield (`last_yield`), and completed cycles (`cycles`).
    """
    cpu_slice = 0.5
    allowed_cpu_percentage = 0.5
    minimum_cycle_time = 0
    slow_start = 0

    def __init__(self, *args, **kwargs):
        ShareCrawler.__init__(self, *args, **kwargs)
        self.accumulated = 0.0
        self.cycles = 0
        self.last_yield = 0.0

    def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32):
        t0 = time.time()
        time.sleep(0.05)
        dt = time.time() - t0
        self.accumulated += dt
        self.last_yield += dt

    def finished_cycle(self, cycle):
        self.cycles += 1

    def yielding(self, sleep_time):
        self.last_yield = 0.0
class OneShotCrawler(ShareCrawler):
    """Test crawler that counts buckets and detaches itself after one cycle."""
    cpu_slice = 500 # make sure it can complete in a single slice
    slow_start = 0

    def __init__(self, *args, **kwargs):
        ShareCrawler.__init__(self, *args, **kwargs)
        self.counter = 0
        self.finished_d = defer.Deferred()

    def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32):
        self.counter = self.counter + 1

    def finished_cycle(self, cycle):
        self.finished_d.callback(None)
        # stop crawling: remove ourselves from the service hierarchy
        self.disownServiceParent()
class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin):
def setUp(self):
self.s = service.MultiService()
self.s.startService()
def tearDown(self):
return self.s.stopService()
def si(self, i):
return hashutil.storage_index_hash(str(i))
def rs(self, i, serverid):
return hashutil.bucket_renewal_secret_hash(str(i), serverid)
def cs(self, i, serverid):
return hashutil.bucket_cancel_secret_hash(str(i), serverid)
def write(self, i, ss, serverid, tail=0):
si = self.si(i)
si = si[:-1] + chr(tail)
had,made = ss.remote_allocate_buckets(si,
self.rs(i, serverid),
self.cs(i, serverid),
set([0]), 99, FakeCanary())
made[0].remote_write(0, "data")
made[0].remote_close()
return si_b2a(si)
def test_immediate(self):
self.basedir = "crawler/Basic/immediate"
fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
ss = StorageServer(self.basedir, serverid)
ss.setServiceParent(self.s)
sis = [self.write(i, ss, serverid) for i in range(10)]
statefile = os.path.join(self.basedir, "statefile")
c = BucketEnumeratingCrawler(ss, statefile, allowed_cpu_percentage=.1)
c.load_state()
c.start_current_prefix(time.time())
self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
# make sure the statefile has been returned to the starting point
c.finished_d = defer.Deferred()
c.all_buckets = []
c.start_current_prefix(time.time())
self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
# check that a new crawler picks up on the state file properly
c2 = BucketEnumeratingCrawler(ss, statefile)
c2.load_state()
c2.start_current_prefix(time.time())
self.failUnlessEqual(sorted(sis), sorted(c2.all_buckets))
def test_service(self):
self.basedir = "crawler/Basic/service"
fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
ss = StorageServer(self.basedir, serverid)
ss.setServiceParent(self.s)
sis = [self.write(i, ss, serverid) for i in range(10)]
statefile = os.path.join(self.basedir, "statefile")
c = BucketEnumeratingCrawler(ss, statefile)
c.setServiceParent(self.s)
# it should be legal to call get_state() and get_progress() right
# away, even before the first tick is performed. No work should have
# been done yet.
s = c.get_state()
p = c.get_progress()
self.failUnlessEqual(s["last-complete-prefix"], None)
self.failUnlessEqual(s["current-cycle"], None)
self.failUnlessEqual(p["cycle-in-progress"], False)
d = c.finished_d
def _check(ignored):
self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
d.addCallback(_check)
return d
def test_paced(self):
self.basedir = "crawler/Basic/paced"
fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
ss = StorageServer(self.basedir, serverid)
ss.setServiceParent(self.s)
# put four buckets in each prefixdir
sis = []
for i in range(10):
for tail in range(4):
sis.append(self.write(i, ss, serverid, tail))
statefile = os.path.join(self.basedir, "statefile")
c = PacedCrawler(ss, statefile)
c.load_state()
try:
c.start_current_prefix(time.time())
except TimeSliceExceeded:
pass
# that should stop in the middle of one of the buckets. Since we
# aren't using its normal scheduler, we have to save its state
# manually.
c.save_state()
c.cpu_slice = PacedCrawler.cpu_slice
self.failUnlessEqual(len(c.all_buckets), 6)
c.start_current_prefix(time.time()) # finish it
self.failUnlessEqual(len(sis), len(c.all_buckets))
self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
# make sure the statefile has been returned to the starting point
c.finished_d = defer.Deferred()
c.all_buckets = []
c.start_current_prefix(time.time())
self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
del c
# start a new crawler, it should start from the beginning
c = PacedCrawler(ss, statefile)
c.load_state()
try:
c.start_current_prefix(time.time())
except TimeSliceExceeded:
pass
# that should stop in the middle of one of the buckets. Since we
# aren't using its normal scheduler, we have to save its state
# manually.
c.save_state()
c.cpu_slice = PacedCrawler.cpu_slice
# a third crawler should pick up from where it left off
c2 = PacedCrawler(ss, statefile)
c2.all_buckets = c.all_buckets[:]
c2.load_state()
c2.countdown = -1
c2.start_current_prefix(time.time())
self.failUnlessEqual(len(sis), len(c2.all_buckets))
self.failUnlessEqual(sorted(sis), sorted(c2.all_buckets))
del c, c2
# now stop it at the end of a bucket (countdown=4), to exercise a
# different place that checks the time
c = PacedCrawler(ss, statefile)
c.load_state()
c.countdown = 4
try:
c.start_current_prefix(time.time())
except TimeSliceExceeded:
pass
# that should stop at the end of one of the buckets. Again we must
# save state manually.
c.save_state()
c.cpu_slice = PacedCrawler.cpu_slice
self.failUnlessEqual(len(c.all_buckets), 4)
c.start_current_prefix(time.time()) # finish it
self.failUnlessEqual(len(sis), len(c.all_buckets))
self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
del c
# stop it again at the end of the bucket, check that a new checker
# picks up correctly
c = PacedCrawler(ss, statefile)
c.load_state()
c.countdown = 4
try:
c.start_current_prefix(time.time())
except TimeSliceExceeded:
pass
# that should stop at the end of one of the buckets.
c.save_state()
c2 = PacedCrawler(ss, statefile)
c2.all_buckets = c.all_buckets[:]
c2.load_state()
c2.countdown = -1
c2.start_current_prefix(time.time())
self.failUnlessEqual(len(sis), len(c2.all_buckets))
self.failUnlessEqual(sorted(sis), sorted(c2.all_buckets))
del c, c2
    def test_paced_service(self):
        """Run a PacedCrawler as a Twisted service and verify its progress
        reporting both mid-cycle (via yield_cb) and between cycles."""
        self.basedir = "crawler/Basic/paced_service"
        fileutil.make_dirs(self.basedir)
        serverid = "\x00" * 20
        ss = StorageServer(self.basedir, serverid)
        ss.setServiceParent(self.s)
        sis = [self.write(i, ss, serverid) for i in range(10)]
        statefile = os.path.join(self.basedir, "statefile")
        c = PacedCrawler(ss, statefile)
        # one-element list so the nested closures can mutate it (py2 has no
        # `nonlocal`); holds True on success or the exception on failure
        did_check_progress = [False]
        def check_progress():
            c.yield_cb = None
            try:
                p = c.get_progress()
                self.failUnlessEqual(p["cycle-in-progress"], True)
                pct = p["cycle-complete-percentage"]
                # after 6 buckets, we happen to be at 76.17% complete. As
                # long as we create shares in deterministic order, this will
                # continue to be true.
                self.failUnlessEqual(int(pct), 76)
                left = p["remaining-sleep-time"]
                self.failUnless(isinstance(left, float), left)
                self.failUnless(left > 0.0, left)
            except Exception, e:
                # capture instead of raising: this runs inside the crawler's
                # yield callback, not inside the test's callback chain
                did_check_progress[0] = e
            else:
                did_check_progress[0] = True
        c.yield_cb = check_progress
        c.setServiceParent(self.s)
        # that should get through 6 buckets, pause for a little while (and
        # run check_progress()), then resume
        d = c.finished_d
        def _check(ignored):
            if did_check_progress[0] is not True:
                raise did_check_progress[0]
            self.failUnless(did_check_progress[0])
            self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
            # at this point, the crawler should be sitting in the inter-cycle
            # timer, which should be pegged at the minimum cycle time
            self.failUnless(c.timer)
            self.failUnless(c.sleeping_between_cycles)
            self.failUnlessEqual(c.current_sleep_time, c.minimum_cycle_time)
            p = c.get_progress()
            self.failUnlessEqual(p["cycle-in-progress"], False)
            naptime = p["remaining-wait-time"]
            self.failUnless(isinstance(naptime, float), naptime)
            # min-cycle-time is 300, so this is basically testing that it took
            # less than 290s to crawl
            self.failUnless(naptime > 10.0, naptime)
            soon = p["next-crawl-time"] - time.time()
            self.failUnless(soon > 10.0, soon)
        d.addCallback(_check)
        return d
    def OFF_test_cpu_usage(self):
        # this test can't actually assert anything, because too many
        # buildslave machines are slow. But on a fast developer machine, it
        # can produce interesting results. So if you care about how well the
        # Crawler is accomplishing its run-slowly goals, re-enable this test
        # and read the stdout when it runs.
        # (the OFF_ prefix keeps the test runner from collecting it; rename
        # it back to test_cpu_usage to re-enable)
        self.basedir = "crawler/Basic/cpu_usage"
        fileutil.make_dirs(self.basedir)
        serverid = "\x00" * 20
        ss = StorageServer(self.basedir, serverid)
        ss.setServiceParent(self.s)
        for i in range(10):
            self.write(i, ss, serverid)
        statefile = os.path.join(self.basedir, "statefile")
        c = ConsumingCrawler(ss, statefile)
        c.setServiceParent(self.s)
        # this will run as fast as it can, consuming about 50ms per call to
        # process_bucket(), limited by the Crawler to about 50% cpu. We let
        # it run for a few seconds, then compare how much time
        # process_bucket() got vs wallclock time. It should get between 10%
        # and 70% CPU. This is dicey, there's about 100ms of overhead per
        # 300ms slice (saving the state file takes about 150-200us, but we do
        # it 1024 times per cycle, one for each [empty] prefixdir), leaving
        # 200ms for actual processing, which is enough to get through 4
        # buckets each slice, then the crawler sleeps for 300ms/0.5 = 600ms,
        # giving us 900ms wallclock per slice. In 4.0 seconds we can do 4.4
        # slices, giving us about 17 shares, so we merely assert that we've
        # finished at least one cycle in that time.
        # with a short cpu_slice (so we can keep this test down to 4
        # seconds), the overhead is enough to make a nominal 50% usage more
        # like 30%. Forcing sleep_time to 0 only gets us 67% usage.
        start = time.time()
        d = self.stall(delay=4.0)
        def _done(res):
            elapsed = time.time() - start
            percent = 100.0 * c.accumulated / elapsed
            # our buildslaves vary too much in their speeds and load levels,
            # and many of them only manage to hit 7% usage when our target is
            # 50%. So don't assert anything about the results, just log them.
            print
            print "crawler: got %d%% percent when trying for 50%%" % percent
            print "crawler: got %d full cycles" % c.cycles
        d.addCallback(_done)
        return d
def test_empty_subclass(self):
self.basedir = "crawler/Basic/empty_subclass"
fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
ss = StorageServer(self.basedir, serverid)
ss.setServiceParent(self.s)
for i in range(10):
self.write(i, ss, serverid)
statefile = os.path.join(self.basedir, "statefile")
c = ShareCrawler(ss, statefile)
c.slow_start = 0
c.setServiceParent(self.s)
# we just let it run for a while, to get figleaf coverage of the
# empty methods in the base class
def _check():
return bool(c.state["last-cycle-finished"] is not None)
d = self.poll(_check)
def _done(ignored):
state = c.get_state()
self.failUnless(state["last-cycle-finished"] is not None)
d.addCallback(_done)
return d
    def test_oneshot(self):
        """A crawler that disables itself after one cycle must stay stopped."""
        self.basedir = "crawler/Basic/oneshot"
        fileutil.make_dirs(self.basedir)
        serverid = "\x00" * 20
        ss = StorageServer(self.basedir, serverid)
        ss.setServiceParent(self.s)
        for i in range(30):
            self.write(i, ss, serverid)
        statefile = os.path.join(self.basedir, "statefile")
        c = OneShotCrawler(ss, statefile)
        c.setServiceParent(self.s)
        d = c.finished_d
        def _finished_first_cycle(ignored):
            # give any stray timers a chance to fire before sampling counter
            return fireEventually(c.counter)
        d.addCallback(_finished_first_cycle)
        def _check(old_counter):
            # the crawler should not have done any more work after stopping
            self.failUnlessEqual(old_counter, c.counter)
            self.failIf(c.running)
            self.failIf(c.timer)
            self.failIf(c.current_sleep_time)
            s = c.get_state()
            self.failUnlessEqual(s["last-cycle-finished"], 0)
            self.failUnlessEqual(s["current-cycle"], None)
        d.addCallback(_check)
        return d
| david415/tahoe-lafs | src/allmydata/test/test_crawler.py | Python | gpl-2.0 | 16,765 |
"""This module implements the SocketServerPort, which basically implements
a serial like interface using a socket server.
"""
import select
class SocketPort(object):
    """Serial-port-like wrapper around a connected socket.

    Presents the minimal read/write interface the bioloid bus code expects,
    backed by a network socket instead of a UART.
    """

    def __init__(self, skt):
        self.socket = skt
        self.baud = 0          # last baud rate requested via set_parameters()
        self.rx_buf_len = 0    # last read-buffer length requested

    def read_byte(self, block=False):
        """Reads a byte from the bus. This function will return None if
        no character was read within the designated timeout.

        The max Return Delay time is 254 x 2 usec = 508 usec (the
        default is 500 usec). This represents the minimum time between
        receiving a packet and sending a response.
        """
        if block:
            readable = True
        else:
            # poll for up to 100 ms so a silent bus doesn't hang the caller
            readable, _, _ = select.select([self.socket.fileno()], [], [], 0.1)
        if readable:
            data = self.socket.recv(1)
            if data:
                return data[0]
        # implicit None: timeout, or peer sent EOF

    def set_parameters(self, baud, rx_buf_len):
        """Sets the baud rate and the read buffer length.

        Note that for a network socket this is essentially
        a no-op; the values are only recorded.
        """
        self.baud = baud
        self.rx_buf_len = rx_buf_len

    def write_packet(self, packet_data):
        """Writes a complete packet to the device.

        Uses sendall() so the whole packet is transmitted even when the
        kernel accepts it in several pieces; plain send() may write only a
        prefix of the data and silently drop the rest of the packet.
        """
        self.socket.sendall(packet_data)
| dhylands/bioloid3 | bioloid/socket_port.py | Python | mit | 1,363 |
from rest_framework import serializers
from user.models import User
class UserSerializer(serializers.ModelSerializer):
    """
    Read/write serializer for User objects.

    The password is write-only and is hashed with ``set_password`` before
    being stored, on both create and update (the previous ``create``
    implementation passed the raw password straight to
    ``User.objects.create``, storing it unhashed, and ``update`` silently
    dropped every field except the password).
    """
    password = serializers.CharField(write_only=True, required=False)
    # set required to false else with browsable api
    # each put with empty file erase existing one
    avatar = serializers.ImageField(required=False)

    class Meta:
        model = User
        fields = (
            'pk',
            'url',
            'username',
            'first_name',
            'last_name',
            'email',
            'is_staff',
            'is_active',
            'is_weblog_author',
            'is_librairy_member',
            'avatar',
            'author_name',
            'website_link',
            'blog_link',
            'etsy_link',
            'facebook_link',
            'flickr_link',
            'px500_link',
            'twitter_link',
            'gplus_link',
            'pinterest_link',
            'vk_link',
            'insta_link',
            'mail_newsletter',
            'mail_contact',
            'mail_registration',
            'password',
        )

    def create(self, validated_data):
        # pop the raw password so it is never stored unhashed
        password = validated_data.pop('password', None)
        user = User(**validated_data)
        if password:
            user.set_password(password)
        user.save()
        return user

    def update(self, instance, validated_data):
        # apply every updated field, hashing the password if one was given
        password = validated_data.pop('password', None)
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        if password:
            instance.set_password(password)
        instance.save()
        return instance
class SafeUserSerializer(UserSerializer):
    """
    Serializer exposing only "safe" user data for writing: the privilege
    flags are read-only and sensitive fields (pk, url, password, contact
    preferences) are left out entirely.
    """
    class Meta:
        model = User
        fields = (
            'username',
            'first_name',
            'last_name',
            'email',
            'is_staff',
            'is_weblog_author',
            'is_librairy_member',
            'avatar',
            'author_name',
            'website_link',
            'blog_link',
            'etsy_link',
            'facebook_link',
            'flickr_link',
            'px500_link',
            'twitter_link',
            'gplus_link',
            'pinterest_link',
            'vk_link',
            'insta_link',
            'mail_newsletter',
        )
        # a user must not be able to grant himself these privileges
        read_only_fields =('is_staff', 'is_weblog_author', 'is_librairy_member')
class AuthorSerializer(serializers.ModelSerializer):
    """
    A serializer to show author data in posts.
    Must not expose any private information.
    """
    # set files as charfield else we get
    # api link (127.0.0.1:8000 in dev) instead
    # of relative link
    avatar = serializers.CharField()
    class Meta:
        model = User
        fields = (
            'username', 'author_name', 'avatar', 'blog_link',
            'website_link', 'etsy_link', 'facebook_link', 'flickr_link',
            'px500_link', 'twitter_link', 'gplus_link',
            'pinterest_link', 'vk_link', 'insta_link',
        )
        # this serializer is strictly read-only
        read_only_fields = fields
| Fenykepy/phiroom | src/api/user/serializers.py | Python | agpl-3.0 | 3,209 |
# -*- coding: utf-8 -*-
"""
Use a HistogramLUTWidget to control the contrast / coloration of an image.
"""
## Add path to library (just for examples; you do not need this)
import initExample
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
# build the main window: an image view on the left, histogram/LUT on the right
app = QtGui.QApplication([])
win = QtGui.QMainWindow()
win.resize(800,600)
win.show()
win.setWindowTitle('pyqtgraph example: Histogram LUT')
cw = QtGui.QWidget()
win.setCentralWidget(cw)
l = QtGui.QGridLayout()
cw.setLayout(l)
l.setSpacing(0)
v = pg.GraphicsView()
vb = pg.ViewBox()
vb.setAspectLocked()
v.setCentralItem(vb)
l.addWidget(v, 0, 0)
w = pg.HistogramLUTWidget()
l.addWidget(w, 0, 1)
# smooth random field with a regular grid of brighter pixels on top
data = pg.gaussianFilter(np.random.normal(size=(256, 256)), (20, 20))
for i in range(32):
    for j in range(32):
        data[i*8, j*8] += .1
img = pg.ImageItem(data)
#data2 = np.zeros((2,) + data.shape + (2,))
#data2[0,:,:,0] = data ## make non-contiguous array for testing purposes
#img = pg.ImageItem(data2[0,:,:,0])
vb.addItem(img)
vb.autoRange()
# connect the histogram widget to the image so it controls levels/LUT
w.setImageItem(img)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| UpSea/thirdParty | pyqtgraph-0.9.10/examples/HistogramLUT.py | Python | mit | 1,350 |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'DjangoApplication1.views.home', name='home'),
    # url(r'^DjangoApplication1/', include('DjangoApplication1.fob.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^Oar/$', 'Oar.views.index'),
    # NOTE(review): lowercase 'oar' here while every other view path uses
    # 'Oar' -- looks like a typo; also r'^/$' presumably never matches since
    # Django matches against the path with the leading slash stripped.
    # Verify which pattern/module was actually intended.
    url(r'^/$', 'oar.views.main'),
    url(r'^loop_nobom/$', 'Oar.views.loop_nobom'),
    url(r'^loop/$', 'Oar.views.loop'),
    url(r'^loop2/$', 'Oar.views.loop2'),
)
| jkorell/PTVS | Python/Tests/TestData/DjangoProject/urls.py | Python | apache-2.0 | 813 |
import time
from ctypes import *
from ctypes.wintypes import *
from comtypes import *
from comtypes.automation import *
import comtypes.client
import winKernel
import winUser
# Include functions from oleacc.dll in the module namespace.
m=comtypes.client.GetModule('oleacc.dll')
globals().update((key, val) for key, val in m.__dict__.iteritems() if not key.startswith("_"))
# MSAA spatial/logical navigation directions (IAccessible::accNavigate)
NAVDIR_MIN=0
NAVDIR_UP=1
NAVDIR_DOWN=2
NAVDIR_LEFT=3
NAVDIR_RIGHT=4
NAVDIR_NEXT=5
NAVDIR_PREVIOUS=6
NAVDIR_FIRSTCHILD=7
NAVDIR_LASTCHILD=8
NAVDIR_MAX=9
# MSAA object roles (IAccessible::get_accRole)
ROLE_SYSTEM_TITLEBAR=1
ROLE_SYSTEM_MENUBAR=2
ROLE_SYSTEM_SCROLLBAR=3
ROLE_SYSTEM_GRIP=4
ROLE_SYSTEM_SOUND=5
ROLE_SYSTEM_CURSOR=6
ROLE_SYSTEM_CARET=7
ROLE_SYSTEM_ALERT=8
ROLE_SYSTEM_WINDOW=9
ROLE_SYSTEM_CLIENT=10
ROLE_SYSTEM_MENUPOPUP=11
ROLE_SYSTEM_MENUITEM=12
ROLE_SYSTEM_TOOLTIP=13
ROLE_SYSTEM_APPLICATION=14
ROLE_SYSTEM_DOCUMENT=15
ROLE_SYSTEM_PANE=16
ROLE_SYSTEM_CHART=17
ROLE_SYSTEM_DIALOG=18
ROLE_SYSTEM_BORDER=19
ROLE_SYSTEM_GROUPING=20
ROLE_SYSTEM_SEPARATOR=21
ROLE_SYSTEM_TOOLBAR=22
ROLE_SYSTEM_STATUSBAR=23
ROLE_SYSTEM_TABLE=24
ROLE_SYSTEM_COLUMNHEADER=25
ROLE_SYSTEM_ROWHEADER=26
ROLE_SYSTEM_COLUMN=27
ROLE_SYSTEM_ROW=28
ROLE_SYSTEM_CELL=29
ROLE_SYSTEM_LINK=30
ROLE_SYSTEM_HELPBALLOON=31
ROLE_SYSTEM_CHARACTER=32
ROLE_SYSTEM_LIST=33
ROLE_SYSTEM_LISTITEM=34
ROLE_SYSTEM_OUTLINE=35
ROLE_SYSTEM_OUTLINEITEM=36
ROLE_SYSTEM_PAGETAB=37
ROLE_SYSTEM_PROPERTYPAGE=38
ROLE_SYSTEM_INDICATOR=39
ROLE_SYSTEM_GRAPHIC=40
ROLE_SYSTEM_STATICTEXT=41
ROLE_SYSTEM_TEXT=42
ROLE_SYSTEM_PUSHBUTTON=43
ROLE_SYSTEM_CHECKBUTTON=44
ROLE_SYSTEM_RADIOBUTTON=45
ROLE_SYSTEM_COMBOBOX=46
ROLE_SYSTEM_DROPLIST=47
ROLE_SYSTEM_PROGRESSBAR=48
ROLE_SYSTEM_DIAL=49
ROLE_SYSTEM_HOTKEYFIELD=50
ROLE_SYSTEM_SLIDER=51
ROLE_SYSTEM_SPINBUTTON=52
ROLE_SYSTEM_DIAGRAM=53
ROLE_SYSTEM_ANIMATION=54
ROLE_SYSTEM_EQUATION=55
ROLE_SYSTEM_BUTTONDROPDOWN=56
ROLE_SYSTEM_BUTTONMENU=57
ROLE_SYSTEM_BUTTONDROPDOWNGRID=58
ROLE_SYSTEM_WHITESPACE=59
ROLE_SYSTEM_PAGETABLIST=60
ROLE_SYSTEM_CLOCK=61
ROLE_SYSTEM_SPLITBUTTON=62
ROLE_SYSTEM_IPADDRESS=63
ROLE_SYSTEM_OUTLINEBUTTON=64
# MSAA object state bit flags (IAccessible::get_accState)
STATE_SYSTEM_NORMAL=0
STATE_SYSTEM_UNAVAILABLE=0x1
STATE_SYSTEM_SELECTED=0x2
STATE_SYSTEM_FOCUSED=0x4
STATE_SYSTEM_PRESSED=0x8
STATE_SYSTEM_CHECKED=0x10
STATE_SYSTEM_MIXED=0x20
STATE_SYSTEM_INDETERMINATE=STATE_SYSTEM_MIXED
STATE_SYSTEM_READONLY=0x40
STATE_SYSTEM_HOTTRACKED=0x80
STATE_SYSTEM_DEFAULT=0x100
STATE_SYSTEM_EXPANDED=0x200
STATE_SYSTEM_COLLAPSED=0x400
STATE_SYSTEM_BUSY=0x800
STATE_SYSTEM_FLOATING=0x1000
STATE_SYSTEM_MARQUEED=0x2000
STATE_SYSTEM_ANIMATED=0x4000
STATE_SYSTEM_INVISIBLE=0x8000
STATE_SYSTEM_OFFSCREEN=0x10000
STATE_SYSTEM_SIZEABLE=0x20000
STATE_SYSTEM_MOVEABLE=0x40000
STATE_SYSTEM_SELFVOICING=0x80000
STATE_SYSTEM_FOCUSABLE=0x100000
STATE_SYSTEM_SELECTABLE=0x200000
STATE_SYSTEM_LINKED=0x400000
STATE_SYSTEM_TRAVERSED=0x800000
STATE_SYSTEM_MULTISELECTABLE=0x1000000
STATE_SYSTEM_EXTSELECTABLE=0x2000000
STATE_SYSTEM_ALERT_LOW=0x4000000
STATE_SYSTEM_ALERT_MEDIUM=0x8000000
STATE_SYSTEM_ALERT_HIGH=0x10000000
STATE_SYSTEM_PROTECTED=0x20000000
STATE_SYSTEM_HASPOPUP=0x40000000
STATE_SYSTEM_VALID=0x7fffffff
# Selection flags (IAccessible::accSelect)
SELFLAG_NONE=0
SELFLAG_TAKEFOCUS=1
SELFLAG_TAKESELECTION=2
SELFLAG_EXTENDSELECTION=4
SELFLAG_ADDSELECTION=8
SELFLAG_REMOVESELECTION=16
SELFLAG_VALID=32
def LresultFromObject(wParam, obj):
	"""
	returns a reference, similar to a handle, to the specified object.
	Servers return this reference when handling WM_GETOBJECT.
	@param wParam: the wParam value passed in with WM_GETOBJECT.
	@type wParam: int
	@param obj: the COM object instance you want a reference for.
	@type obj: COMObject
	@return: a reference to the object.
	@rtype: int
	"""
	iid = obj._iid_
	return oledll.oleacc.LresultFromObject(byref(iid), wParam, obj)
def ObjectFromLresult(res, wParam, interface):
	"""
	resolves an object reference previously produced by LresultFromObject
	into a pointer to the requested COM interface.
	@param res: the previously generated object reference.
	@type res: int
	@param wParam: the wParam value passed in with WM_GETOBJECT.
	@type wParam: int
	@param interface: the requested COM interface.
	@type interface: comtypes COM interface
	@return: the object.
	@rtype: COMObject
	"""
	ptr = POINTER(interface)()
	oledll.oleacc.ObjectFromLresult(res, byref(interface._iid_), wParam, byref(ptr))
	return ptr
def CreateStdAccessibleProxy(hwnd, className, objectID, interface=IAccessible):
	"""
	creates an accessible object for the given window that behaves like the
	system-provided implementation for the given window class.
	@param hwnd: the handle of the window this accessible object should represent.
	@type hwnd: int
	@param className: the window class name to use.
	@type className: basestring
	@param objectID: an OBJID_* constant or custom value stating the specific object in the window.
	@type objectID: int
	@param interface: the requested COM interface for this object. Defaults to IAccessible.
	@type interface: comtypes COM interface
	@return: the created object.
	@rtype: COMObject
	"""
	ptr = POINTER(interface)()
	oledll.oleacc.CreateStdAccessibleProxyW(hwnd, className, objectID, byref(interface._iid_), byref(ptr))
	return ptr
def CreateStdAccessibleObject(hwnd, objectID, interface=IAccessible):
	"""
	creates an accessible object exposing the standard system-provided
	behaviour for the requested part of the given window.
	@param hwnd: the handle of the window this accessible object should represent.
	@type hwnd: int
	@param objectID: an OBJID_* constant or custom value stating the specific object in the window.
	@type objectID: int
	@param interface: the requested COM interface for this object. Defaults to IAccessible.
	@type interface: comtypes COM interface
	@return: the created object.
	@rtype: COMObject
	"""
	ptr = POINTER(interface)()
	oledll.oleacc.CreateStdAccessibleObject(hwnd, objectID, byref(interface._iid_), byref(ptr))
	return ptr
def AccessibleObjectFromWindow(hwnd, objectID, interface=IAccessible):
	"""
	fetches the COM object a window exposes for the given object ID.
	@param hwnd: the handle of the window to retrieve the object from.
	@type hwnd: int
	@param objectID: one of the OBJID_* constants or a custom positive value representing the specific object you want to retrieve.
	@type objectID: int
	@param interface: the requested COM interface you wish to use on the retrieved object.
	@type interface: comtypes COM interface
	@return: the retrieved object.
	@rtype: COMObject
	"""
	ptr = POINTER(interface)()
	oledll.oleacc.AccessibleObjectFromWindow(hwnd, objectID, byref(ptr._iid_), byref(ptr))
	return ptr
def AccessibleObjectFromWindow_safe(hwnd, objectID, interface=IAccessible, timeout=2):
	"""
	Like AccessibleObjectFromWindow, but guards against hung windows by
	sending WM_GETOBJECT with SMTO_ABORTIFHUNG and a timeout (in seconds).
	Falls back to a standard accessible object when the window returns no
	reference. Raises ValueError for a null window and OSError when the
	message cannot be delivered.
	"""
	if not hwnd:
		raise ValueError("Invalid window")
	wmResult = c_long()
	sent = windll.user32.SendMessageTimeoutW(hwnd, winUser.WM_GETOBJECT, 0, objectID, winUser.SMTO_ABORTIFHUNG, int(timeout * 1000), byref(wmResult))
	if sent == 0:
		raise OSError("WM_GETOBJECT failed")
	if wmResult.value:
		return ObjectFromLresult(wmResult.value, 0, interface)
	return CreateStdAccessibleObject(hwnd, objectID, interface)
def AccessibleObjectFromEvent(hwnd, objectID, childID):
	"""
	resolves a winEvent triple (window, object ID, child ID) into an
	IAccessible plus a possibly-updated child ID.
	@param hwnd: the handle of the window to retrieve the object from.
	@type hwnd: int
	@param objectID: one of the OBJID_* constants or a custom positive value representing the specific object you want to retrieve.
	@type objectID: int
	@param childID: the ID of the child element you wish to retrieve.
	@type childID: int
	@return: the retrieved object and its child ID.
	@rtype: tuple
	"""
	acc = POINTER(IAccessible)()
	childVar = VARIANT()
	oledll.oleacc.AccessibleObjectFromEvent(hwnd, objectID, childID, byref(acc), byref(childVar))
	if childVar.vt == VT_I4:
		childID = childVar.value
	return (acc, childID)
def AccessibleObjectFromEvent_safe(hwnd, objectID, childID, timeout=2):
	"""
	Timeout-protected variant of AccessibleObjectFromEvent: resolves the
	window/object pair via AccessibleObjectFromWindow_safe, then attempts to
	swap in the real child object so the returned child ID can be 0.
	"""
	obj = AccessibleObjectFromWindow_safe(hwnd, objectID, timeout=timeout)
	if not obj:
		raise RuntimeError("AccessibleObjectFromWindow failed")
	if childID != 0:
		try:
			child = obj.accChild(childID)
		except COMError:
			child = None
		if child:
			obj, childID = child, 0
	return (obj, childID)
def WindowFromAccessibleObject(pacc):
	"""
	returns the handle of the window hosting the given IAccessible.
	@param pacc: the IAccessible object whose window you want to fetch.
	@type pacc: POINTER(IAccessible)
	@return: the window handle.
	@rtype: int
	"""
	handle = c_int()
	oledll.oleacc.WindowFromAccessibleObject(pacc, byref(handle))
	return handle.value
def AccessibleObjectFromPoint(x, y):
	"""
	Hit-tests the given screen coordinates and returns the accessible
	object found there as an (IAccessible, childID) pair; childID is 0
	when the hit target is the object itself rather than a simple child.
	"""
	acc = POINTER(IAccessible)()
	childVar = VARIANT()
	oledll.oleacc.AccessibleObjectFromPoint(POINT(x, y), byref(acc), byref(childVar))
	child = childVar.value if isinstance(childVar.value, int) else 0
	return (acc, child)
def AccessibleChildren(pacc, iChildStart, cChildren):
	"""
	Returns up to cChildren child values of pacc, starting at index
	iChildStart. Each element is either a child ID or a child IDispatch.
	"""
	children = (VARIANT * cChildren)()
	obtained = c_int()
	oledll.oleacc.AccessibleChildren(pacc, iChildStart, cChildren, byref(children), byref(obtained))
	return [child.value for child in children[:obtained.value]]
def GetProcessHandleFromHwnd(windowHandle):
	"""Retrieves a process handle of the process who owns the window.
	If Windows Vista, uses GetProcessHandleFromHwnd found in oleacc.dll which allows a client with UIAccess to open a process who is elevated.
	if older than Windows Vista, just uses OpenProcess from user32.dll instead.
	@param windowHandle: a window of a process you wish to retreave a process handle for
	@type windowHandle: integer
	@returns: a process handle with read, write and operation access
	@rtype: integer
	"""
	try:
		return oledll.oleacc.GetProcessHandleFromHwnd(windowHandle)
	except (AttributeError, WindowsError):
		# AttributeError: the export is missing (pre-Vista oleacc.dll);
		# WindowsError: the call itself failed. The old bare `except:` also
		# swallowed KeyboardInterrupt/SystemExit, which must propagate.
		# winKernel is already imported at module level; the old local
		# re-import was redundant.
		return winKernel.openProcess(winKernel.PROCESS_VM_READ|winKernel.PROCESS_VM_WRITE|winKernel.PROCESS_VM_OPERATION,False,winUser.getWindowThreadProcessID(windowHandle)[0])
def GetRoleText(role):
	"""
	Returns the localized description of an MSAA role constant, or None
	when the system reports no text for it.
	"""
	textLen = oledll.oleacc.GetRoleTextW(role, 0, 0)
	if not textLen:
		return None
	buf = create_unicode_buffer(textLen + 2)
	oledll.oleacc.GetRoleTextW(role, buf, textLen + 1)
	return buf.value
def GetStateText(state):
	"""
	Returns the localized description of an MSAA state flag, or None
	when the system reports no text for it.
	"""
	textLen = oledll.oleacc.GetStateTextW(state, 0, 0)
	if not textLen:
		return None
	buf = create_unicode_buffer(textLen + 2)
	oledll.oleacc.GetStateTextW(state, buf, textLen + 1)
	return buf.value
| ckundo/nvda | source/oleacc.py | Python | gpl-2.0 | 10,580 |
'''
AxelProxy XBMC Addon
Copyright (C) 2013 Eldorado
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
import axelcommon
import axelproxy
# This is xbmc linked class. TODO: read the settings here and send it to proxy for port etc
#TODO: check if start at launch setting is configured!
#Address and IP for Proxy to listen on
HOST_NAME = '127.0.0.1'
#HOST_NAME = 'localhost'
PORT_NUMBER = 45550 ##move this somewhere which could be configured by UI
if __name__ == '__main__':
    file_dest = axelcommon.profile_path #replace this line if you want to be specific about the download folder
    print file_dest
    # NOTE(review): the trailing comma below makes this expression a 1-tuple;
    # it is harmless but looks accidental -- confirm and remove.
    axelproxy.ProxyManager().start_proxy(port=PORT_NUMBER, host_name=HOST_NAME,download_folder=file_dest), #more param to come
| JamesLinEngineer/RKMC | addons/script.module.axel.downloader/lib/standalone_server.py | Python | gpl-2.0 | 1,470 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Display matplotlib animations as HTML5 video
#
# Based on [this notebook](http://nbviewer.ipython.org/url/jakevdp.github.io/downloads/notebooks/AnimationEmbedding.ipynb) by jakevdp. Updated with:
#
# - output video that works with chrome (pix_fmt below)
# - add plt.close() to avoid showing PNG below the animation
# - autoplay (customize the VIDEO_TAG below to change behavior, for example add loop attribute)
# <codecell>
from matplotlib import animation, pyplot as plt
from tempfile import NamedTemporaryFile
VIDEO_TAG = """<video controls autoplay>
<source src="data:{0}">
Your browser does not support the video tag.
</video>"""
def anim_to_html(anim):
    """Encode a matplotlib animation as an HTML5 <video> snippet.

    The mp4 is rendered once (requires ffmpeg with libx264) and cached on the
    animation object as ``_encoded_video`` so repeated display is cheap.
    NOTE: ``str.encode("base64")`` is Python-2-only, as is the rest of this
    notebook-derived script.
    """
    if not hasattr(anim, '_encoded_video'):
        with NamedTemporaryFile(suffix='.m4v') as f:
            # yuv420p pixel format is required for Chrome to play the video
            anim.save(f.name, fps=20, extra_args=['-vcodec', 'libx264', '-pix_fmt', 'yuv420p'])
            # the old code left this file handle open; close it deterministically
            with open(f.name, "rb") as video_file:
                video = video_file.read()
        anim._encoded_video = 'video/mp4;base64,' + video.encode("base64")
    # prevent figure displayed as a PNG below the animation
    plt.close()
    return VIDEO_TAG.format(anim._encoded_video)
from IPython.display import HTML
def display_animation(anim):
    """Display a matplotlib animation inline in the notebook as HTML5 video."""
    # close the animation's source figure so only the video is shown
    plt.close(anim._fig)
    return HTML(anim_to_html(anim))
| sebdiem/userpage | content/notebooks/mpl_animation_html.py | Python | mit | 1,312 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 09:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow blank values on three wizard_builder text fields.

    Auto-generated by Django 1.11.5; alters FormQuestion.descriptive_text,
    FormQuestion.text and Page.infobox to ``blank=True``.
    """
    dependencies = [
        ('wizard_builder', '0024_remove_formquestion_added'),
    ]
    operations = [
        migrations.AlterField(
            model_name='formquestion',
            name='descriptive_text',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='formquestion',
            name='text',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='page',
            name='infobox',
            field=models.TextField(blank=True),
        ),
    ]
| SexualHealthInnovations/django-wizard-builder | wizard_builder/migrations/0025_auto_20170915_0948.py | Python | bsd-3-clause | 781 |
# -*- coding: utf-8 -*-
#
# Cork documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 8 13:40:17 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import pkg_resources
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# NOTE(review): version is hard-coded here rather than imported from the
# package -- keep it in sync with the package metadata on release.
__version__ = '0.5'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
              'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cork'
# copyright year is computed at build time so it never goes stale
copyright = u"%s, Federico Ceratto" % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(__version__.split(".")[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'bw'
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# autodoc: document both the class docstring and __init__'s docstring,
# include inherited/undocumented members, keep source order
autoclass_content = "both"
autodoc_default_flags = ['show-inheritance','members','undoc-members']
autodoc_member_order = 'bysource'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bw'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "github_ribbon": True,
    "github_ribbon_link": "https://github.com/FedericoCeratto/bottle-cork",
}
# Add any paths that contain custom themes here, relative to this directory.
# (the 'bw' theme ships with the bw_sphinxtheme package)
html_theme_path = [pkg_resources.resource_filename('bw_sphinxtheme', 'themes')]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Corkdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Cork.tex', u'Cork Documentation',
   u'Federico Ceratto', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cork', u'Cork Documentation',
     [u'Federico Ceratto'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Cork', u'Cork Documentation',
   u'Federico Ceratto', 'Cork', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mfedy/bottle-cork | docs/conf.py | Python | lgpl-3.0 | 8,280 |
# Copyright 2022 The etils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ecolab."""
import sys
from etils import ecolab
def test_capture_stdout():
  """`ecolab.collapse` should capture writes to both stdout and stderr."""
  message = 'Abcd'
  with ecolab.collapse():
    print(message)
    print(message, file=sys.stderr)
| google/etils | etils/ecolab/colab_utils_test.py | Python | apache-2.0 | 753 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extra dhcp opts support
Revision ID: 53bbd27ec841
Revises: 40dffbf4b549
Create Date: 2013-05-09 15:36:50.485036
"""
# revision identifiers, used by Alembic.
# Migration chain: 40dffbf4b549 -> 53bbd27ec841.
revision = '53bbd27ec841'
down_revision = '40dffbf4b549'
# Change to ['*'] if this migration applies to all plugins
# (migration.should_run() skips this migration unless one of these plugin
# classes is active in the deployment).
migration_for_plugins = [
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Create the ``extradhcpopts`` table holding per-port DHCP options.

    A no-op unless one of the plugins in ``migration_for_plugins`` is active.
    """
    if migration.should_run(active_plugins, migration_for_plugins):
        op.create_table(
            'extradhcpopts',
            sa.Column('id', sa.String(length=36), nullable=False),
            sa.Column('port_id', sa.String(length=36), nullable=False),
            sa.Column('opt_name', sa.String(length=64), nullable=False),
            sa.Column('opt_value', sa.String(length=255), nullable=False),
            sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
            sa.PrimaryKeyConstraint('id'),
            sa.UniqueConstraint('port_id', 'opt_name', name='uidx_portid_optname'))
def downgrade(active_plugins=None, options=None):
    """Drop the ``extradhcpopts`` table (reverse of ``upgrade``)."""
    if migration.should_run(active_plugins, migration_for_plugins):
        op.drop_table('extradhcpopts')
| ntt-sic/neutron | neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py | Python | apache-2.0 | 2,051 |
# This class manages how and when to play shows.
import threading, time
from object import Object
from schedule import Schedule
from series import Series
from helpers import listDirs, listFiles
from week import getDate
import settings, schedule_settings
class Manager(Object, threading.Thread):
    """Background thread that owns the playout schedule.

    Loads schedule blocks from ``schedule_settings`` into a ``Schedule``
    and then idles while the scheduler is running.
    """
    # INIT -------------------------------------------------------------
    def __init__(self):
        threading.Thread.__init__(self)
        Object.__init__(self)
        # OBJECTS
        self.scheduler = Schedule() # The schedule object
        self.series = [] # List of series
    # LOAD -------------------------------------------------------------
    # Loads schedule from a file (schedule_settings.py).
    def load(self):
        # Add blocks
        # NOTE(review): assumes each BLOCKS entry is
        # (name, "DAY HH:MM", "DAY HH:MM") — confirm against schedule_settings.
        for block in schedule_settings.BLOCKS:
            # Calculate the day
            # getDate() resolves the weekday token to a day-of-year number,
            # which is then matched by the "%j %H:%M" format below.
            start_day = str(getDate(block[1].split()[0]))
            end_day = str(getDate(block[2].split()[0]))
            self.scheduler.add(start_day + " " + block[1].split()[1], end_day + " " + block[2].split()[1], "%j %H:%M", block[0])
        # Start the scheduler (if it isn't already)
        if not self.scheduler.running:
            self.scheduler.start()
    # NEXT -------------------------------------------------------------
    # Picks the next show (will still obey schedule times, even if
    # something is skipped)
    def next(self):
        # Not implemented yet.
        pass
    # BACK -------------------------------------------------------------
    # Picks the previous show played.
    def back(self):
        # Not implemented yet.
        pass
    # THREADING --------------------------------------------------------
    def run(self):
        # Idle loop: the thread stays alive for as long as the scheduler
        # reports itself running, waking periodically.
        while self.scheduler.running:
            time.sleep(settings.MANAGER_SLEEP)
| bombpersons/MYOT | myot/manager.py | Python | gpl-3.0 | 1,578 |
"""JSON implementations of assessment.authoring searches."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import objects
from . import queries
from .. import utilities
from ..osid import searches as osid_searches
from ..primitives import Id
from ..utilities import get_registry
from dlkit.abstract_osid.assessment_authoring import searches as abc_assessment_authoring_searches
from dlkit.abstract_osid.osid import errors
class AssessmentPartSearch(abc_assessment_authoring_searches.AssessmentPartSearch, osid_searches.OsidSearch):
    """The search interface for governing assessment part searches."""
    def __init__(self, runtime):
        self._namespace = 'assessment.authoring.AssessmentPart'
        self._runtime = runtime
        # All registered resource record types are advertised as supported
        # search record types for this search.
        record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
        self._record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_ids = []
        # Optional Id restriction; populated by search_among_assessment_parts().
        self._id_list = None
        for data_set in record_type_data_sets:
            self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
        osid_searches.OsidSearch.__init__(self, runtime)
    @utilities.arguments_not_none
    def search_among_assessment_parts(self, bank_ids):
        """Execute this search among the given list of assessment parts.
        arg: bank_ids (osid.id.IdList): list of assessment parts
        raise: NullArgument - ``bank_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        # The stored restriction list is consumed later by the session
        # that actually performs the search.
        self._id_list = bank_ids
    @utilities.arguments_not_none
    def order_assessment_part_results(self, assessment_part_search_order):
        """Specify an ordering to the search results.
        arg: assessment_part_search_order
                (osid.assessment.authoring.AssessmentPartSearchOrder):
                assessment part search order
        raise: NullArgument - ``assessment_part_search_order`` is
                ``null``
        raise: Unsupported - ``assessment_part_search_order`` is not of
                this service
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_assessment_part_search_record(self, assessment_part_search_record_type):
        """Gets the assessment part search record corresponding to the given assessment part search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: assessment_part_search_record_type (osid.type.Type): an
                assessment part search record type
        return:
                (osid.assessment.authoring.records.AssessmentPartSearchR
                ecord) - the assessment part search record
        raise: NullArgument - ``assessment_part_search_record_type`` is
                ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
                ``has_record_type(assessment_part_search_record_type)``
                is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class AssessmentPartSearchResults(abc_assessment_authoring_searches.AssessmentPartSearchResults, osid_searches.OsidSearchResults):
    """This interface provides a means to capture results of a search."""
    def __init__(self, results, query_terms, runtime):
        # if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
        # self._results = [r for r in results]
        self._namespace = 'assessment.authoring.AssessmentPart'
        # `results` is kept as a lazy cursor; it is wrapped only when the
        # caller asks for the list (see get_assessment_parts).
        self._results = results
        self._query_terms = query_terms
        self._runtime = runtime
        # One-shot guard: the results list may be consumed only once.
        self.retrieved = False
    def get_assessment_parts(self):
        """Gets the ``AssessmentPartList`` resulting from a search.
        return: (osid.assessment.authoring.AssessmentPartList) - the
                assessment part list
        raise: IllegalState - list has already been retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        if self.retrieved:
            raise errors.IllegalState('List has already been retrieved.')
        self.retrieved = True
        return objects.AssessmentPartList(self._results, runtime=self._runtime)
    assessment_parts = property(fget=get_assessment_parts)
    def get_assessment_part_query_inspector(self):
        """Gets the inspector for the query to examine the terms used in the search.
        return: (osid.assessment.authoring.AssessmentPartQueryInspector)
                - the assessment part query inspector
        *compliance: mandatory -- This method must be implemented.*
        """
        return queries.AssessmentPartQueryInspector(self._query_terms, runtime=self._runtime)
    assessment_part_query_inspector = property(fget=get_assessment_part_query_inspector)
    @utilities.arguments_not_none
    def get_assessment_part_search_results_record(self, assessment_part_search_record_type):
        """Gets the assessment part search results record corresponding to the given assessment part search record ``Type``.
        This method must be used to retrieve an object implementing the
        requested record.
        arg: assessment_part_search_record_type (osid.type.Type): an
                assessment part search record type
        return: (osid.assessment.authoring.records.AssessmentPartSearchR
                esultsRecord) - the assessment part search results
                record
        raise: NullArgument - ``assessment_part_search_record_type`` is
                ``null``
        raise: OperationFailed - unable to complete request
        raise: PermissionDenied - authorization failure occurred
        raise: Unsupported -
                ``has_record_type(assessment_part_search_record_type)``
                is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class SequenceRuleSearch(abc_assessment_authoring_searches.SequenceRuleSearch, osid_searches.OsidSearch):
    """The search interface for governing sequence rule searches."""
    def __init__(self, runtime):
        self._namespace = 'assessment.authoring.SequenceRule'
        self._runtime = runtime
        # All registered resource record types are advertised as supported
        # search record types for this search.
        record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
        self._record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_ids = []
        # Optional Id restriction; populated by search_among_sequence_rules().
        self._id_list = None
        for data_set in record_type_data_sets:
            self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
        osid_searches.OsidSearch.__init__(self, runtime)
    @utilities.arguments_not_none
    def search_among_sequence_rules(self, bank_ids):
        """Execute this search among the given list of sequence rules.
        arg: bank_ids (osid.id.IdList): list of sequence rules
        raise: NullArgument - ``bank_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        # The stored restriction list is consumed later by the session
        # that actually performs the search.
        self._id_list = bank_ids
    @utilities.arguments_not_none
    def order_sequence_rule_results(self, sequence_rule_search_order):
        """Specify an ordering to the search results.
        arg: sequence_rule_search_order
                (osid.assessment.authoring.SequenceRuleSearchOrder):
                sequence rule search order
        raise: NullArgument - ``sequence_rule_search_order`` is
                ``null``
        raise: Unsupported - ``sequence_rule_search_order`` is not of
                this service
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_sequence_rule_search_record(self, sequence_rule_search_record_type):
        """Gets the sequence rule search record corresponding to the given sequence rule search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: sequence_rule_search_record_type (osid.type.Type): a
                sequence rule search record type
        return:
                (osid.assessment.authoring.records.SequenceRuleSearchRec
                ord) - the sequence rule search record
        raise: NullArgument - ``sequence_rule_search_record_type`` is
                ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
                ``has_record_type(sequence_rule_search_record_type)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class SequenceRuleSearchResults(abc_assessment_authoring_searches.SequenceRuleSearchResults, osid_searches.OsidSearchResults):
    """This interface provides a means to capture results of a search."""
    def __init__(self, results, query_terms, runtime):
        # if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
        # self._results = [r for r in results]
        self._namespace = 'assessment.authoring.SequenceRule'
        # `results` is kept as a lazy cursor; it is wrapped only when the
        # caller asks for the list (see get_sequence_rules).
        self._results = results
        self._query_terms = query_terms
        self._runtime = runtime
        # One-shot guard: the results list may be consumed only once.
        self.retrieved = False
    def get_sequence_rules(self):
        """Gets the ``SequenceRuleList`` resulting from a search.
        return: (osid.assessment.authoring.SequenceRuleList) - the
                sequence rule list
        raise: IllegalState - list has already been retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        if self.retrieved:
            raise errors.IllegalState('List has already been retrieved.')
        self.retrieved = True
        return objects.SequenceRuleList(self._results, runtime=self._runtime)
    sequence_rules = property(fget=get_sequence_rules)
    def get_sequence_rule_query_inspector(self):
        """Gets the inspector for the query to examine the terms used in the search.
        return: (osid.assessment.authoring.SequenceRuleQueryInspector) -
                the sequence rule query inspector
        *compliance: mandatory -- This method must be implemented.*
        """
        return queries.SequenceRuleQueryInspector(self._query_terms, runtime=self._runtime)
    sequence_rule_query_inspector = property(fget=get_sequence_rule_query_inspector)
    @utilities.arguments_not_none
    def get_sequence_rule_search_results_record(self, sequence_rule_search_record_type):
        """Gets the sequence rule search results record corresponding to the given sequence rule search record ``Type``.
        This method must be used to retrieve an object implementing the
        requested record.
        arg: sequence_rule_search_record_type (osid.type.Type): a
                sequence rule search record type
        return: (osid.assessment.authoring.records.SequenceRuleSearchRes
                ultsRecord) - the sequence rule search results record
        raise: NullArgument - ``sequence_rule_search_record_type`` is
                ``null``
        raise: OperationFailed - unable to complete request
        raise: PermissionDenied - authorization failure occurred
        raise: Unsupported -
                ``has_record_type(sequence_rule_search_record_type)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
| mitsei/dlkit | dlkit/json_/assessment_authoring/searches.py | Python | mit | 12,132 |
# NOTE(review): Python 2 syntax throughout (`print` statements, bare
# `input()`). In Python 2, input() eval()s the typed text, so the user is
# expected to type a numeric literal.
x = 0
print 'Enter a test score between 60 and 100:'
# Read exactly 10 scores, one per loop iteration.
while x < 10:
    score = input()
    # Out-of-range low scores get a rude failure message instead of a grade.
    if (score < 60):
        print "Score:", score, "You can't follow directions, and you are stupid"
    elif (score >= 60 and score < 70):
        print "Score:", score, 'Your grade is D'
    elif (score >= 70 and score < 80):
        print "Score:", score, 'Your grade is C'
    elif (score >= 80 and score < 90):
        print "Score:", score, 'Your grade is B'
    # NOTE(review): anything >= 90 — including values above 100 — is reported
    # as grade A; the prompt's "between 60 and 100" is never enforced.
    else:
        print "Score:", score, 'Your grade is A'
    x = x + 1
print 'End of the program. KayBaiiiiiiiiiiii!' | jiobert/python | Horan_Colby/Assignments/scores-grades.py | Python | mit | 521 |
from django_filters import filterset, filters
from facebook_data.models import FacebookAdvert
class FacebookAdvertFilterSet(filterset.FilterSet):
    """Filter ``FacebookAdvert`` querysets by the ID of the owning person.

    Exposes a single ``person_id`` filter matching the advert's
    ``person_id`` field.
    """
    class Meta:
        model = FacebookAdvert
        fields = ["person_id"]

    person_id = filters.Filter(
        field_name="person_id",
        label="Person ID",
        # Fixed user-facing typo: "who's" -> "whose".
        help_text="The person whose page ran the advert",
    )
| DemocracyClub/yournextrepresentative | ynr/apps/facebook_data/filters.py | Python | agpl-3.0 | 383 |
import json
import pytest
from allennlp.common.testing import ModelTestCase
class ModelWithIncorrectValidationMetricTest(ModelTestCase):
    """
    This test case checks some validating functionality that is implemented
    in `ensure_model_can_train_save_and_load`
    """
    def setup_method(self):
        super().setup_method()
        # Train a tiny simple_tagger fixture so each test can exercise the
        # metric-validation arguments of ensure_model_can_train_save_and_load.
        self.set_up_model(
            self.FIXTURES_ROOT / "simple_tagger" / "model_test_case.jsonnet",
            self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv",
        )
    def test_01_test_validation_metric_does_not_exist(self):
        # Asking for a metric the model never reports must fail the check.
        overrides = {"trainer.num_epochs": 2}
        pytest.raises(
            AssertionError,
            self.ensure_model_can_train_save_and_load,
            self.param_file,
            metric_to_check="non_existent_metric",
            metric_terminal_value=0.0,
            overrides=json.dumps(overrides),
        )
    def test_02a_test_validation_metric_terminal_value_not_set(self):
        # Naming a metric without a terminal value is a usage error.
        pytest.raises(
            AssertionError,
            self.ensure_model_can_train_save_and_load,
            self.param_file,
            metric_to_check="accuracy",
            metric_terminal_value=None,
        )
    def test_02b_test_validation_metric_terminal_value_not_met(self):
        # The fixture reaches accuracy 1.0, so a required value of 0.0 fails.
        pytest.raises(
            AssertionError,
            self.ensure_model_can_train_save_and_load,
            self.param_file,
            metric_to_check="accuracy",
            metric_terminal_value=0.0,
        )
    def test_03_test_validation_metric_exists_and_its_terminal_value_is_met(self):
        # Happy path: metric exists and reaches the required terminal value.
        self.ensure_model_can_train_save_and_load(
            self.param_file,
            metric_to_check="accuracy",
            metric_terminal_value=1.0,
        )
| allenai/allennlp | tests/models/test_model_test_case.py | Python | apache-2.0 | 1,751 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
# pylint: disable=no-else-return, no-else-continue
"""Caffe frontend."""
import numpy as np
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .common import ExprTable
from .common import infer_shape as _infer_shape
__all__ = ["from_caffe"]
class OperatorConverter(object):
"""Operator Converted for converting Caffe ops to Relay ops"""
    def __init__(self, init_layer_dict, predict_layer, exp_tab):
        # init_layer_dict: layer name -> caffemodel layer (holds weight blobs).
        # predict_layer: the prototxt layers to convert, in order.
        # exp_tab: ExprTable mapping tensor names to Relay expressions.
        self.init_layer_dict = init_layer_dict
        self.predict_layer = predict_layer
        self.exp_tab = exp_tab
        # new_bn: BatchNorm layers whose params were rewritten elsewhere
        # (presumably Scale-fusion); keyed by layer name — TODO confirm.
        self.new_bn = {}
        self.changed_layers = None
        # Dispatch table: Caffe layer type string -> conversion method.
        # "Input" is handled outside this table (no converter needed).
        self.convert_map = {
            "BatchNorm": self.convert_batch_norm,
            "Concat": self.convert_concat,
            "Convolution": self.convert_conv,
            "Crop": self.convert_crop,
            "Deconvolution": self.convert_deconv,
            "Dropout": self.convert_dropout,
            "Eltwise": self.convert_eltwise,
            "Embed": self.convert_embed,
            "Flatten": self.convert_flatten,
            "InnerProduct": self.convert_innerproduct,
            "Input": None,
            "LRN": self.convert_lrn,
            "Pooling": self.convert_pooling,
            "PReLU": self.convert_prelu,
            "ReLU": self.convert_relu,
            "Reshape": self.convert_reshape,
            "Scale": self.convert_scale,
            "Sigmoid": self.convert_sigmoid,
            "Slice": self.convert_slice,
            "Softmax": self.convert_softmax,
            "TanH": self.convert_tanh,
        }
def convert_flatten(self, op):
"""Convert Flatten layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
flatten_params = op.flatten_param.axis
assert flatten_params == 1, "flatten axis should be 1"
out = _op.nn.batch_flatten(in_expr)
return out
    def convert_eltwise(self, op):
        """Convert Eltwise layer"""
        inputs = op.bottom
        # Only binary eltwise is supported (Caffe allows N inputs).
        assert len(inputs) == 2, "input tensors length should be 2"
        lhs_expr = self.exp_tab.get_expr(inputs[0])
        rhs_expr = self.exp_tab.get_expr(inputs[1])
        lhs_shape = _infer_shape(lhs_expr)
        rhs_shape = _infer_shape(rhs_expr)
        # No broadcasting: both operands must have identical shapes.
        assert lhs_shape == rhs_shape, "input tensors shape should be equal"
        eltwise_params = op.eltwise_param
        # Caffe EltwiseOp enum: 0 = PROD, 1 = SUM, 2 = MAX.
        eltwise_type_dict = ["PROD", "SUM", "MAX"]
        eltwise_type = eltwise_params.operation
        coeff = list(eltwise_params.coeff)
        if eltwise_type_dict[eltwise_type] == "PROD":
            out = _op.multiply(lhs_expr, rhs_expr)
        elif eltwise_type_dict[eltwise_type] == "SUM":
            # Per-input coefficients are honored only for SUM
            # (matching Caffe, where coeff applies to SUM alone).
            if coeff:
                left_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[0], np.float32))
                right_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[1], np.float32))
                lhs_expr_scale = _op.multiply(lhs_expr, left_coeff_expr)
                rhs_expr_scale = _op.multiply(rhs_expr, right_coeff_expr)
                out = _op.add(lhs_expr_scale, rhs_expr_scale)
            else:
                out = _op.add(lhs_expr, rhs_expr)
        elif eltwise_type_dict[eltwise_type] == "MAX":
            out = _op.maximum(lhs_expr, rhs_expr)
        else:
            raise tvm.error.OpNotImplemented(
                "eltwise_type {} is not supported for frontend Caffe.".format(eltwise_type)
            )
        return out
def _parse_conv_params(self, op):
"""Parse the parameters of Convolution and Deconvolution layer"""
nonzone = lambda val, pos, dflt: val[pos] if pos < len(val) else dflt
conv_params = op.convolution_param
params = dict()
# parse kernel size
if conv_params.kernel_h > 0 or conv_params.kernel_w > 0:
params["kernel_size"] = (conv_params.kernel_h, conv_params.kernel_w)
else:
ksize_h = nonzone(conv_params.kernel_size, 0, 1)
ksize_w = nonzone(conv_params.kernel_size, 1, ksize_h)
params["kernel_size"] = (ksize_h, ksize_w)
# parse padding size
if conv_params.pad_h > 0 or conv_params.pad_w > 0:
params["padding"] = (conv_params.pad_h, conv_params.pad_w)
else:
pad_h = nonzone(conv_params.pad, 0, 0)
pad_w = nonzone(conv_params.pad, 1, pad_h)
params["padding"] = (pad_h, pad_w)
# parse stride size
if conv_params.stride_h > 0 or conv_params.stride_w > 0:
params["strides"] = (conv_params.stride_h, conv_params.stride_w)
else:
stride_h = nonzone(conv_params.stride, 0, 1)
stride_w = nonzone(conv_params.stride, 1, stride_h)
params["strides"] = (stride_h, stride_w)
# parse dilation size
if hasattr(conv_params, "dilation") and len(conv_params.dilation) > 0:
dilation = " ".join(str(d) for d in conv_params.dilation)
dilation = tuple(map(int, dilation.split(" ")))
params["dilation"] = dilation
if len(dilation) == 1:
params["dilation"] = (dilation[0], dilation[0])
params["kernel_layout"] = "OIHW"
params["data_layout"] = "NCHW"
params["groups"] = conv_params.group
params["channels"] = conv_params.num_output
return params
    def convert_batch_norm(self, op):
        """Convert BatchNorm layer"""
        inputs = op.bottom
        in_expr = self.exp_tab.get_expr(inputs[0])
        n, c, h, w = _infer_shape(in_expr)
        if op.name in self.new_bn:
            # Pre-rewritten parameters (presumably from a fused Scale layer
            # — TODO confirm): use them directly with scale=True.
            mean, var, eps, gamma, beta = self.new_bn[op.name]
            mean_expr = self.exp_tab.new_const(mean, dtype="float32")
            var_expr = self.exp_tab.new_const(var, dtype="float32")
            gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
            beta_expr = self.exp_tab.new_const(beta, dtype="float32")
            out = _op.nn.batch_norm(
                in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=eps, scale=True
            )
        else:
            weight_bias_blobs = self.init_layer_dict[op.name].blobs
            mean = np.asarray(weight_bias_blobs[0].data, np.float32)
            var = np.asarray(weight_bias_blobs[1].data, np.float32)
            if len(weight_bias_blobs) == 2:
                # Only two blobs: emit an elementwise multiply/add with the
                # per-channel stats broadcast to the full NCHW input shape.
                mean = np.repeat(mean, h * w).reshape((c, h, w))
                mean = np.expand_dims(mean, 0).repeat(n, axis=0)
                mean_expr = self.exp_tab.new_const(mean, dtype="float32")
                var = np.repeat(var, h * w).reshape((c, h, w))
                var = np.expand_dims(var, 0).repeat(n, axis=0)
                var_expr = self.exp_tab.new_const(var, dtype="float32")
                tmp_out = _op.multiply(in_expr, mean_expr)
                out = _op.add(tmp_out, var_expr)
                # Early return: this branch produces a plain expression,
                # not a batch_norm tuple.
                return out
            else:
                # Third blob is Caffe's scale factor; stored stats must be
                # divided by it (multiplied by its reciprocal).
                scale = np.asarray(weight_bias_blobs[2].data, np.float32)
                if scale:
                    scale = 1 / scale
                mean_expr = self.exp_tab.new_const(mean * scale, dtype="float32")
                var_expr = self.exp_tab.new_const(var * scale, dtype="float32")
                # caffe bn layer not support scale
                gamma_expr = self.exp_tab.new_const(
                    np.ones(mean.shape, dtype=np.float32), dtype="float32"
                )
                beta_expr = self.exp_tab.new_const(
                    np.zeros(mean.shape, dtype=np.float32), dtype="float32"
                )
                bn_params = op.batch_norm_param.eps
                out = _op.nn.batch_norm(
                    in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=bn_params, scale=False
                )
        # batch_norm returns a tuple; index 0 is the normalized data.
        return out[0]
    def convert_scale(self, op):
        """Convert Scale layer"""
        inputs = op.bottom
        in_expr = self.exp_tab.get_expr(inputs[0])
        weight_bias_blobs = self.init_layer_dict[op.name].blobs
        params = dict()
        params["bias"] = op.scale_param.bias_term
        params["axis"] = op.scale_param.axis
        # Blob 0 holds the per-channel multiplier (gamma).
        gamma = np.asarray(weight_bias_blobs[0].data, np.float32)
        gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
        if params["bias"]:
            # Blob 1 holds the additive term (beta) when bias_term is set.
            beta = np.asarray(weight_bias_blobs[1].data, np.float32)
            beta_expr = self.exp_tab.new_const(beta, dtype="float32")
        else:
            # Without a bias term, add zeros so the emitted graph shape is
            # uniform for both cases.
            beta_expr = self.exp_tab.new_const(
                np.zeros(gamma.shape, dtype=np.float32), dtype="float32"
            )
        # Reshape to (1, C, 1, 1) so the multiply/add broadcast over NCHW.
        # NOTE(review): this ignores params["axis"] and always scales axis 1.
        _, c, _, _ = _infer_shape(in_expr)
        gamma_expr = _op.reshape(gamma_expr, newshape=(1, c, 1, 1))
        beta_expr = _op.reshape(beta_expr, newshape=(1, c, 1, 1))
        out = _op.multiply(in_expr, gamma_expr)
        out = _op.add(out, beta_expr)
        return out
def convert_concat(self, op):
"""Convert Concat layer"""
inputs = op.bottom
in_expr = (self.exp_tab.get_expr(inputs[i]) for i in range(len(inputs)))
c_params = dict()
c_params["axis"] = op.concat_param.axis
out = _op.concatenate(in_expr, axis=c_params["axis"])
return out
    def convert_reshape(self, op):
        """Convert Reshape layer"""
        inputs = op.bottom
        input_name = inputs[0]
        reshape_param = op.reshape_param
        dims = list(reshape_param.shape.dim)
        in_expr = self.exp_tab.get_expr(input_name)
        input_shape = list(_infer_shape(in_expr))
        # Caffe semantics: only [axis, axis + num_axes) is reshaped; the
        # leading/trailing dims are carried over untouched.
        start_axis = int(reshape_param.axis)
        if start_axis < 0:
            # Negative axis counts from the end, inclusive of the end slot.
            start_axis = len(input_shape) + start_axis + 1
        num_axes = int(reshape_param.num_axes)
        end_axis = len(input_shape)
        if num_axes != -1:
            end_axis = start_axis + num_axes
        left_shape = input_shape[:start_axis]
        if end_axis == len(input_shape):
            center_shape = input_shape[start_axis:]
            right_shape = []
        else:
            center_shape = input_shape[start_axis:end_axis]
            right_shape = input_shape[end_axis:]
        # dim == 0 means "copy the corresponding input dimension" in Caffe.
        for idx, dim in enumerate(dims):
            if dim == 0:
                dims[idx] = center_shape[idx]
        # Let numpy resolve any -1 wildcard: reshape a dummy array of the
        # center extent and read back the concrete shape.
        tmp = np.random.rand(*center_shape)
        tmp = np.reshape(tmp, dims)
        center_shape = list(tmp.shape)
        newshape = left_shape + center_shape + right_shape
        out = _op.reshape(in_expr, newshape=newshape)
        return out
def convert_softmax(self, op):
"""Convert Softmax layer"""
inputs = op.bottom
assert len(inputs) == 1, "input tensors length should be 1"
input_name = inputs[0]
in_expr = self.exp_tab.get_expr(input_name)
softmax_param = op.softmax_param
parmas = {"axis": softmax_param.axis}
out = _op.nn.softmax(in_expr, **parmas)
return out
    def convert_conv(self, op):
        """Convert Convolution layer"""
        params = self._parse_conv_params(op)
        weight_bias_blobs = self.init_layer_dict[op.name].blobs
        conv_params = op.convolution_param
        inputs = op.bottom
        # process weight and bias blobs
        # Blob 0 is the kernel; blob 1 (when present) is the bias.
        weight, bias = None, None
        if len(weight_bias_blobs) > 1:
            weight = weight_bias_blobs[0]
            bias = weight_bias_blobs[1]
        else:
            weight = weight_bias_blobs[0]
        if weight:
            kh, kw = params["kernel_size"]
            # -1 lets numpy infer input-channels-per-group from the flat blob.
            weight_shape = [conv_params.num_output, -1, kh, kw]
            weight_value = np.asarray(weight.data, np.float32)
            weight_value = np.reshape(weight_value, weight_shape)
        else:
            raise Exception("No weight value of layer {} in caffemodel".format(op.name))
        weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
        in_expr = self.exp_tab.get_expr(inputs[0])
        out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)
        if bias:
            bias_value = np.asarray(bias.data, np.float32)
            bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
            out = _op.nn.bias_add(out, bias_expr)
        return out
def convert_pooling(self, op):
"""Convert Pooling layer"""
inputs = op.bottom
input_name = inputs[0]
pool_params = op.pooling_param
pool_type_dict = ["MAX", "AVE", "STOCHASTIC"]
params = dict()
# parse pool type: 0: MAX, 1: AVE, 2: STOCHASTIC
pool_type = pool_params.pool
# parse kernel size
if pool_params.kernel_h > 0 or pool_params.kernel_w > 0:
params["pool_size"] = (pool_params.kernel_h, pool_params.kernel_w)
else:
params["pool_size"] = (pool_params.kernel_size, pool_params.kernel_size)
# parse padding size
if pool_params.pad_h > 0 or pool_params.pad_w > 0:
params["padding"] = (pool_params.pad_h, pool_params.pad_w)
else:
params["padding"] = (pool_params.pad, pool_params.pad)
# parse stride size
if pool_params.stride_h > 0 or pool_params.stride_w > 0:
params["strides"] = (pool_params.stride_h, pool_params.stride_w)
else:
params["strides"] = (pool_params.stride, pool_params.stride)
params["ceil_mode"] = True
if hasattr(pool_params, "round_mode"):
params["ceil_mode"] = pool_params.round_mode == "CEIL"
in_expr = self.exp_tab.get_expr(input_name)
if pool_type_dict[pool_type] == "MAX":
if pool_params.global_pooling:
out = _op.nn.global_max_pool2d(in_expr)
else:
if len(op.top) == 1:
out = _op.nn.max_pool2d(in_expr, **params)
elif len(op.top) == 2:
out1 = _op.nn.max_pool2d_with_argmax(in_expr, **params)
out2 = _op.vision.max_pool2d_location(in_expr, **params)
return _expr.Tuple((out1, out2))
elif pool_type_dict[pool_type] == "AVE": # AVE
if pool_params.global_pooling:
out = _op.nn.global_avg_pool2d(in_expr)
else:
params["count_include_pad"] = True
out = _op.nn.avg_pool2d(in_expr, **params)
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Caffe.".format(
pool_type_dict[pool_type] + " pool"
)
)
return out
def convert_lrn(self, op):
"""Convert LRN layer"""
inputs = op.bottom
input_name = inputs[0]
params = dict()
lrn_params = op.lrn_param
params["size"] = lrn_params.local_size
params["bias"] = lrn_params.k
params["alpha"] = lrn_params.alpha
params["beta"] = lrn_params.beta
in_expr = self.exp_tab.get_expr(input_name)
out = _op.nn.lrn(in_expr, **params)
return out
    def convert_innerproduct(self, op):
        """Convert InnerProduct layer"""
        inputs = op.bottom
        weight_bias_blobs = self.init_layer_dict[op.name].blobs
        dense_params = op.inner_product_param
        params = dict()
        params["num_output"] = dense_params.num_output
        params["bias"] = dense_params.bias_term
        params["axis"] = dense_params.axis
        if params["axis"] != 1:
            raise Exception("Only support 2D InnerProduct")
        # process weight and bias blobs
        # Blob 0 is the weight matrix; blob 1 the bias (when bias_term set).
        weight, bias = None, None
        if params["bias"]:
            weight = weight_bias_blobs[0]
            bias = weight_bias_blobs[1]
        else:
            weight = weight_bias_blobs[0]
        if weight:
            weight_value = np.asarray(weight.data, np.float32)
            # Relay dense expects (units, input_dim); -1 infers input_dim.
            weight_value = np.reshape(weight_value, (params["num_output"], -1))
            weight_shape = weight_value.shape
        else:
            raise Exception("No weight value of layer {} in caffemodel".format(op.name))
        weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
        in_expr = self.exp_tab.get_expr(inputs[0])
        # Collapse all trailing dims so the input is 2-D for nn.dense.
        in_reshape = _op.reshape(data=in_expr, newshape=(-1, weight_shape[-1]))
        out = _op.nn.dense(data=in_reshape, weight=weight_expr)
        if bias:
            bias_value = np.asarray(bias.data, np.float32)
            bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
            out = _op.nn.bias_add(out, bias_expr, axis=params["axis"])
        return out
def convert_dropout(self, op):
"""Convert Dropout layer"""
inputs = op.bottom
input_name = inputs[0]
params = dict()
dropout_params = op.dropout_param
params["rate"] = dropout_params.dropout_ratio
in_expr = self.exp_tab.get_expr(input_name)
out = _op.nn.dropout(in_expr, **params)
return out
def convert_relu(self, op):
"""Convert ReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
negative_slope = op.relu_param.negative_slope
if negative_slope:
out = _op.nn.leaky_relu(in_expr, negative_slope)
return out
out = _op.nn.relu(in_expr)
return out
def convert_prelu(self, op):
"""Convert PReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
alpha = self.init_layer_dict[op.name].blobs[0].data
alpha = np.asarray(alpha, np.float32)
alpha = self.exp_tab.new_const(alpha, dtype="float32")
axis = 1
out = _op.nn.prelu(in_expr, alpha, axis=axis)
return out
def convert_deconv(self, op):
    """Convert a Caffe Deconvolution layer to relay conv2d_transpose.

    Loads the layer's weight (and optional bias) blobs from the
    caffemodel, reorders the weight into OIHW layout and emits
    ``conv2d_transpose`` plus ``bias_add`` when a bias blob exists.
    """
    # Stride/pad/dilation/kernel size etc. parsed from the prototxt.
    params = self._parse_conv_params(op)
    weight_bias_blobs = self.init_layer_dict[op.name].blobs
    conv_params = op.convolution_param
    inputs = op.bottom
    # process weight and bias blobs
    weight, bias = None, None
    if len(weight_bias_blobs) > 1:
        weight = weight_bias_blobs[0]
        bias = weight_bias_blobs[1]
    else:
        weight = weight_bias_blobs[0]
    if weight:
        kh, kw = params["kernel_size"]
        # Caffe stores deconvolution weights as (input, output, kh, kw);
        # -1 lets the input-channel count be inferred from the blob size.
        weight_shape = [-1, conv_params.num_output, kh, kw]
        weight_value = np.asarray(weight.data, np.float32)
        weight_value = np.reshape(weight_value, weight_shape)
        # weight shape is in relay's IOHW format rn, we need it to be OIHW
        weight_value = np.transpose(weight_value, [1, 0, 2, 3])
    else:
        raise Exception("No weight value of layer {} in caffemodel".format(op.name))
    weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
    in_expr = self.exp_tab.get_expr(inputs[0])
    out = _op.nn.conv2d_transpose(data=in_expr, weight=weight_expr, **params)
    if bias:
        # Bias is added over the default channel axis.
        bias_value = np.asarray(bias.data, np.float32)
        bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
        out = _op.nn.bias_add(out, bias_expr)
    return out
def convert_slice(self, op):
    """Convert a Caffe Slice layer to a relay split op."""
    data = self.exp_tab.get_expr(op.bottom[0])
    slice_params = op.slice_param
    cut_points = sorted(int(p) for p in slice_params.slice_point)
    if cut_points:
        # Explicit slice points give the split boundaries along ``axis``.
        sections = cut_points
    else:
        # Without slice points, split evenly into one piece per output blob.
        sections = len(op.top)
    return _op.split(data, indices_or_sections=sections, axis=int(slice_params.axis))
def convert_sigmoid(self, op):
    """Convert a Caffe Sigmoid layer to a relay sigmoid op."""
    return _op.sigmoid(self.exp_tab.get_expr(op.bottom[0]))
def convert_tanh(self, op):
    """Convert a Caffe TanH layer to a relay tanh op."""
    return _op.tanh(self.exp_tab.get_expr(op.bottom[0]))
def convert_crop(self, op):
    """Convert a Caffe Crop layer.

    Crops the first input so that, from ``axis`` onwards, its shape
    matches the second input, starting at the per-axis ``offset``.
    """
    inputs = op.bottom
    assert len(inputs) == 2, "Need two inputs of Crop layer"
    in_expr_a = self.exp_tab.get_expr(inputs[0])
    in_expr_b = self.exp_tab.get_expr(inputs[1])
    # parse crop params
    crop_params = op.crop_param
    axis = int(getattr(crop_params, "axis", 2))
    offset = list(getattr(crop_params, "offset", 0))
    # expand offset to (offset1, offset2, ...)
    in_a_shape = _infer_shape(in_expr_a)
    num_to_crop = len(in_a_shape) - axis
    if not offset:
        # No offset given: crop from the origin of every cropped axis.
        offset = [0] * num_to_crop
    if len(offset) == 1:
        # A single offset value applies to all cropped axes.
        offset = offset * num_to_crop
    elif len(offset) != num_to_crop:
        raise Exception("No matching the number between axis and offset!")
    # Firstly, shift the start of every cropped axis by its offset.
    slice_end = in_a_shape
    slice_start = [0] * len(in_a_shape)
    for i in range(num_to_crop):
        slice_start[i + axis] = offset[i]
    to_crop_axis = list(range(len(in_a_shape)))
    to_crop_axis = to_crop_axis[axis:]
    # secondly, crop in_expr_a by in_expr_b
    in_expr_a_stride = _op.strided_slice(in_expr_a, slice_start, slice_end)
    out = _op.slice_like(in_expr_a_stride, in_expr_b, axes=to_crop_axis)
    return out
def convert_embed(self, op):
    """Convert a Caffe Embed (embedding lookup) layer.

    Looks indices up in a (input_dim, num_output) weight table with
    ``take``; when ``bias_term`` is set, the result is flattened so the
    bias can be added per row, then reshaped back.
    """
    inputs = op.bottom
    embed_param = op.embed_param
    num_output = embed_param.num_output
    input_dim = embed_param.input_dim
    bias_term = embed_param.bias_term
    weight_bias_blobs = self.init_layer_dict[op.name].blobs
    weight, bias = None, None
    if bias_term:
        weight = weight_bias_blobs[0]
        bias = weight_bias_blobs[1]
        assert weight and bias
    else:
        weight = weight_bias_blobs[0]
        assert weight
    weight_value = np.asarray(weight.data, np.float32)
    weight_value = np.reshape(weight_value, [input_dim, num_output])
    weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
    in_expr = self.exp_tab.get_expr(inputs[0])
    input_shape = _infer_shape(in_expr)
    # Total number of lookups = product of the input dimensions.
    input_count = 1
    for dim in input_shape:
        input_count *= dim
    # Cast to int32 so the values can be used as table indices.
    index = _op.cast(in_expr, "int32")
    out = _op.take(weight_expr, index, axis=0)
    if bias_term:
        bias_value = np.asarray(bias.data, np.float32)
        bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
        # Flatten to (input_count, num_output) so the bias broadcasts per row.
        out = _op.reshape(out, [input_count, num_output])
        out = _op.add(out, bias_expr)
        out_shape = list(input_shape)
        out_shape.append(num_output)
        out = _op.reshape(out, out_shape)
    return out
def check_unsupported_ops(self):
    """Check for Caffe layer types this converter cannot handle.

    Walks every layer of the predict net and raises
    ``tvm.error.OpNotImplemented`` listing every layer type with no
    entry in ``self.convert_map``; returns None when all are supported.
    """
    # The original implementation also built a per-type occurrence count
    # (``include_layer``) that was never read - dead code, removed.
    unsupported_ops_set = set()
    for pl in self.predict_layer:
        if pl.type not in self.convert_map:
            unsupported_ops_set.add(pl.type)
    if unsupported_ops_set:
        msg = "The following operators are not supported in frontend " "Caffe: {}"
        ops = str(list(unsupported_ops_set)).strip("[,]")
        raise tvm.error.OpNotImplemented(msg.format(ops))
def fuse_op(self, layers):
    """Fuse a BatchNorm layer with its following Scale layer.

    Stores the folded parameters in ``self.new_bn`` keyed by the bn
    layer's name and returns the bn layer, which stands in for the
    fused pair in the rebuilt layer list.
    """
    bn, scale = layers["bn"], layers["scale"]
    # bn params: blob 0 = mean, blob 1 = variance, blob 2 = scale factor.
    bn_weight_bias_blobs = self.init_layer_dict[bn.name].blobs
    bn_scale = np.asarray(bn_weight_bias_blobs[2].data, np.float32)
    # Divide the moving-average scale factor back out of mean/var
    # (skipped when it is zero).  NOTE(review): the truthiness test
    # relies on blob 2 holding a single element - a multi-element array
    # would raise here; confirm that assumption holds for all models.
    if bn_scale:
        bn_scale = 1 / bn_scale
    bn_mean = np.asarray(bn_weight_bias_blobs[0].data, np.float32) * bn_scale
    bn_var = np.asarray(bn_weight_bias_blobs[1].data, np.float32) * bn_scale
    bn_eps = bn.batch_norm_param.eps
    # scale params: blob 0 = gamma, optional blob 1 = beta.
    scale_weight_bias_blobs = self.init_layer_dict[scale.name].blobs
    scale_gamma = np.asarray(scale_weight_bias_blobs[0].data, np.float32)
    scale_bias = scale.scale_param.bias_term
    if scale_bias:
        scale_beta = np.asarray(scale_weight_bias_blobs[1].data, np.float32)
    else:
        # No bias blob: use a zero beta of matching shape.
        scale_beta = np.zeros(scale_gamma.shape, dtype=np.float32)
    # new params consumed later when the bn layer is converted
    self.new_bn[bn.name] = [bn_mean, bn_var, bn_eps, scale_gamma, scale_beta]
    return bn
def op_fuse(self):
    """Rewrite the layer list, fusing BatchNorm+Scale pairs.

    A BatchNorm immediately followed by a Scale is collapsed into the
    bn layer (see ``fuse_op``).  Bottoms that referenced the removed
    Scale are rewired to the bn layer's name; the renames are recorded
    in ``self.changed_layers`` for later output resolution.
    """
    new_layers = []
    # Holds the pending bn (then its scale) while scanning a pair.
    temp_layers = {}
    # Maps removed Scale layer name -> surviving BatchNorm layer name.
    changed_layers = {}
    for index, pl in enumerate(self.predict_layer):
        op_type = pl.type
        if op_type == "Input":
            new_layers.append(pl)
            continue
        elif op_type == "BatchNorm":
            if (index != len(self.predict_layer) - 1) and (
                self.predict_layer[index + 1].type == "Scale"
            ):
                # Next layer is a Scale: defer, fuse on the next iteration.
                temp_layers["bn"] = pl
                continue
            else:
                # Standalone BatchNorm: keep as-is.
                new_layers.append(pl)
                temp_layers.clear()
        elif op_type == "Scale":
            if self.predict_layer[index - 1].type == "BatchNorm":
                temp_layers["scale"] = pl
            else:
                # Standalone Scale: keep as-is.
                new_layers.append(pl)
                temp_layers.clear()
        else:
            temp_layers.clear()
        if len(temp_layers) == 2:
            # Both halves of a bn/scale pair collected: fuse them.
            layer = self.fuse_op(temp_layers)
            new_layers.append(layer)
            changed_layers[temp_layers["scale"].name] = temp_layers["bn"].name
        # Rewire this layer's inputs if they referenced a removed Scale.
        for idx, plt in enumerate(pl.bottom):
            if plt in changed_layers:
                pl.bottom[idx] = changed_layers[plt]
        # Ordinary layers reach here un-appended; bn/scale were handled above.
        if op_type not in ["BatchNorm", "Scale"]:
            new_layers.append(pl)
    self.predict_layer = new_layers
    self.changed_layers = changed_layers
def convert_op_to_relay(self):
    """Translate every predict-net layer to relay and register its outputs."""
    for layer in self.predict_layer:
        # Input layers were already registered as relay variables.
        if layer.type == "Input":
            continue
        result = self.convert_map[layer.type](layer)
        tops = layer.top
        if len(tops) == 1:
            # Single-output layer: the converter returned one expression.
            self.exp_tab.set_expr(tops[0], result)
        else:
            # Multi-output layer: the converter returned an indexable value.
            for i, top_name in enumerate(tops):
                self.exp_tab.set_expr(top_name, result[i])
def _rebuild_layers(predict_layer):
    """Rebuild caffe layers. If the caffe net includes in-place layers, replace
    each in-place top with the layer's name and update the bottoms of every
    other layer that refers to it.
    """
    # dict of input name that will be changed to new name
    changed_top_dict = dict()
    for pl in predict_layer:
        if pl.type == "Input":
            continue
        # if current layer has single input and output and input equals to output
        # it means that the layer does "in-place"
        if len(pl.top) == 1 and len(pl.bottom) == 1:
            if pl.top[0] == pl.bottom[0]:
                # change current layer's input firstly
                if pl.bottom[0] in changed_top_dict:
                    pl.bottom[0] = changed_top_dict[pl.bottom[0]]
                # update "change" dict
                changed_top_dict[pl.top[0]] = pl.name
                # change current layer's output to its name
                pl.top[0] = pl.name
            else:
                # not in-place: just rewire a renamed input
                if pl.bottom[0] in changed_top_dict:
                    pl.bottom[0] = changed_top_dict[pl.bottom[0]]
        # multi-input/output layer: rewire any bottoms renamed earlier
        else:
            for index, plt in enumerate(pl.bottom):
                if plt in changed_top_dict:
                    pl.bottom[index] = changed_top_dict[plt]
def _get_inputs_outputs(predict_layer):
    """Obtain the Caffe model's input and output tensor names.

    Inputs are the tops of ``Input`` layers.  Outputs are tops that no
    layer consumes as a bottom, restricted to layers that themselves
    have at least one bottom.
    """
    model_inputs = []
    # Every bottom of every layer is consumed, hence cannot be an output.
    consumed = set()
    for layer in predict_layer:
        if layer.type == "Input":
            assert len(layer.top) == 1, "The number of Input layer's output is more than 1."
            model_inputs.append(layer.top[0])
        consumed.update(layer.bottom)
    model_outputs = [
        t
        for layer in predict_layer
        if len(layer.bottom) > 0
        for t in layer.top
        if t not in consumed
    ]
    return model_inputs, model_outputs
def from_caffe(init_net, predict_net, shape_dict, dtype_dict):
    """Convert from caffe model into compatible relay Function.

    Parameters
    ----------
    init_net : caffe_pb2.NetParameter
        caffemodel
    predict_net : caffe_pb2.NetParameter
        caffe prototxt
    shape_dict : dict of str to int list/tuple
        Input shapes of the model.
    dtype_dict : dict of str to str
        Input types of the model.

    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation.
    params : dict of str to tvm.NDArray
        The parameter dict to be used by relay
    """
    old_caffe = False
    # Old prototxts declare inputs at net level instead of Input layers.
    if len(predict_net.input) != 0:  # old caffe version
        old_caffe = True
        model_inputs = list(predict_net.input)
    predict_layer = predict_net.layer
    # replace layer's top with its name and update other layers' bottoms
    _rebuild_layers(predict_layer)
    # obtain inputs and outputs of Net
    if old_caffe:
        _, model_outputs = _get_inputs_outputs(predict_layer)
    else:
        model_inputs, model_outputs = _get_inputs_outputs(predict_layer)
    exp_tab = ExprTable()
    # Register every model input as a relay variable.
    for in_name in model_inputs:
        shape = shape_dict[in_name] if in_name in shape_dict else None
        dtype = dtype_dict[in_name] if in_name in dtype_dict else "float32"
        exp_tab.set_expr(in_name, _expr.var(in_name, shape=shape, dtype=dtype))
    # Old caffemodels store weights in ``layers``, newer ones in ``layer``.
    if list(init_net.layer):
        init_layer = init_net.layer
    else:
        init_layer = init_net.layers
    init_layer_dict = {il.name: il for il in init_layer}
    # op code in model
    op_converter = OperatorConverter(init_layer_dict, predict_layer, exp_tab)
    op_converter.check_unsupported_ops()
    op_converter.op_fuse()
    op_converter.convert_op_to_relay()
    # params and outputs
    params = {k: _nd.array(np.array(v)) for k, v in exp_tab.params.items()}
    outputs = list()
    for n in model_outputs:
        # Outputs produced by a fused-away Scale map back to the bn layer.
        if n in op_converter.changed_layers:
            n = op_converter.changed_layers[n]
        outputs.append(exp_tab.get_expr(n))
    outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
    func = _function.Function(analysis.free_vars(outputs), outputs)
    mod = IRModule.from_expr(func)
    return mod, params
| Laurawly/tvm-1 | python/tvm/relay/frontend/caffe.py | Python | apache-2.0 | 32,058 |
from .main import PipelineElement
from .data import SimulationData
class Producer(PipelineElement):
    """Pipeline element that produces data and pushes it to mediators.

    Mediators are attached/detached through the sink machinery inherited
    from ``PipelineElement``; the most recently produced value is kept in
    ``output``.
    """

    def __init__(self):
        PipelineElement.__init__(self)
        # Most recent output; None until assigned by produce()/subclasses.
        self.output = None

    def connect(self, mediator):
        """Attach a mediator as a sink of this producer."""
        self._sink(mediator)

    def disconnect(self, mediator=None):
        """Detach one mediator, or all of them when called without one."""
        if mediator is None:
            self._unsink_all(self._sinks())
        else:
            self._unsink(mediator)

    @property
    def mediators(self):
        # All currently attached sinks.
        return self._sinks()

    @mediators.setter
    def mediators(self, values):
        # Replace the whole sink set with the given mediators.
        self._unsink_all(self._sinks())
        self._sink_all(values)

    @property
    def output(self):
        return self._output

    @output.setter
    def output(self, value):
        self._output = value

    def produce(self):
        """Produce the next datum; must be overridden by subclasses."""
        raise NotImplementedError("")
class SimulationDataProducer(Producer):
    """Producer whose output is a ``SimulationData`` for one simulation variable."""

    def __init__(self, simulation_variable, downsampler):
        Producer.__init__(self)
        # The output container must exist before the simulation_variable
        # setter runs, because that setter writes into it.
        self.output = SimulationData(simulation_variable)
        self.simulation_variable = simulation_variable
        self.downsampler = downsampler

    @property
    def simulation_variable(self):
        return self._simulation_variable

    @simulation_variable.setter
    def simulation_variable(self, value):
        self._simulation_variable = value
        # Keep the output container in sync with the producer's variable.
        # NOTE(review): pokes a private attribute of SimulationData directly -
        # consider exposing a proper setter on SimulationData.
        self.output._simulation_variable = value

    @property
    def downsampler(self):
        # Strategy object used to thin the produced data.
        return self._downsampler

    @downsampler.setter
    def downsampler(self, value):
        self._downsampler = value
# def _produce_continuous_non_uniform(self):
# """
# Produces data from a continuous non uniform dataset.
# Since simtime increases by a constant amount each tick,
# the value of a visualizable is computed by taking a weighted
# average of its values at the timepoints before and after/at
# the current simulation time.
# Loop Invariant => times[index] >= simtime
# """
# try:
# input = self.input[CONTINUOUS_NON_UNIFORM_SERIES]
# except:
# return
# for variable in input:
# visualizables = input[variable]["visualizables"]
# index = input[variable]["index"]
# times = input[variable]["times"]
# try:
# while times[index] < self.simtime:
# index = index + 1
# difference = times[index] - times[index - 1]
# left_weight = (times[index] - self.simtime) / difference
# right_weight = (self.simtime - times[index - 1]) / difference
# for visualizable in visualizables:
# values = visualizables[visualizable]
# left_value = values[index]
# right_value = values[index + 1]
# value = (
# left_weight * left_value + right_weight * right_value
# )
# self.output.assign(variable, visualizable, value)
# except IndexError:
# for visualizable in visualizables:
# self.output.assign(variable, visualizable, None)
# def _produce_event_times(self):
# input = self.input[SPIKE_TIMES]
# for variable in input:
# for visualizable in input[variable]:
# try:
# spike_times = input[variable][visualizable]
# spike_time = spike_times[spike_times[0]]
# if abs(spike_time - self.simtime) < moogli.epsilon:
# spike_times[0] += 1
# self.output.assign(variable, visualizable, True)
# else:
# self.output.assign(variable, visualizable, False)
# except IndexError:
# self.output.assign(variable, visualizable, None)
# def _produce_spike_series(self):
# input = self.input[SPIKE_SERIES]
# for variable in input:
# for visualizable in input[variable]:
# try:
# value = input[variable][visualizable][self.tick]
# self.output.assign(variable, visualizable, value)
# except IndexError:
# self.output.assign(variable, visualizable, None)
# CONTINUOUS_UNIFORM_SERIES
# CONTINUOUS_NON_UNIFORM_SERIES
# EVENT_UNIFORM_SERIES
# EVENT_NON_UNIFORM_SERIES
# def _produce_continuous_uniform_series(self):
# input = self.
# def _produce_
| dilawar/moogli | moogli/visualization/pipeline/producer.py | Python | gpl-2.0 | 4,578 |
"""
Django settings for cbs project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '51ff&6zumcwpo8+60&5+dg5nqh6-ehdo@uk-xi$*paicy7b4e%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'p311',
'p365',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cbs.urls'
WSGI_APPLICATION = 'cbs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "templates"),
)
| max1k/cbs | cbs/settings.py | Python | gpl-2.0 | 2,196 |
# Copyright (C) 2010 Simon Wessing
# TU Dortmund University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Simon Wessing"
class InnerHyperVolume:
    """
    Hypervolume computation based on variant 3 of the algorithm in the paper:
    C. M. Fonseca, L. Paquete, and M. Lopez-Ibanez. An improved dimension-sweep
    algorithm for the hypervolume indicator. In IEEE Congress on Evolutionary
    Computation, pages 1157-1163, Vancouver, Canada, July 2006.
    Minimization is implicitly assumed here!

    NOTE: this module is Python 2 code (it relies on ``xrange`` and a
    print statement elsewhere in the file).
    """

    def __init__(self, referencePoint):
        """Constructor."""
        # Reference point; the front is translated so this becomes the origin.
        self.referencePoint = referencePoint
        # MultiList holding the preprocessed front (built in preProcess).
        self.list = []

    def compute(self, front):
        """Returns the hypervolume that is dominated by a non-dominated front.
        Before the HV computation, front and reference point are translated, so
        that the reference point is [0, ..., 0].
        """

        def weaklyDominates(point, other):
            # True when `point` is <= `other` in every coordinate.
            for i in xrange(len(point)):
                if point[i] > other[i]:
                    return False
            return True

        relevantPoints = []
        referencePoint = self.referencePoint
        dimensions = len(referencePoint)
        for point in front:
            # only consider points that dominate the reference point
            if weaklyDominates(point, referencePoint):
                relevantPoints.append(point)
        if any(referencePoint):
            # shift points so that referencePoint == [0, ..., 0]
            # this way the reference point doesn't have to be explicitly used
            # in the HV computation
            for j in xrange(len(relevantPoints)):
                relevantPoints[j] = [relevantPoints[j][i] - referencePoint[i] for i in xrange(dimensions)]
        self.preProcess(relevantPoints)
        bounds = [-1.0e308] * dimensions
        hyperVolume = self.hvRecursive(dimensions - 1, len(relevantPoints), bounds)
        # NOTE(review): the result is rounded to 4 decimals - confirm that
        # callers expect this rounding.
        return round(hyperVolume, 4)

    def hvRecursive(self, dimIndex, length, bounds):
        """Recursive call to hypervolume calculation.
        In contrast to the paper, the code assumes that the reference point
        is [0, ..., 0]. This allows the avoidance of a few operations.
        """
        hvol = 0.0
        sentinel = self.list.sentinel
        if length == 0:
            # An empty front contributes no volume.
            return hvol
        elif dimIndex == 0:
            # special case: only one dimension
            # why using hypervolume at all?
            return -sentinel.next[0].cargo[0]
        elif dimIndex == 1:
            # special case: two dimensions, end recursion
            q = sentinel.next[1]
            h = q.cargo[0]
            p = q.next[1]
            while p is not sentinel:
                pCargo = p.cargo
                hvol += h * (q.cargo[1] - pCargo[1])
                if pCargo[0] < h:
                    h = pCargo[0]
                q = p
                p = q.next[1]
            hvol += h * q.cargo[1]
            return hvol
        else:
            # General case: sweep along dimension dimIndex, temporarily
            # removing points and reusing cached area/volume values
            # (ignore flag) where the recursion can be skipped.
            remove = self.list.remove
            reinsert = self.list.reinsert
            hvRecursive = self.hvRecursive
            p = sentinel
            q = p.prev[dimIndex]
            while q.cargo != None:
                if q.ignore < dimIndex:
                    q.ignore = 0
                q = q.prev[dimIndex]
            q = p.prev[dimIndex]
            while length > 1 and (q.cargo[dimIndex] > bounds[dimIndex] or q.prev[dimIndex].cargo[dimIndex] >= bounds[dimIndex]):
                p = q
                remove(p, dimIndex, bounds)
                q = p.prev[dimIndex]
                length -= 1
            qArea = q.area
            qCargo = q.cargo
            qPrevDimIndex = q.prev[dimIndex]
            if length > 1:
                hvol = qPrevDimIndex.volume[dimIndex] + qPrevDimIndex.area[dimIndex] * (qCargo[dimIndex] - qPrevDimIndex.cargo[dimIndex])
            else:
                qArea[0] = 1
                qArea[1:dimIndex+1] = [qArea[i] * -qCargo[i] for i in xrange(dimIndex)]
            q.volume[dimIndex] = hvol
            if q.ignore >= dimIndex:
                qArea[dimIndex] = qPrevDimIndex.area[dimIndex]
            else:
                qArea[dimIndex] = hvRecursive(dimIndex - 1, length, bounds)
                if qArea[dimIndex] <= qPrevDimIndex.area[dimIndex]:
                    q.ignore = dimIndex
            while p is not sentinel:
                pCargoDimIndex = p.cargo[dimIndex]
                hvol += q.area[dimIndex] * (pCargoDimIndex - q.cargo[dimIndex])
                bounds[dimIndex] = pCargoDimIndex
                reinsert(p, dimIndex, bounds)
                length += 1
                q = p
                p = p.next[dimIndex]
                q.volume[dimIndex] = hvol
                if q.ignore >= dimIndex:
                    q.area[dimIndex] = q.prev[dimIndex].area[dimIndex]
                else:
                    q.area[dimIndex] = hvRecursive(dimIndex - 1, length, bounds)
                    if q.area[dimIndex] <= q.prev[dimIndex].area[dimIndex]:
                        q.ignore = dimIndex
            hvol -= q.area[dimIndex] * q.cargo[dimIndex]
            return hvol

    def preProcess(self, front):
        """Sets up the list Data structure needed for calculation."""
        dimensions = len(self.referencePoint)
        nodeList = MultiList(dimensions)
        nodes = [MultiList.Node(dimensions, point) for point in front]
        for i in xrange(dimensions):
            # Each sub-list is kept sorted by its own coordinate.
            self.sortByDimension(nodes, i)
            nodeList.extend(nodes, i)
        self.list = nodeList

    def sortByDimension(self, nodes, i):
        """Sorts the list of nodes by the i-th value of the contained points."""
        # build a list of tuples of (point[i], node)
        decorated = [(node.cargo[i], node) for node in nodes]
        # sort by this value
        decorated.sort()
        # write back to original list
        nodes[:] = [node for (_, node) in decorated]
class MultiList:
    """A special Data structure needed by FonsecaHyperVolume.
    It consists of several doubly linked lists that share common nodes. So,
    every node has multiple predecessors and successors, one in every list.
    """

    class Node:
        def __init__(self, numberLists, cargo=None):
            # The point carried by this node (None for the sentinel).
            self.cargo = cargo
            # Per-list successor / predecessor pointers.
            self.next = [None] * numberLists
            self.prev = [None] * numberLists
            # Skip marker used by hvRecursive.
            self.ignore = 0
            # Cached area / volume values per dimension (see hvRecursive).
            self.area = [0.0] * numberLists
            self.volume = [0.0] * numberLists

        def __str__(self):
            return str(self.cargo)

    def __init__(self, numberLists):
        """Constructor.
        Builds 'numberLists' doubly linked lists.
        Vivek: numberLists means number of dimensions
        """
        self.numberLists = numberLists
        # Shared sentinel closing every ring; its cargo stays None.
        self.sentinel = MultiList.Node(numberLists)
        self.sentinel.next = [self.sentinel] * numberLists
        self.sentinel.prev = [self.sentinel] * numberLists

    def __str__(self):
        strings = []
        for i in xrange(self.numberLists):
            currentList = []
            node = self.sentinel.next[i]
            while node != self.sentinel:
                currentList.append(str(node))
                node = node.next[i]
            strings.append(str(currentList))
        stringRepr = ""
        for string in strings:
            stringRepr += string + "\n"
        return stringRepr

    def __len__(self):
        """Returns the number of lists that are included in this MultiList."""
        return self.numberLists

    def getLength(self, i):
        """Returns the length of the i-th list."""
        length = 0
        sentinel = self.sentinel
        node = sentinel.next[i]
        while node != sentinel:
            length += 1
            node = node.next[i]
        return length

    def append(self, node, index):
        """Appends a node to the end of the list at the given index."""
        lastButOne = self.sentinel.prev[index]
        node.next[index] = self.sentinel
        node.prev[index] = lastButOne
        # set the last element as the new one
        self.sentinel.prev[index] = node
        lastButOne.next[index] = node

    def extend(self, nodes, index):
        """Extends the list at the given index with the nodes."""
        sentinel = self.sentinel
        for node in nodes:
            lastButOne = sentinel.prev[index]
            node.next[index] = sentinel
            node.prev[index] = lastButOne
            # set the last element as the new one
            sentinel.prev[index] = node
            lastButOne.next[index] = node

    def remove(self, node, index, bounds):
        """Removes and returns 'node' from all lists in [0, 'index'[."""
        for i in xrange(index):
            # Unlink in every lower-dimensional list; pointers inside the
            # node are kept so reinsert() can restore it in place.
            predecessor = node.prev[i]
            successor = node.next[i]
            predecessor.next[i] = successor
            successor.prev[i] = predecessor
            if bounds[i] > node.cargo[i]:
                bounds[i] = node.cargo[i]
        return node

    def reinsert(self, node, index, bounds):
        """
        Inserts 'node' at the position it had in all lists in [0, 'index'[
        before it was removed. This method assumes that the next and previous
        nodes of the node that is reinserted are in the list.
        """
        for i in xrange(index):
            node.prev[i].next[i] = node
            node.next[i].prev[i] = node
            if bounds[i] > node.cargo[i]:
                bounds[i] = node.cargo[i]
if __name__ == "__main__":
# Example:
referencePoint = [5,5]
hv = HyperVolume(referencePoint)
front = [[1,4], [2,2], [1,3], [4,1]]
volume = hv.compute(front)
print volume
| gbtimmon/ase16GBT | code/9/hypervolume.py | Python | unlicense | 10,567 |
from social_core.pipeline.utils import SERIALIZABLE_TYPES, partial_to_session, \
partial_from_session
| cjltsod/python-social-auth | social/pipeline/utils.py | Python | bsd-3-clause | 106 |
#!/usr/bin/python3 -u
# A script for install alive script
import subprocess
import argparse
import re
import os
import shutil
from subprocess import call
# Command-line interface: all three arguments are mandatory.
parser = argparse.ArgumentParser(description='A script for install alive script and cron')
parser.add_argument('--url', help='The url where notify that this server is alive', required=True)
parser.add_argument('--user', help='The user for pastafari', required=True)
parser.add_argument('--pub_key', help='The pub key used in pastafari user', required=True)
args = parser.parse_args()
url = args.url

# URL validator (Django-style regex): scheme, host/IP, optional port, path.
check_url = re.compile(
    r'^(?:http|ftp)s?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)

if check_url.match(args.url):
    # SECURITY NOTE(review): args.user and args.pub_key are interpolated
    # into shell commands executed with sudo (shell=True); run this
    # script only with trusted input, or switch to argument lists with
    # shell=False.
    # Create users
    if call("sudo useradd -m -s /bin/sh %s" % args.user, shell=True) > 0:
        print('Error, cannot add a new user')
        exit(1)
    else:
        print('Added user')
    if call("sudo mkdir -p /home/"+args.user+"/.ssh && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh && sudo chmod 700 /home/"+args.user+"/.ssh", shell=True) > 0:
        print('Error, cannot add ssh directory')
        exit(1)
    else:
        print('Added ssh directory')
    if call("sudo cp "+args.pub_key+" /home/"+args.user+"/.ssh/authorized_keys && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh/authorized_keys && sudo chmod 600 /home/"+args.user+"/.ssh/authorized_keys", shell=True) > 0:
        print('Error, cannot pub key to user')
        exit(1)
    else:
        print('Added pub key to user')
    # Edit alive cron: point the crontab entry at the installed script path.
    with open('modules/pastafari/scripts/monit/debian_jessie/files/crontab/alive') as f:
        alive_cron = f.read()
    with open('modules/pastafari/scripts/monit/debian_jessie/files/crontab/alive', 'w') as f:
        alive_cron = alive_cron.replace('/home/spanel/modules/pastafari/scripts/monit/debian_jessie/files/get_info.py', '/usr/local/bin/get_info.py')
        f.write(alive_cron)
    # Edit get_info.py: bake the notification URL into the installed copy.
    with open('modules/pastafari/scripts/monit/debian_jessie/files/get_info.py') as f:
        get_info = f.read()
    with open('/usr/local/bin/get_info.py', 'w') as f:
        get_info = get_info.replace("http://url/to/server/token/ip", args.url)
        f.write(get_info)
    # Owner-only rwx: the script embeds the secret token URL.
    os.chmod('/usr/local/bin/get_info.py', 0o700)
    shutil.chown('/usr/local/bin/get_info.py', args.user, args.user)
    # Edit get_updates.py: same URL but on the getupdates endpoint.
    with open('modules/pastafari/scripts/monit/debian_jessie/files/get_updates.py') as f:
        get_updates = f.read()
    with open('/etc/cron.daily/get_updates.py', 'w') as f:
        url_updates = args.url.replace('/getinfo/', '/getupdates/')
        get_updates = get_updates.replace("http://url/to/server/token/ip", url_updates)
        f.write(get_updates)
    os.chmod('/etc/cron.daily/get_updates.py', 0o700)
    # Edit sudo file: grant the chosen user the template's sudo rights.
    with open('modules/pastafari/scripts/monit/debian_jessie/files/sudoers.d/spanel') as f:
        sudoers = f.read()
    with open('/etc/sudoers.d/spanel', 'w') as f:
        sudoers = sudoers.replace("spanel", args.user)
        f.write(sudoers)
    # Copy cron alive to /etc/cron.d/
    if call("sudo cp modules/pastafari/scripts/monit/debian_jessie/files/crontab/alive /etc/cron.d/alive", shell=True) > 0:
        print('Error, cannot install crontab alive file in cron.d')
        exit(1)
    else:
        print('Added contrab alive file in cron.d')
        print('Script installed successfully')
    # Copy script for upgrades in /usr/local/bin
    if call("mkdir /home/"+args.user+"/bin/ && cp modules/pastafari/scripts/standard/debian_jessie/upgrade.sh /home/"+args.user+"/bin/ && chown -R "+args.user+":"+args.user+" /home/"+args.user+"/bin/", shell=True) > 0:
        print('Error, cannot install upgrade.py in /home/'+args.user+'/bin/')
        exit(1)
    else:
        print('Added /home/'+args.user+'/bin/upgrade.py')
        print('Script installed successfully')
    # Making first call to site so the panel sees the server immediately.
    if subprocess.call('/usr/local/bin/get_info.py', shell=True) > 0:
        print('Error')
        exit(1)
    else:
        print('Your server should be up in your panel...')
        exit(0)
else:
    print('Error installing the module, not valid url')
    exit(1)
| paramecio/pastafari | scripts/monit/debian_jessie/alive.py | Python | gpl-2.0 | 4,578 |
import random
import re
from twisted.internet import reactor
from helga.plugins import command, preprocessor
# Canned acknowledgements sent when the bot is silenced.
silence_acks = (
    u'silence is golden',
    u'shutting up',
    u'biting my tongue',
    u'fine, whatever',
)

# Canned acknowledgements sent when the bot may speak again;
# '{nick}' is filled in with the requesting user's nick.
unsilence_acks = (
    u'speaking once again',
    u'did you miss me?',
    u'FINALLY',
    u'thanks {nick}, i was getting bored'
)

# Snarky refusals (e.g. when asked to stfu in a private message).
snarks = (
    u'why would you want to do that {nick}?',
    u'do you really despise me that much {nick}?',
    u'whatever i do what i want',
    u'no can do, i love the sound of my own voice',
)

# Set of silenced channels
silenced = set()
def auto_unsilence(client, channel, length):
    """Re-enable the bot on ``channel`` after a timed stfu, announcing it."""
    global silenced
    if channel not in silenced:
        # Already unsilenced (e.g. via an explicit 'speak') - nothing to do.
        return
    silenced.discard(channel)
    # ``length`` is in seconds; report it back in whole minutes.
    client.msg(channel, u"Speaking again after waiting {0} minutes".format(length//60))
@preprocessor
@command('stfu', aliases=['speak'],
         help="Tell the bot to be quiet or not. Usage: helga (speak|stfu [for <time_in_minutes>])")
def stfu(client, channel, nick, message, *args):
    """Silence/unsilence the bot per channel.

    Registered both as a message preprocessor (blanks messages on
    silenced channels) and as the 'stfu'/'speak' command handler.
    """
    global silenced
    # Handle the message preprocessor path: invoked with no extra args.
    if len(args) == 0:
        # Duh, don't silence the speak command
        is_speak = bool(re.findall(ur'^{0}\W*\s(speak)$'.format(client.nickname), message))
        if channel in silenced and not is_speak:
            # Blank the message so no other plugin responds to it.
            message = u''
        return channel, nick, message
    # Command path: args are (command, command_args).
    elif len(args) == 2:
        resp = u''
        if channel == nick:  # Private message
            # Refuse with a snark - silencing makes no sense in a PM.
            resp = random.choice(snarks)
        elif args[0] == 'stfu':
            silenced.add(channel)
            cmdargs = args[1]
            resp = random.choice(silence_acks)
            # Optional timed form: "stfu for <minutes>".
            if len(cmdargs) > 0 and cmdargs[0] == 'for':
                try:
                    length = int(cmdargs[1]) * 60
                except (TypeError, ValueError, IndexError):
                    # Malformed duration: stay silenced indefinitely.
                    pass
                else:
                    # Schedule the automatic unsilence after `length` seconds.
                    reactor.callLater(length, auto_unsilence, client, channel, length)
                    resp = u"OK {0}, I'll be back in {1} min".format(nick, cmdargs[1])
        elif args[0] == 'speak':
            if channel not in silenced:
                # Not silenced: suppress any response.
                return None
            resp = random.choice(unsilence_acks)
            silenced.discard(channel)
        return resp.format(nick=nick)
| shaunduncan/helga-stfu | helga_stfu.py | Python | mit | 2,274 |
# -*- encoding: UTF-8 -*-
#
# Note:
# ==================================================================================================
# This code have been copied from http://tools.cherrypy.org/wiki/AuthenticationAndAccessRestrictions
# and modified for the purposes of this project.
# ==================================================================================================
#
# Form based authentication for CherryPy. Requires the
# Session tool to be loaded.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import cherrypy
import MySQLdb
from contextlib import closing
import hashlib
# Session key under which the logged-in username is stored.
SESSION_KEY = '_cp_username'
# Session key for per-user filters.
# NOTE(review): appears unused in the code visible here - verify callers.
USER_FILTERS = '_user_filters'
class UserFilters(object):
    """Container for per-user access filters.

    Only carries an optional endpoint filter for now; ``None`` means the
    user has no endpoint restriction.
    """

    def __init__(self, endpoint=None):
        # Endpoint the user is restricted to, or None for no restriction.
        self.endpoint = endpoint

    def hasEndpointFilter(self):
        """Return True when an endpoint filter was configured."""
        return self.endpoint is not None

    def getEndpointFilter(self):
        """Return the configured endpoint filter, or None when absent."""
        return self.endpoint
def check_auth(*args, **kwargs):
    """A tool that looks in config for 'auth.require'. If found and it
    is not None, a login is required and the entry is evaluated as a list of
    conditions that the user must fulfill"""
    conditions = cherrypy.request.config.get('auth.require', None)
    if conditions is not None:
        username = cherrypy.session.get(SESSION_KEY)
        if username:
            # Expose the username to handlers and condition callables.
            cherrypy.request.login = username
            for condition in conditions:
                # A condition is just a callable that returns true or false
                if not condition():
                    # Logged in but not authorized for this resource.
                    raise cherrypy.HTTPRedirect("/auth/login")
        else:
            # Not logged in at all: send to the login form.
            raise cherrypy.HTTPRedirect("/auth/login")


# Register the check as a CherryPy tool that runs before the page handler.
cherrypy.tools.auth = cherrypy.Tool('before_handler', check_auth)
def require(*conditions):
    """A decorator that appends conditions to the auth.require config
    variable."""
    def decorate(f):
        # Lazily create the handler's CherryPy config dict and the
        # 'auth.require' list, then append the new conditions.
        cfg = getattr(f, '_cp_config', None)
        if cfg is None:
            cfg = dict()
            f._cp_config = cfg
        cfg.setdefault('auth.require', []).extend(conditions)
        return f
    return decorate
# Conditions are callables that return True
# if the user fulfills the conditions they define, False otherwise
#
# They can access the current username as cherrypy.request.login
#
# Define those at will however suits the application.
def member_of(groupname):
    """Return a condition callable that checks membership of *groupname*.

    NOTE(review): placeholder logic -- only user 'joe' in group 'admin'
    ever passes; replace with a real group lookup.
    """
    def check():
        # replace with actual check if <username> is in <groupname>
        return cherrypy.request.login == 'joe' and groupname == 'admin'
    return check
def name_is(reqd_username):
    """Return a condition that is True only for the given username."""
    def check():
        return reqd_username == cherrypy.request.login
    return check
# These might be handy
def any_of(*conditions):
    """Returns True if any of the conditions match"""
    def check():
        # Short-circuits exactly like the explicit loop it replaces.
        return any(c() for c in conditions)
    return check
# By default all conditions are required, but this might still be
# needed if you want to use it inside of an any_of(...) condition
def all_of(*conditions):
    """Returns True if all of the conditions match"""
    def check():
        # Short-circuits on the first failing condition.
        return all(c() for c in conditions)
    return check
# Controller to provide login and logout actions
class AuthController(object):
    """Controller providing the /auth/login and /auth/logout actions.

    Credentials are verified against the `users` table of a MySQL database;
    on success the username and a UserFilters object are placed in the
    CherryPy session.
    """

    def __init__(self, login_form_template, dbhost, dbuser, dbpasswd, dbname):
        # Template used to render the login form, plus DB connection settings.
        self.login_form_template = login_form_template
        self.dbhost = dbhost
        self.dbuser = dbuser
        self.dbpasswd = dbpasswd
        self.dbname = dbname

    def on_login(self, username):
        """Called on successful login"""

    def on_logout(self, username):
        """Called on logout"""

    def get_loginform(self, username, error_msg=None, from_page="/"):
        """Render the login form, optionally with an error message.

        NOTE(review): the `username` and `from_page` parameters are accepted
        but not passed to the template -- confirm whether that is intended.
        """
        return self.login_form_template.render(error_msg=error_msg, username=cherrypy.session.get(SESSION_KEY))

    @cherrypy.expose
    def login(self, username=None, password=None, from_page=None):
        """Validate credentials and set up the session on success."""
        if username is None or password is None:
            return self.get_loginform("", from_page=from_page)
        row = self.getUsernameEntry(username)
        if row is None:
            # Same message as for a bad password, so usernames cannot be probed.
            return self.get_loginform(username, u"The username or password you entered is incorrect.", from_page)
        stored_hash = row[2]
        salt = row[3]
        viewfilter_endpoint = row[4]
        # SECURITY NOTE: salted MD5 is a weak password hash; consider
        # migrating to bcrypt/scrypt/PBKDF2.
        if stored_hash != hashlib.md5(salt + username + password).hexdigest():
            return self.get_loginform(username, u"The username or password you entered is incorrect.", from_page)
        cherrypy.session[SESSION_KEY] = cherrypy.request.login = username
        cherrypy.session[USER_FILTERS] = UserFilters(viewfilter_endpoint)
        self.on_login(username)
        raise cherrypy.HTTPRedirect(from_page or "../translist")

    def getUsernameEntry(self, username):
        """Return the `users` row for *username*, or None if absent.

        The connection is wrapped in closing() so it is released even when
        the query raises (the previous version leaked it in that case).
        """
        with closing(MySQLdb.connect(host=self.dbhost, user=self.dbuser,
                                     passwd=self.dbpasswd, db=self.dbname)) as conn:
            with closing(conn.cursor()) as cursor:
                # Parameterized query: username is never interpolated directly.
                cursor.execute("SELECT * FROM `users` WHERE username = %s", (username,))
                row = cursor.fetchone()
        return row

    @cherrypy.expose
    def logout(self, from_page="/"):
        """Clear the session and redirect back to *from_page*."""
        sess = cherrypy.session
        username = sess.get(SESSION_KEY, None)
        sess[SESSION_KEY] = None
        if username:
            cherrypy.request.login = None
            self.on_logout(username)
        raise cherrypy.HTTPRedirect(from_page or "/")
| jembi/openhim-webui | openhim-webui/auth.py | Python | mpl-2.0 | 5,651 |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata for nanpy. Running this module (e.g. `pip install .`)
# registers all pure-Python packages discovered by find_packages().
setup(name="nanpy",
      version="0.8",
      description="Use your Arduino board with Python",
      license="MIT",
      author="Andrea Stagi",
      author_email="stagi.andrea@gmail.com",
      url="http://github.com/nanpy/nanpy",
      packages = find_packages(),
      keywords= "arduino library prototype",
      zip_safe = True)
| pooyapooya/rizpardazande | setup.py | Python | mit | 405 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and test a synthetic daily series (N=1024, seed 0) with a
# moving-average trend, a 12-step cycle, AR(12) noise, a Fisher transform
# and 100 exogenous variables.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Fisher", sigma = 0.0, exog_count = 100, ar_order = 12);
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.info import tables
class ServicesTab(tabs.TableTab):
    """Tab listing the Keystone services from the user's service catalog."""
    table_classes = (tables.ServicesTable,)
    name = _("Services")
    slug = "services"
    template_name = ("horizon/common/_detail_table.html")

    def get_services_data(self):
        request = self.tab_group.request
        catalog = request.user.service_catalog
        services = []
        # Wrap each catalog entry, using its index as a synthetic id.
        for index, entry in enumerate(catalog):
            entry['id'] = index
            services.append(
                keystone.Service(entry, request.user.services_region))
        return services
class ZonesTab(tabs.TableTab):
    """Tab listing nova availability zones."""
    table_classes = (tables.ZonesTable,)
    name = _("Availability Zones")
    slug = "zones"
    template_name = ("horizon/common/_detail_table.html")

    def get_zones_data(self):
        request = self.tab_group.request
        try:
            return nova.availability_zone_list(request, detailed=True)
        except Exception:
            # Show a friendly error and fall back to an empty table.
            exceptions.handle(
                request, _('Unable to retrieve availability zone data.'))
            return []
class HostAggregatesTab(tabs.TableTab):
    """Tab listing nova host aggregates."""
    table_classes = (tables.AggregatesTable,)
    name = _("Host Aggregates")
    slug = "aggregates"
    template_name = ("horizon/common/_detail_table.html")

    def get_aggregates_data(self):
        aggregates = []
        try:
            aggregates = nova.aggregate_list(self.tab_group.request)
        except Exception:
            # Use tab_group.request like every sibling tab in this module
            # (the original referenced self.request here, inconsistently).
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve host aggregates list.'))
        return aggregates
class NovaServicesTab(tabs.TableTab):
    """Tab listing nova compute services."""
    table_classes = (tables.NovaServicesTable,)
    name = _("Compute Services")
    slug = "nova_services"
    template_name = ("horizon/common/_detail_table.html")

    def get_nova_services_data(self):
        try:
            return nova.service_list(self.tab_group.request)
        except Exception:
            # Attach a friendly message for connection failures, then let
            # the exception propagate to the caller.
            msg = _('Unable to get nova services list.')
            exceptions.check_message(["Connection", "refused"], msg)
            raise
class NetworkAgentsTab(tabs.TableTab):
    """Tab listing neutron network agents.

    The tab is only shown when the network service is enabled.
    """
    table_classes = (tables.NetworkAgentsTable,)
    name = _("Network Agents")
    slug = "network_agents"
    template_name = ("horizon/common/_detail_table.html")

    def allowed(self, request):
        # Hide the tab entirely when neutron is not deployed.
        return base.is_service_enabled(request, 'network')

    def get_network_agents_data(self):
        try:
            return neutron.agent_list(self.tab_group.request)
        except Exception:
            # Attach a friendly message for connection failures, then let
            # the exception propagate to the caller.
            msg = _('Unable to get network agents list.')
            exceptions.check_message(["Connection", "refused"], msg)
            raise
class SystemInfoTabs(tabs.TabGroup):
    # Tab group shown on the admin "System Info" panel.
    slug = "system_info"
    tabs = (ServicesTab, NovaServicesTab,
            ZonesTab, HostAggregatesTab,
            NetworkAgentsTab)
    # Remember the active tab across requests.
    sticky = True
| Havate/havate-openstack | proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/admin/info/tabs.py | Python | apache-2.0 | 3,905 |
from django.contrib import admin
from .models import (
Advisor, Album, Band, Bee, Car, CarTire, Event, Inventory, Member, Profile,
School, User,
)
class WidgetAdmin(admin.AdminSite):
    # Custom AdminSite used to register the test models below.
    pass
class CarAdmin(admin.ModelAdmin):
    # Columns shown on the changelist; `owner` is editable in-place.
    list_display = ['make', 'model', 'owner']
    list_editable = ['owner']
class CarTireAdmin(admin.ModelAdmin):
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        """Restrict the `car` choices to cars owned by the current user."""
        if db_field.name == "car":
            kwargs["queryset"] = Car.objects.filter(owner=request.user)
            # Deliberately returns the formfield directly instead of
            # delegating to super() for this field.
            return db_field.formfield(**kwargs)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)
class EventAdmin(admin.ModelAdmin):
    # Use raw-id input widgets instead of select boxes for these relations.
    raw_id_fields = ['main_band', 'supporting_bands']
class AlbumAdmin(admin.ModelAdmin):
    # `cover_art` is displayed on the change form but cannot be edited.
    fields = ('name', 'cover_art',)
    readonly_fields = ('cover_art',)
class SchoolAdmin(admin.ModelAdmin):
    # Render the m2m pickers with vertical and horizontal filter widgets.
    filter_vertical = ('students',)
    filter_horizontal = ('alumni',)
# Register all test models against the custom admin site (rather than the
# default django.contrib.admin.site).
site = WidgetAdmin(name='widget-admin')
site.register(User)
site.register(Car, CarAdmin)
site.register(CarTire, CarTireAdmin)
site.register(Member)
site.register(Band)
site.register(Event, EventAdmin)
site.register(Album, AlbumAdmin)
site.register(Inventory)
site.register(Bee)
site.register(Advisor)
site.register(School, SchoolAdmin)
site.register(Profile)
| nesdis/djongo | tests/django_tests/tests/v22/tests/admin_widgets/widgetadmin.py | Python | agpl-3.0 | 1,338 |
"""Meta functionality used for document creation."""
from __future__ import absolute_import
from .connection import Connection, get as get_connection
from .errors import OperationError
from .field import BaseField, Field
from bson import ObjectId
from .utils import to_snake_case
import six
class DocumentMeta(object):
    """Metadata container for Document classes.

    Collects the fields declared on a Document class (and its bases), the
    options from the class' inner ``Meta`` object, and the
    connection/collection configuration.
    """
    def __init__(self, cls, attrs, meta):
        """Initialize new document class metadata.

        cls -- the document class being built.
        attrs -- the attribute dict of that class; fields are collected
            from it.
        meta -- the optional inner ``Meta`` options object (may be falsy).
        """
        self.cls = cls
        self.fields = {}
        self.options = {}
        self.bind_init()
        # Collect inherited fields first (bases reversed so later bases take
        # precedence), then the class' own field attributes override them.
        fields = {}
        for base in reversed(cls.__bases__):
            if isinstance(getattr(base, '_meta', None), self.__class__):
                fields.update(base._meta.fields)
        if attrs:
            for name, attr in attrs.items():
                if isinstance(attr, BaseField):
                    fields[name] = attr
        self.fields = fields
        if meta:
            self.options.update(vars(meta))
        self.connection = self.options.pop('connection', None)
        self.collection = self.options.pop('collection', None)
        if not self.collection:
            # Default the collection name to the snake_cased class name.
            self.collection = to_snake_case(cls.__name__)
        # A document without a connection is treated as a subdocument.
        self.subdocument = not bool(self.connection)
        if not self.subdocument and '_id' not in self.fields:
            self.fields['_id'] = Field(ObjectId, require=False)
        self.bind_fields()
    def bind_init(meta):
        """Bind init hook to the document class.

        Note: the first parameter is this DocumentMeta instance (named
        ``meta`` instead of ``self`` so the closure below reads clearly).
        """
        parent = meta.cls.__init__
        # prevent recursive decoration
        if hasattr(parent, 'parent'):
            parent = parent.parent
        def __init__(self, *args, **kwargs):
            # Apply field defaults to the raw document before the original
            # __init__ runs, unless this is a partial document.
            if not self._partial:
                for name, default in six.iteritems(meta.defaults):
                    if name in self._raw:
                        continue
                    if hasattr(default, '__call__'):
                        # Callable defaults are evaluated per instance and
                        # encoded through the corresponding field.
                        field = self._meta.get_field(name)
                        default = field.encode(meta.cls, name, default())
                    self._raw[name] = default
            return parent(self, *args, **kwargs)
        # NOTE(review): this sets an attribute literally named ``name``;
        # it may have been intended as ``__name__`` -- confirm before changing.
        __init__.name = parent.__name__
        __init__.hook = True
        __init__.parent = parent
        meta.cls.__init__ = __init__
    def bind_fields(self):
        """Bind fields to the document class.

        Also precomputes the ``defaults`` mapping; non-callable defaults are
        encoded and validated once here.
        """
        defaults = {}
        for name, field in self.fields.items():
            setattr(self.cls, name, field(self.cls, name))
            default = field.default
            if default is not None:
                if not hasattr(default, '__call__'):
                    default = field.encode(self.cls, name, field.default)
                field.validate(self.cls, name, default)
                defaults[name] = default
        self.defaults = defaults
    def get_connection(self, connection=None):
        """
        Return the connection associated with this document. If connection is provided then it will
        be used instead. This value may be the name of a connection or an actual connection object.
        If this document has no connection (`self.connection is None`),
        the default global connection is used.
        """
        if connection:
            if isinstance(connection, Connection):
                return connection
            else:
                return get_connection(str(connection))
        return get_connection(self.connection)
    def get_collection(self, connection=None):
        """
        Return the collection associated with this document, resolved through
        the given (or configured) connection. Raise an OperationError when no
        usable connection exists.
        """
        if self.collection:
            connection = self.get_connection(connection)
            if connection:
                return connection[self.collection]
        raise OperationError(
            "document {} has no connection, and no default exists".format(self.cls.__name__))
    def get_partial(self, fields):
        """Return a valid partial value from a list of fields."""
        if fields:
            # '_id' is always included in a partial projection.
            return {'_id'} | set(fields)
        return None
    def get_fields(self, partial):
        """Return a dictionary containing active fields."""
        if partial:
            return {k: self.fields[k] for k in partial if k in self.fields}
        return self.fields
    def get_field(self, name):
        """Return the named field. Supports dot syntax to retrieve fields from subdocuments."""
        from .types import DocumentType
        names = name.split('.')
        names.reverse()
        doc = self.cls
        field = None
        while len(names):
            name = names.pop()
            field = doc._meta.fields.get(name)
            # Stop descending when the field is not a subdocument.
            if not hasattr(field, 'typ') or not isinstance(field.typ, DocumentType):
                break
            doc = field.typ.document
        if len(names):
            # Unconsumed path components mean the lookup failed.
            return None
        return field
    @property
    def readonly(self):
        """Return True if the document is readonly."""
        return bool(self.options.get('readonly', False) or (
            self.options.get('disable_save', False) and
            self.options.get('disable_insert', False) and
            self.options.get('disable_update', False) and
            self.options.get('disable_remove', False)))
    @property
    def disable_save(self):
        """Return True if save is disabled for the document."""
        return (bool(self.options.get('readonly', False) or
                     self.options.get('disable_save', False)))
    @property
    def disable_insert(self):
        """Return True if insert is disabled for the document."""
        return (bool(self.options.get('readonly', False) or
                     self.options.get('disable_insert', False)))
    @property
    def disable_update(self):
        """Return True if update is disabled for the document."""
        return (bool(self.options.get('readonly', False) or
                     self.options.get('disable_update', False)))
    @property
    def disable_remove(self):
        """Return True if remove is disabled for the document."""
        return (bool(self.options.get('readonly', False) or
                     self.options.get('disable_remove', False)))
class DocumentBuilder(type):
    """Metaclass for building document classes."""

    def __new__(meta, name, bases, attrs):
        """Create the class, then attach a DocumentMeta built from its
        attributes and optional inner ``Meta`` options class."""
        options = attrs.pop('Meta', {})
        cls = type.__new__(meta, name, bases, attrs)
        cls._meta = DocumentMeta(cls, attrs, options)
        return cls
| WiFast/bearfield | bearfield/meta.py | Python | bsd-3-clause | 6,598 |
"""Geometry functions.
Rectangle is a utility class for working with rectangles (unions and
intersections).
A point is represented as a tuple `(x, y)`.
"""
from __future__ import annotations
from math import sqrt
from typing import Iterable, Optional, Tuple, Union
Point = Tuple[float, float] # x, y
Rect = Tuple[float, float, float, float] # x, y, width, height
class Rectangle:
    """Python Rectangle implementation. Rectangles can be added (union),
    substituted (intersection) and points and rectangles can be tested to be in
    the rectangle.

    >>> r1= Rectangle(1,1,5,5)
    >>> r2 = Rectangle(3,3,6,7)

    Test if two rectangles intersect:

    >>> if r1 - r2: 'yes'
    'yes'

    >>> r1, r2 = Rectangle(1,2,3,4), Rectangle(1,2,3,4)
    >>> r1 == r2
    True

    >>> r = Rectangle(-5, 3, 10, 8)
    >>> r.width = 2
    >>> r
    Rectangle(-5, 3, 2, 8)

    >>> r = Rectangle(-5, 3, 10, 8)
    >>> r.height = 2
    >>> r
    Rectangle(-5, 3, 10, 2)
    """

    def __init__(
        self,
        x: float = 0,
        y: float = 0,
        width: Optional[float] = None,
        height: Optional[float] = None,
        x1: float = 0,
        y1: float = 0,
    ):
        # Either (width, height) or the opposite corner (x1, y1) may be given.
        if width is None:
            self.x = min(x, x1)
            self.width = abs(x1 - x)
        else:
            self.x = x
            self.width = width
        if height is None:
            self.y = min(y, y1)
            self.height = abs(y1 - y)
        else:
            self.y = y
            self.height = height

    @property
    def x1(self) -> float:
        return self.x + self.width

    @x1.setter
    def x1(self, x1: float) -> None:
        # Width is clamped at zero; x never moves.
        width = x1 - self.x
        width = max(width, 0)
        self.width = width

    @property
    def y1(self) -> float:
        return self.y + self.height

    @y1.setter
    def y1(self, y1: float) -> None:
        # Height is clamped at zero; y never moves.
        height = y1 - self.y
        height = max(height, 0)
        self.height = height

    def expand(self, delta: float) -> None:
        """Grow the rectangle by ``delta`` on every side.

        >>> r = Rectangle(-5, 3, 10, 8)
        >>> r.expand(5)
        >>> r
        Rectangle(-10, -2, 20, 18)
        """
        self.x -= delta
        self.y -= delta
        self.width += delta * 2
        self.height += delta * 2

    def tuple(self) -> Tuple[float, float, float, float]:
        """A type safe version of `tuple(rectangle)`."""
        return (self.x, self.y, self.width, self.height)

    def __repr__(self) -> str:
        """
        >>> Rectangle(5,7,20,25)
        Rectangle(5, 7, 20, 25)
        """
        if self:
            return f"{self.__class__.__name__}({self.x}, {self.y}, {self.width}, {self.height})"
        return f"{self.__class__.__name__}()"

    def __iter__(self) -> Iterable[float]:
        """
        >>> tuple(Rectangle(1,2,3,4))
        (1, 2, 3, 4)
        """
        return iter((self.x, self.y, self.width, self.height))

    def __getitem__(self, index: int) -> float:
        """
        >>> Rectangle(1,2,3,4)[1]
        2
        """
        return (self.x, self.y, self.width, self.height)[index]

    def __bool__(self) -> bool:
        """
        >>> r=Rectangle(1,2,3,4)
        >>> if r: 'yes'
        'yes'
        >>> r = Rectangle(1,1,0,0)
        >>> if r: 'no'
        """
        return self.width > 0 and self.height > 0

    def __eq__(self, other: object) -> bool:
        return (
            isinstance(other, Rectangle)
            and self.x == other.x
            and self.y == other.y
            and self.width == other.width
            # BUGFIX: previously compared self.height to itself, so height
            # was ignored in equality checks.
            and self.height == other.height
        )

    def __add__(self, obj: Union[Rectangle, Rect]) -> Rectangle:
        """Create a new Rectangle is the union of the current rectangle with
        another Rectangle, tuple `(x,y)` or tuple `(x, y, width, height)`.

        >>> r=Rectangle(5, 7, 20, 25)
        >>> r + (0, 0)
        Traceback (most recent call last):
          ...
        TypeError: Can only add Rectangle or tuple (x, y, width, height), not (0, 0).
        >>> r + (20, 30, 40, 50)
        Rectangle(5, 7, 55, 73)
        """
        return Rectangle(self.x, self.y, self.width, self.height).__iadd__(obj)

    def __iadd__(self, obj: Union[Rectangle, Rect]) -> Rectangle:
        """
        >>> r = Rectangle()
        >>> r += Rectangle(5, 7, 20, 25)
        >>> r += (0, 0, 30, 10)
        >>> r
        Rectangle(0, 0, 30, 32)
        >>> r += 'aap'
        Traceback (most recent call last):
          ...
        TypeError: Can only add Rectangle or tuple (x, y, width, height), not 'aap'.
        """
        try:
            x, y, width, height = obj  # type: ignore[misc]
        except ValueError:
            raise TypeError(
                f"Can only add Rectangle or tuple (x, y, width, height), not {repr(obj)}."
            )
        x1, y1 = x + width, y + height
        if self:
            ox1, oy1 = self.x + self.width, self.y + self.height
            self.x = min(self.x, x)
            self.y = min(self.y, y)
            self.x1 = max(ox1, x1)
            self.y1 = max(oy1, y1)
        else:
            # An empty rectangle contributes nothing to the union.
            self.x, self.y, self.width, self.height = x, y, width, height
        return self

    def __sub__(self, obj: Union[Rectangle, Rect]) -> Rectangle:
        """Create a new Rectangle is the union of the current rectangle with
        another Rectangle or tuple (x, y, width, height).

        >>> r = Rectangle(5, 7, 20, 25)
        >>> r - (20, 30, 40, 50)
        Rectangle(20, 30, 5, 2)
        >>> r - (30, 40, 40, 50)
        Rectangle()
        """
        return Rectangle(self.x, self.y, self.width, self.height).__isub__(obj)

    def __isub__(self, obj: Union[Rectangle, Rect]) -> Rectangle:
        """
        >>> r = Rectangle()
        >>> r -= Rectangle(5, 7, 20, 25)
        >>> r -= (0, 0, 30, 10)
        >>> r
        Rectangle(5, 7, 20, 3)
        >>> r -= 'aap'
        Traceback (most recent call last):
          ...
        TypeError: Can only subtract Rectangle or tuple (x, y, width, height), not 'aap'.
        """
        try:
            x, y, width, height = obj  # type: ignore[misc]
        except ValueError:
            raise TypeError(
                f"Can only subtract Rectangle or tuple (x, y, width, height), not {repr(obj)}."
            )
        x1, y1 = x + width, y + height
        if self:
            ox1, oy1 = self.x + self.width, self.y + self.height
            self.x = max(self.x, x)
            self.y = max(self.y, y)
            self.x1 = min(ox1, x1)
            self.y1 = min(oy1, y1)
        else:
            self.x, self.y, self.width, self.height = x, y, width, height
        return self

    def __contains__(self, obj: Union[Rectangle, Rect, Point]) -> bool:
        """Check if a point `(x, y)` in inside rectangle `(x, y, width,
        height)` or if a rectangle instance is inside with the rectangle.

        >>> r=Rectangle(10, 5, 12, 12)
        >>> (0, 0) in r
        False
        >>> (10, 6) in r
        True
        >>> (12, 12) in r
        True
        >>> (100, 4) in r
        False
        >>> (11, 6, 5, 5) in r
        True
        >>> (11, 6, 15, 15) in r
        False
        >>> Rectangle(11, 6, 5, 5) in r
        True
        >>> Rectangle(11, 6, 15, 15) in r
        False
        >>> 'aap' in r
        Traceback (most recent call last):
          ...
        TypeError: Should compare to Rectangle, tuple (x, y, width, height) or point (x, y), not 'aap'.
        """
        try:
            x, y, width, height = obj  # type: ignore[misc]
            # BUGFIX: y1 was computed as y + width, so non-square rectangles
            # were tested against the wrong bottom edge.
            x1, y1 = x + width, y + height
        except ValueError:
            # point
            try:
                (x, y) = (x1, y1) = obj  # type: ignore[misc]
            except ValueError:
                raise TypeError(
                    f"Should compare to Rectangle, tuple (x, y, width, height) or point (x, y), not {repr(obj)}."
                )
        return x >= self.x and x1 <= self.x1 and y >= self.y and y1 <= self.y1
def distance_point_point(point1: Point, point2: Point = (0.0, 0.0)) -> float:
    """Return the Euclidean distance between ``point1`` and ``point2``.

    >>> f"{distance_point_point((0,0), (1,1)):.3f}"
    '1.414'
    """
    delta_x = point1[0] - point2[0]
    delta_y = point1[1] - point2[1]
    return sqrt(delta_x * delta_x + delta_y * delta_y)
def distance_point_point_fast(point1: Point, point2: Point = (0.0, 0.0)) -> float:
    """Return the Manhattan (L1) distance between ``point1`` and ``point2``.

    This version is faster than ``distance_point_point()``, but less precise.

    >>> distance_point_point_fast((0,0), (1,1))
    2
    """
    return abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])
def distance_rectangle_point(rect: Rect, point: Point) -> float:
    """Return the Manhattan distance from rectangle ``rect`` (given as
    ``(x, y, width, height)``) to ``point``; zero when the point is inside.

    >>> distance_rectangle_point(Rectangle(0, 0, 10, 10), (11, -1))
    2
    >>> distance_rectangle_point((0, 0, 10, 10), (11, -1))
    2
    >>> distance_rectangle_point((0, 0, 10, 10), (-1, 11))
    2
    """
    px, py = point
    rx, ry, rw, rh = rect
    # Per-axis distance is zero when the coordinate lies within the span;
    # otherwise exactly one of the two differences is positive.
    dx = max(rx - px, 0.0, px - (rx + rw))
    dy = max(ry - py, 0.0, py - (ry + rh))
    return dx + dy
def point_on_rectangle(rect: Rect, point: Point, border: bool = False) -> Point:
    """Project ``point`` onto the rectangle ``rect``.

    With ``border=False`` a point already inside the rectangle is returned
    unchanged; with ``border=True`` it is pushed to the nearest border.

    >>> point_on_rectangle(Rectangle(0, 0, 10, 10), (11, -1))
    (10, 0)
    >>> point_on_rectangle((0, 0, 10, 10), (5, 12))
    (5, 10)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (3, 4))
    (3, 4)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (4, 9), border=True)
    (4, 11)
    >>> point_on_rectangle((1, 1, 10, 10), (4, 6), border=True)
    (1, 6)
    >>> point_on_rectangle((1, 1, 10, 100), (5, 98), border=True)
    (5, 101)
    """
    qx, qy = point
    rx, ry, rw, rh = tuple(rect)
    inside_x = inside_y = False

    # Clamp horizontally; remember when no clamping was needed.
    if qx < rx:
        qx = rx
    elif qx > rx + rw:
        qx = rx + rw
    elif border:
        inside_x = True

    # Clamp vertically likewise.
    if qy < ry:
        qy = ry
    elif qy > ry + rh:
        qy = ry + rh
    elif border:
        inside_y = True

    if inside_x and inside_y:
        # The point is inside and must be pushed to the closest side.
        dist_x = min(abs(rx - qx), abs(rx + rw - qx))
        dist_y = min(abs(ry - qy), abs(ry + rh - qy))
        if dist_x > dist_y:
            # The vertical sides are farther away: snap vertically.
            qy = ry if qy < ry + rh / 2.0 else ry + rh
        else:
            qx = rx if qx < rx + rw / 2.0 else rx + rw
    return qx, qy
def distance_line_point(
    line_start: Point, line_end: Point, point: Point
) -> Tuple[float, Point]:
    """Return ``(distance, closest_point)`` from ``point`` to the segment
    joining ``line_start`` and ``line_end``.

    >>> distance_line_point((0., 0.), (2., 4.), point=(3., 4.))
    (1.0, (2.0, 4.0))
    >>> distance_line_point((0., 0.), (2., 4.), point=(-1., 0.))
    (1.0, (0.0, 0.0))
    >>> distance_line_point((0., 0.), (2., 4.), point=(1., 2.))
    (0.0, (1.0, 2.0))
    """
    # Keep the untranslated end point for the return value.
    true_end = line_end
    # Translate so the segment starts at the origin.
    vx, vy = line_end[0] - line_start[0], line_end[1] - line_start[1]
    px, py = point[0] - line_start[0], point[1] - line_start[1]

    seg_len_sq = vx * vx + vy * vy
    if seg_len_sq < 0.0001:
        # Degenerate segment: both endpoints (nearly) coincide.
        return sqrt(px * px + py * py), line_start

    # Parameter of the orthogonal projection along the segment.
    t = (vx * px + vy * py) / seg_len_sq
    if t < 0.0:
        # Projection falls before the start point.
        return sqrt(px * px + py * py), line_start
    if t > 1.0:
        # Projection falls past the end point.
        dx, dy = px - vx, py - vy
        return sqrt(dx * dx + dy * dy), true_end

    # Projection lies on the segment.
    proj_x, proj_y = vx * t, vy * t
    dx, dy = proj_x - px, proj_y - py
    return (
        sqrt(dx * dx + dy * dy),
        (line_start[0] + proj_x, line_start[1] + proj_y),
    )
def intersect_line_line(
    line1_start: Point, line1_end: Point, line2_start: Point, line2_end: Point
) -> Optional[Point]:
    """Find the point where the lines (segments) defined by ``(line1_start,
    line1_end)`` and ``(line2_start, line2_end)`` intersect. If no
    intersection occurs, ``None`` is returned.
    >>> intersect_line_line((3, 0), (8, 10), (0, 0), (10, 10))
    (6, 6)
    >>> intersect_line_line((0, 0), (10, 10), (3, 0), (8, 10))
    (6, 6)
    >>> intersect_line_line((0, 0), (10, 10), (8, 10), (3, 0))
    (6, 6)
    >>> intersect_line_line((8, 10), (3, 0), (0, 0), (10, 10))
    (6, 6)
    >>> intersect_line_line((0, 0), (0, 10), (3, 0), (8, 10))
    >>> intersect_line_line((0, 0), (0, 10), (3, 0), (3, 10))
    Ticket #168:
    >>> intersect_line_line((478.0, 117.0), (478.0, 166.0), (527.5, 141.5), (336.5, 139.5))
    (478.5, 141.48167539267016)
    >>> intersect_line_line((527.5, 141.5), (336.5, 139.5), (478.0, 117.0), (478.0, 166.0))
    (478.5, 141.48167539267016)
    This is a Python translation of the ``lines_intersect``, C Code from
    Graphics Gems II, Academic Press, Inc. The original routine was written
    by Mukesh Prasad.
    EULA: The Graphics Gems code is copyright-protected. In other words, you
    cannot claim the text of the code as your own and resell it. Using the code
    is permitted in any program, product, or library, non-commercial or
    commercial. Giving credit is not required, though is a nice gesture. The
    code comes as-is, and if there are any flaws or problems with any Gems
    code, nobody involved with Gems - authors, editors, publishers, or
    webmasters - are to be held responsible. Basically, don't be a jerk, and
    remember that anything free comes with no guarantee.
    """
    #
    # This function computes whether two line segments,
    # respectively joining the input points (x1,y1) -- (x2,y2)
    # and the input points (x3,y3) -- (x4,y4) intersect.
    # If the lines intersect, the output variables x, y are
    # set to coordinates of the point of intersection.
    #
    # All values are in integers. The returned value is rounded
    # to the nearest integer point.
    #
    # If non-integral grid points are relevant, the function
    # can easily be transformed by substituting floating point
    # calculations instead of integer calculations.
    #
    # Entry
    #        x1, y1,  x2, y2   Coordinates of endpoints of one segment.
    #        x3, y3,  x4, y4   Coordinates of endpoints of other segment.
    #
    # Exit
    #        x, y              Coordinates of intersection point.
    #
    # The value returned by the function is one of:
    #
    #        DONT_INTERSECT    0
    #        DO_INTERSECT      1
    #        COLLINEAR         2
    #
    # Error conditions:
    #
    #     Depending upon the possible ranges, and particularly on 16-bit
    #     computers, care should be taken to protect from overflow.
    #
    #     In the following code, 'long' values have been used for this
    #     purpose, instead of 'int'.
    #
    x1, y1 = line1_start
    x2, y2 = line1_end
    x3, y3 = line2_start
    x4, y4 = line2_end
    # long a1, a2, b1, b2, c1, c2; /* Coefficients of line eqns. */
    # long r1, r2, r3, r4;         /* 'Sign' values */
    # long denom, offset, num;     /* Intermediate values */
    # Compute a1, b1, c1, where line joining points 1 and 2
    # is "a1 x  +  b1 y  +  c1  =  0".
    a1 = y2 - y1
    b1 = x1 - x2
    c1 = x2 * y1 - x1 * y2
    # Compute r3 and r4.
    r3 = a1 * x3 + b1 * y3 + c1
    r4 = a1 * x4 + b1 * y4 + c1
    # Check signs of r3 and r4.  If both point 3 and point 4 lie on
    # same side of line 1, the line segments do not intersect.
    # (The `r3 and r4` truthiness check treats a zero value -- a point
    # exactly on the line -- as "maybe intersecting".)
    if r3 and r4 and (r3 * r4) >= 0:
        return None  # ( DONT_INTERSECT )
    # Compute a2, b2, c2
    a2 = y4 - y3
    b2 = x3 - x4
    c2 = x4 * y3 - x3 * y4
    # Compute r1 and r2
    r1 = a2 * x1 + b2 * y1 + c2
    r2 = a2 * x2 + b2 * y2 + c2
    # Check signs of r1 and r2.  If both point 1 and point 2 lie
    # on same side of second line segment, the line segments do
    # not intersect.
    if r1 and r2 and (r1 * r2) >= 0:  # SAME_SIGNS( r1, r2 ))
        return None  # ( DONT_INTERSECT )
    # Line segments intersect: compute intersection point.
    # The denom / 2 is to get rounding instead of truncating.  It
    # is added or subtracted to the numerator, depending upon the
    # sign of the numerator.
    denom = a1 * b2 - a2 * b1
    x_num = b1 * c2 - b2 * c1
    y_num = a2 * c1 - a1 * c2
    if not denom:
        return None  # ( COLLINEAR )
    elif isinstance(denom, float):  # denom is float, use normal division
        offset = abs(denom) / 2
        # `(cond) and a or b` is the legacy pre-2.5 conditional idiom;
        # safe here because `a` (x_num - offset) is nonzero when chosen.
        x = ((x_num < 0) and (x_num - offset) or (x_num + offset)) / denom
        y = ((y_num < 0) and (y_num - offset) or (y_num + offset)) / denom
    else:  # denom is int, use integer division
        offset = abs(denom) // 2
        x = ((x_num < 0) and (x_num - offset) or (x_num + offset)) // denom
        y = ((y_num < 0) and (y_num - offset) or (y_num + offset)) // denom
    return x, y
def rectangle_contains(inner: Rect, outer: Rect) -> bool:
    """Return True when rectangle ``inner`` lies fully within ``outer``."""
    ix, iy, iw, ih = inner
    ox, oy, ow, oh = outer
    if ix < ox or iy < oy:
        return False
    # Both far corners of `inner` must stay within `outer`.
    return ix + iw <= ox + ow and iy + ih <= oy + oh
def rectangle_intersects(recta: Rect, rectb: Rect) -> bool:
    """Return True if ``recta`` and ``rectb`` overlap (touching edges count).

    >>> rectangle_intersects((5,5,20, 20), (10, 10, 1, 1))
    True
    >>> rectangle_intersects((40, 30, 10, 1), (1, 1, 1, 1))
    False
    """
    ax, ay, aw, ah = recta
    bx, by, bw, bh = rectb
    # The rectangles are disjoint exactly when one lies strictly beyond
    # the other on some axis.
    disjoint = ax > bx + bw or bx > ax + aw or ay > by + bh or by > ay + ah
    return not disjoint
def rectangle_clip(recta: Rect, rectb: Rect) -> Optional[Rect]:
    """Return the intersection of ``recta`` and ``rectb``, or ``None`` when
    they do not intersect.

    >>> rectangle_clip((0, 0, 20, 20), (10, 10, 20, 20))
    (10, 10, 10, 10)
    """
    ax, ay, aw, ah = recta
    bx, by, bw, bh = rectb
    left = max(ax, bx)
    top = max(ay, by)
    right = min(ax + aw, bx + bw)
    bottom = min(ay + ah, by + bh)
    if right < left or bottom < top:
        return None
    return (left, top, right - left, bottom - top)
| amolenaar/gaphas | gaphas/geometry.py | Python | lgpl-2.1 | 19,490 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from telemetry.core import util
from telemetry.results import buildbot_output_formatter
from telemetry.results import csv_output_formatter
from telemetry.results import gtest_progress_reporter
from telemetry.results import html_output_formatter
from telemetry.results import json_output_formatter
from telemetry.results import page_test_results
from telemetry.results import progress_reporter
# Allowed output formats. The default is the first item in the list.
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'block', 'csv', 'gtest',
                          'json', 'none')


def AddResultsOptions(parser):
  """Attach the Telemetry results/output command line options to *parser*."""
  group = optparse.OptionGroup(parser, 'Results options')
  add = group.add_option
  add('--output-format',
      default=_OUTPUT_FORMAT_CHOICES[0],
      choices=_OUTPUT_FORMAT_CHOICES,
      help='Output format. Defaults to "%%default". '
           'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
  add('-o', '--output',
      dest='output_file',
      help='Redirects output to a file. Defaults to stdout.')
  add('--output-trace-tag',
      default='',
      help='Append a tag to the key of each result trace.')
  add('--reset-results', action='store_true',
      help='Delete all stored results.')
  add('--upload-results', action='store_true',
      help='Upload the results to cloud storage.')
  add('--results-label',
      default=None,
      help='Optional label to use for the results of a run .')
  add('--suppress_gtest_report',
      default=False,
      help='Whether to suppress GTest progress report.')
  parser.add_option_group(group)
def CreateResults(metadata, options):
  """Builds a PageTestResults wired to the requested output formatter(s).

  Args:
    metadata: Benchmark metadata, forwarded to the html/json formatters.
    options: Contains the options specified in AddResultsOptions.

  Returns:
    A page_test_results.PageTestResults instance.

  Raises:
    Exception: if options.output_format is not one of
        _OUTPUT_FORMAT_CHOICES (normally prevented by the option parser).
  """
  # TODO(chrishenry): This logic prevents us from having multiple
  # OutputFormatters. We should have an output_file per OutputFormatter.
  # Maybe we should have --output-dir instead of --output-file?
  if options.output_format == 'html' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.html')
  elif options.output_format == 'json' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.json')
  if hasattr(options, 'output_file') and options.output_file:
    output_file = os.path.expanduser(options.output_file)
    open(output_file, 'a').close()  # Create file if it doesn't exist.
    # NOTE(review): 'r+' preserves any existing file contents; the handle is
    # deliberately left open here -- the formatters own it from this point.
    output_stream = open(output_file, 'r+')
  else:
    output_stream = sys.stdout
  # Tolerate option objects that were not built by AddResultsOptions.
  if not hasattr(options, 'output_format'):
    options.output_format = _OUTPUT_FORMAT_CHOICES[0]
  if not hasattr(options, 'output_trace_tag'):
    options.output_trace_tag = ''
  output_formatters = []
  output_skipped_tests_summary = True
  reporter = None
  if options.output_format == 'none':
    pass
  elif options.output_format == 'csv':
    output_formatters.append(csv_output_formatter.CsvOutputFormatter(
        output_stream))
  elif options.output_format == 'buildbot':
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        output_stream, trace_tag=options.output_trace_tag))
  elif options.output_format == 'gtest':
    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = False
  elif options.output_format == 'html':
    # TODO(chrishenry): We show buildbot output so that users can grep
    # through the results easily without needing to open the html
    # file. Another option for this is to output the results directly
    # in gtest-style results (via some sort of progress reporter),
    # as we plan to enable gtest-style output for all output formatters.
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        sys.stdout, trace_tag=options.output_trace_tag))
    output_formatters.append(html_output_formatter.HtmlOutputFormatter(
        output_stream, metadata, options.reset_results,
        options.upload_results, options.browser_type,
        options.results_label, trace_tag=options.output_trace_tag))
  elif options.output_format == 'json':
    output_formatters.append(
        json_output_formatter.JsonOutputFormatter(output_stream, metadata))
  else:
    # Should never be reached. The parser enforces the choices.
    raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                    % (options.output_format,
                       ', '.join(_OUTPUT_FORMAT_CHOICES)))
  if options.suppress_gtest_report:
    reporter = progress_reporter.ProgressReporter()
  else:
    reporter = gtest_progress_reporter.GTestProgressReporter(
        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter)
| ondra-novak/chromium.src | tools/telemetry/telemetry/results/results_options.py | Python | bsd-3-clause | 5,323 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
flask_paginate
~~~~~~~~~~~~~~~~~~
Adds pagination support to your application.
:copyright: (c) 2013 by Lix Xu.
:license: BSD, see LICENSE for more details
"""
from __future__ import unicode_literals
import sys
from flask import request, url_for, Markup, current_app
__version__ = '0.4.5'
PY2 = sys.version_info[0] == 2
# HTML snippet templates per CSS framework. The keys used across these dicts
# are the values accepted by Pagination(css_framework=...): bootstrap,
# bootstrap2, bootstrap3 and foundation.

# "previous page" links ({0}=href, {1}=label)
_bs_prev_page = '<li class="previous"><a href="{0}">{1}</a></li>'
PREV_PAGES = dict(bootstrap=_bs_prev_page,
                  bootstrap2=_bs_prev_page,
                  bootstrap3=_bs_prev_page,
                  foundation='<li class="arrow"><a href="{0}">{1}</a></li>',
                  )
# "next page" links
_bs_next_page = '<li class="next"><a href="{0}">{1}</a></li>'
NEXT_PAGES = dict(bootstrap=_bs_next_page,
                  bootstrap2=_bs_next_page,
                  bootstrap3=_bs_next_page,
                  foundation='<li class="arrow"><a href="{0}">{1}</a></li>',
                  )
# Non-clickable marker for the current page ({0}=page number)
CURRENT_PAGES = dict(bootstrap='<li class="active"><a>{0}</a></li>',
                     bootstrap3='<li class="active"><a>{0}</a></li>',
                     foundation='<li class="current"><a>{0}</a></li>',
                     )
CURRENT_PAGES.update(bootstrap2=CURRENT_PAGES['bootstrap'])
# Ordinary page link, shared by all frameworks
LINK = '<li><a href="{0}">{1}</a></li>'
# Foundation's disabled link (used for the disabled prev/next entries below)
FA_LINK = '<li class="unavailable"><a>{0}</a></li>'
# "..." gap between non-adjacent page numbers
GAP_MARKERS = dict(bootstrap='<li class="disabled"><a>...</a></li>',
                   foundation='<li class="unavailable">\
<a>...</a></li>',
                   )
GAP_MARKERS.update(bootstrap2=GAP_MARKERS['bootstrap'],
                   bootstrap3=GAP_MARKERS['bootstrap'],
                   )
# Disabled "previous"/"next" links, shown on the first/last page
_bs_prev_disabled_page = '<li class="previous disabled unavailable">\
<a> {0} </a></li>'
PREV_DISABLED_PAGES = dict(bootstrap=_bs_prev_disabled_page,
                           bootstrap2=_bs_prev_disabled_page,
                           bootstrap3=_bs_prev_disabled_page,
                           foundation=FA_LINK,
                           )
_bs_next_disabled_page = '<li class="next disabled">\
<a> {0} </a></li>'
NEXT_DISABLED_PAGES = dict(bootstrap=_bs_next_disabled_page,
                           bootstrap2=_bs_next_disabled_page,
                           bootstrap3=_bs_next_disabled_page,
                           foundation=FA_LINK,
                           )
# Default prev/next labels and record name
PREV_LABEL = '«'
NEXT_LABEL = '»'
RECORD_NAME = 'records'
# Texts rendered by Pagination.info ({start}/{end}/{total}/{found} filled in)
DISPLAY_MSG = '''displaying <b>{start} - {end}</b> {record_name} in
total <b>{total}</b>'''
SEARCH_MSG = '''found <b>{found}</b> {record_name},
displaying <b>{start} - {end}</b>'''
# Opening/closing wrappers around the link list ({0}=size class, {1}=alignment)
CSS_LINKS = dict(bootstrap='<div class="pagination{0}{1}"><ul>',
                 bootstrap2='<div class="pagination{0}{1}"><ul>',
                 bootstrap3='<ul class="pagination{0}{1}">',
                 foundation='<ul class="pagination{0}{1}">',
                 )
CSS_LINKS_END = dict(bootstrap='</ul></div>',
                     bootstrap2='</ul></div>',
                     bootstrap3='</ul>',
                     foundation='</ul>',
                     )
# foundation alignment
F_ALIGNMENT = '<div class="pagination-{0}">'
def get_page_args():
    """Return ``(page, per_page, offset)`` derived from the current request.

    The page number comes from the GET/view argument named by the
    ``page_parameter`` request argument (``'page'`` by default); ``per_page``
    falls back to the app's ``PER_PAGE`` config (default 10).
    """
    params = request.args.copy()
    params.update(request.view_args.copy())
    page_key = params.get('page_parameter', 'page')
    current_page = int(params.get(page_key, 1))
    raw_per_page = params.get('per_page')
    if raw_per_page:
        per_page = int(raw_per_page)
    else:
        per_page = current_app.config.get('PER_PAGE', 10)
    return current_page, per_page, (current_page - 1) * per_page
class Pagination(object):
    """A simple pagination extension for flask."""

    def __init__(self, found=0, **kwargs):
        '''Detail parameters.

        **found**: used when searching

        **page**: current page

        **per_page**: how many records displayed on one page

        **page_parameter**: a name (string) of a GET parameter that holds
        a page index. Use it if you want to iterate over multiple
        Pagination objects simultaneously. Default is 'page'.

        **inner_window**: how many links around current page

        **outer_window**: how many links near first/last link

        **prev_label**: text for previous page, default is **«**

        **next_label**: text for next page, default is **»**

        **search**: search or not?

        **total**: total records for pagination

        **display_msg**: text for pagination information

        **search_msg**: text for search information

        **record_name**: record name shown in pagination information

        **link_size**: font size of page links

        **alignment**: the alignment of pagination links

        **href**: add a custom href for links - this supports forms
        with post method. It MUST contain {0} to format page number

        **show_single_page**: decide whether or not a single page
        returns pagination

        **bs_version**: the version of bootstrap, default is **2**

        **css_framework**: the css framework, default is **bootstrap**

        **anchor**: anchor parameter, appends to page href

        **format_total**: number format total, like **1,234**,
        default is False

        **format_number**: number format start and end, like **1,234**,
        default is False
        '''
        self.found = found
        self.page_parameter = kwargs.get('page_parameter', 'page')
        self.page = kwargs.get(self.page_parameter, 1)
        self.per_page = kwargs.get('per_page', 10)
        self.inner_window = kwargs.get('inner_window', 2)
        self.outer_window = kwargs.get('outer_window', 1)
        self.prev_label = kwargs.get('prev_label') or PREV_LABEL
        self.next_label = kwargs.get('next_label') or NEXT_LABEL
        self.search = kwargs.get('search', False)
        self.total = kwargs.get('total', 0)
        self.format_total = kwargs.get('format_total', False)
        self.format_number = kwargs.get('format_number', False)
        self.display_msg = kwargs.get('display_msg') or DISPLAY_MSG
        self.search_msg = kwargs.get('search_msg') or SEARCH_MSG
        self.record_name = kwargs.get('record_name') or RECORD_NAME
        # Unknown frameworks silently fall back to bootstrap.
        self.css_framework = kwargs.get('css_framework', 'bootstrap').lower()
        if self.css_framework not in CURRENT_PAGES:
            self.css_framework = 'bootstrap'
        # bs_version 3 redirects any 'bootstrap*' choice to the
        # bootstrap3 template set.
        self.bs_version = kwargs.get('bs_version') or 2
        if self.css_framework.startswith('bootstrap'):
            if self.bs_version in (3, '3'):
                self.css_framework = 'bootstrap3'
        # Foundation has no pagination size classes, so the size is dropped.
        self.link_size = kwargs.get('link_size', '')
        if self.link_size:
            if self.css_framework == 'foundation':
                self.link_size = ''
            else:
                self.link_size = ' pagination-{0}'.format(self.link_size)
        self.alignment = kwargs.get('alignment', '')
        if self.alignment and self.css_framework.startswith('bootstrap'):
            self.alignment = ' pagination-{0}'.format(self.alignment)
        self.href = kwargs.get('href', None)
        self.anchor = kwargs.get('anchor', None)
        self.show_single_page = kwargs.get('show_single_page', False)
        # Bind the HTML templates for the selected framework.
        self.link = LINK
        self.current_page_fmt = CURRENT_PAGES[self.css_framework]
        self.link_css_fmt = CSS_LINKS[self.css_framework]
        self.gap_marker_fmt = GAP_MARKERS[self.css_framework]
        self.prev_disabled_page_fmt = PREV_DISABLED_PAGES[self.css_framework]
        self.next_disabled_page_fmt = NEXT_DISABLED_PAGES[self.css_framework]
        self.prev_page_fmt = PREV_PAGES[self.css_framework]
        self.next_page_fmt = NEXT_PAGES[self.css_framework]
        self.css_end_fmt = CSS_LINKS_END[self.css_framework]
        self.init_values()

    def page_href(self, page):
        """Build the href for *page*; None stands for the bare first page."""
        if self.href:
            url = self.href.format(page or 1)
        else:
            self.args[self.page_parameter] = page
            if self.anchor:
                url = url_for(self.endpoint, _anchor=self.anchor, **self.args)
            else:
                url = url_for(self.endpoint, **self.args)
        # Need to return a unicode object
        return url.decode('utf8') if PY2 else url

    def init_values(self):
        """Derive page counts and capture the current request's endpoint/args."""
        current_total = self.found if self.search else self.total
        pages = divmod(current_total, self.per_page)
        # Round up when there is a partial last page.
        self.total_pages = pages[0] + 1 if pages[1] else pages[0]
        self.has_prev = self.page > 1
        self.has_next = self.page < self.total_pages
        # Remember the current view's arguments so every page link
        # reproduces the same query, only swapping the page parameter.
        args = request.args.copy()
        args.update(request.view_args.copy())
        self.args = {}
        for k, v in args.lists():
            # Flatten single-valued parameters; keep repeated ones as lists.
            if len(v) == 1:
                self.args[k] = v[0]
            else:
                self.args[k] = v
        self.endpoint = request.endpoint

    @property
    def prev_page(self):
        """HTML for the 'previous' link (disabled form on the first page)."""
        if self.has_prev:
            # Going back to page 1 uses None so the page parameter is
            # left out of the generated URL.
            page = self.page - 1 if self.page > 2 else None
            url = self.page_href(page)
            return self.prev_page_fmt.format(url, self.prev_label)
        return self.prev_disabled_page_fmt.format(self.prev_label)

    @property
    def next_page(self):
        """HTML for the 'next' link (disabled form on the last page)."""
        if self.has_next:
            url = self.page_href(self.page + 1)
            return self.next_page_fmt.format(url, self.next_label)
        return self.next_disabled_page_fmt.format(self.next_label)

    @property
    def first_page(self):
        """HTML for the link to page 1."""
        # current page is first page
        if self.has_prev:
            return self.link.format(self.page_href(None), 1)
        return self.current_page_fmt.format(1)

    @property
    def last_page(self):
        """HTML for the link to the last page."""
        if self.has_next:
            url = self.page_href(self.total_pages)
            return self.link.format(url, self.total_pages)
        return self.current_page_fmt.format(self.page)

    @property
    def pages(self):
        """Sequence of page numbers to render; None marks a '...' gap."""
        # Few enough pages: no window/gap logic needed.
        if self.total_pages < self.inner_window * 2 - 1:
            return range(1, self.total_pages + 1)
        pages = []
        win_from = self.page - self.inner_window
        win_to = self.page + self.inner_window
        # Clamp the sliding window to the valid page range while trying
        # to keep its width constant.
        if win_to > self.total_pages:
            win_from -= win_to - self.total_pages
            win_to = self.total_pages
        if win_from < 1:
            win_to = win_to + 1 - win_from
            win_from = 1
            if win_to > self.total_pages:
                win_to = self.total_pages
        # Leading part: "1..outer_window+1" plus a gap, or a straight run
        # when the window already touches the start.
        if win_from > self.inner_window:
            pages.extend(range(1, self.outer_window + 1 + 1))
            pages.append(None)
        else:
            pages.extend(range(1, win_to + 1))
        # Trailing part: window, optional gap, then the last pages.
        if win_to < self.total_pages - self.inner_window + 1:
            if win_from > self.inner_window:
                pages.extend(range(win_from, win_to + 1))
            pages.append(None)
            pages.extend(range(self.total_pages - 1, self.total_pages + 1))
        elif win_from > self.inner_window:
            pages.extend(range(win_from, self.total_pages + 1))
        else:
            pages.extend(range(win_to + 1, self.total_pages + 1))
        return pages

    def single_page(self, page):
        """HTML for one page entry (the current page is not a link)."""
        if page == self.page:
            return self.current_page_fmt.format(page)
        if page == 1:
            return self.first_page
        if page == self.total_pages:
            return self.last_page
        return self.link.format(self.page_href(page), page)

    def _get_single_page_link(self):
        """Full pagination markup for the single-page case."""
        s = [self.link_css_fmt.format(self.link_size, self.alignment)]
        s.append(self.prev_page)
        s.append(self.single_page(1))
        s.append(self.next_page)
        s.append(self.css_end_fmt)
        # Foundation aligns via an extra wrapping div.
        if self.css_framework == 'foundation' and self.alignment:
            s.insert(0, F_ALIGNMENT.format(self.alignment))
            s.append('</div>')
        return Markup(''.join(s))

    @property
    def links(self):
        """Get all the pagination links."""
        if self.total_pages <= 1:
            if self.show_single_page:
                return self._get_single_page_link()
            return ''
        s = [self.link_css_fmt.format(self.link_size, self.alignment)]
        s.append(self.prev_page)
        for page in self.pages:
            # None entries become the framework's gap marker.
            s.append(self.single_page(page) if page else self.gap_marker_fmt)
        s.append(self.next_page)
        s.append(self.css_end_fmt)
        if self.css_framework == 'foundation' and self.alignment:
            s.insert(0, F_ALIGNMENT.format(self.alignment))
            s.append('</div>')
        return Markup(''.join(s))

    @property
    def info(self):
        """Get the pagination information."""
        start = 1 + (self.page - 1) * self.per_page
        end = start + self.per_page - 1
        # Clamp to the real record count (search hits when searching).
        if end > self.total:
            end = self.total if not self.search else self.found
        if start > self.total:
            start = self.total if not self.search else self.found
        s = ['<div class="pagination-page-info">']
        page_msg = self.search_msg if self.search else self.display_msg
        if self.format_total:
            total_text = '{0:,}'.format(self.total)
        else:
            total_text = '{0}'.format(self.total)
        if self.format_number:
            start_text = '{0:,}'.format(start)
            end_text = '{0:,}'.format(end)
        else:
            start_text = start
            end_text = end
        s.append(page_msg.format(found=self.found,
                                 total=total_text,
                                 start=start_text,
                                 end=end_text,
                                 record_name=self.record_name,
                                 )
                 )
        s.append('</div>')
        return Markup(''.join(s))
| MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/flask_paginate/__init__.py | Python | mit | 13,862 |
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
define websocket exceptions
"""
class WebSocketException(Exception):
    """Base class for every exception raised by the websocket library."""
class WebSocketProtocolException(WebSocketException):
    """Raised when the websocket protocol is invalid."""
class WebSocketPayloadException(WebSocketException):
    """Raised when a websocket payload is invalid."""
class WebSocketConnectionClosedException(WebSocketException):
    """Raised when the remote host closed the connection or some
    network error happened."""
class WebSocketTimeoutException(WebSocketException):
    """Raised when a socket timeout occurs while reading or writing data."""
class WebSocketProxyException(WebSocketException):
    """Raised when a proxy error occurs."""
class WebSocketBadStatusException(WebSocketException):
    """
    WebSocketBadStatusException will be raised when we get bad handshake status code.
    """

    def __init__(self, message, status_code, status_message=None, resp_headers=None):
        # `message` is a %-format template expecting two fields, e.g.
        # "Handshake status %d %s". Note that status_message is only
        # interpolated into the text; it is not stored on the instance.
        msg = message % (status_code, status_message)
        super(WebSocketBadStatusException, self).__init__(msg)
        self.status_code = status_code  # numeric HTTP status of the handshake
        self.resp_headers = resp_headers  # raw response headers, may be None
class WebSocketAddressException(WebSocketException):
    """Raised when the websocket address info cannot be found."""
| youtube/cobalt | third_party/websocket-client/websocket/_exceptions.py | Python | bsd-3-clause | 2,406 |
#!/usr/bin/python
#=======================================================================
# Copyright Nicholas Tuckett 2015.
# Distributed under the MIT License.
# (See accompanying file license.txt or copy at
# http://opensource.org/licenses/MIT)
#=======================================================================
'''
PiMony -- Smart remote control prototype
This is the main module for PiMony, a prototype smart remote control program
@author: Nicholas Tuckett
@copyright: 2014 Nicholas Tuckett. All rights reserved.
@license: private
@contact: pimony@magwitch.uk.net
@deffield updated: Updated
'''
import sys
import os
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from PyGameInterface import PyGameInterface
__all__ = []
__version__ = 0.1
__date__ = '2014-10-26'
__updated__ = '2014-10-26'
class CLIError(Exception):
    '''Generic exception to raise and log different fatal errors.'''
    def __init__(self, msg):
        # Bug fix: the previous call ``super(CLIError).__init__(type(self))``
        # created an *unbound* super object and re-initialized that object
        # itself, so Exception.__init__ was never called and ``e.args``
        # stayed empty. Initialize the base class with the formatted
        # message instead; __str__/__unicode__ keep returning the same
        # "E: ..." text as before.
        formatted = "E: %s" % msg
        super(CLIError, self).__init__(formatted)
        self.msg = formatted
    def __str__(self):
        return self.msg
    def __unicode__(self):
        return self.msg
def main(argv=None):  # IGNORE:C0111
    '''Command line options.

    Parses the CLI, optionally attaches the pydevd remote debugger, then
    starts the PyGame UI on the framebuffer. Returns 0 on Ctrl-C.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    # NOTE(review): program_version_message and program_name are built but
    # only used by the commented-out error handler below.
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    # Second line of the module docstring serves as the short description.
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by Nicholas Tuckett on %s.
Copyright 2014 Nicholas Tuckett. All rights reserved.
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument("-d", "--debug", dest="debug", help="Specify debug mode: provide remote host name as value")
        # Process arguments
        args = parser.parse_args()
        if args.debug:
            # Attach to a pydevd remote-debug server running on the
            # host named by --debug.
            sys.path.append(r'/home/pi/pysrc')
            import pydevd
            pydevd.settrace(args.debug)
        interface = PyGameInterface()
        interface.use_framebuffer()
        interface.run()
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
#    except Exception, e:
#        if DEBUG or TESTRUN:
#            raise(e)
#        indent = len(program_name) * " "
#        sys.stderr.write(program_name + ": " + repr(e) + "\n")
#        sys.stderr.write(indent + " for help use --help")
#        return 2
if __name__ == "__main__":
sys.exit(main())
| dozencrows/PiMony | PiMony.py | Python | mit | 2,894 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import serial
import time
import Queue
import thread
class Sercomm(object):
def __init__(self):
try:
self.ser = serial.Serial(
port='/dev/ttyUSB0',
baudrate=19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS)
except:
print "Fehler mit der Seriellen Schnittstelle!\nBitte Daten in Datei sercomm.py ueberpruefen!"
exit()
self.warteschlange = Queue.Queue()
self.lock = thread.allocate_lock()
def schreiben(self,befehl):
self.ser.write(befehl)
def lesen(self,befehl):
self.lock.acquire()
self.warteschlange.put(befehl, True)
if self.warteschlange.empty() == True:
print "Warteschlange leer, gehe zurรผck!"
return
self.ser.write(self.warteschlange.get(True))
out = ''
check = ''
time.sleep(0.1)
while self.ser.inWaiting() > 0:
check= self.ser.read(1)
out += check
if check == ";":
break
self.warteschlange.task_done()
self.lock.release()
if out == '':
out = 'Leere Antwort'
return out
def schliessen(self):
self.ser.close()
def main():
    """Interactive smoke test: prompt for a CAT command, print the reply,
    then close the port."""
    doit = Sercomm()
    # print ("Schalte 1 Band hoch")
    # doit.schreiben("BU;")
    # time.sleep(3)
    seq = raw_input("Bitte Befehl eingeben zum Auslesen\n")
    # print ("Lese aktuelle Frequenz VFO A aus")
    print "Eingegebener Befehl: "+seq+"\n"
    print "Antwort des Transceivers: "+doit.lesen(seq)+"\n"
    doit.schliessen()
| dora71/pyrigcontrol | sercomm.py | Python | agpl-3.0 | 1,642 |
#!/usr/bin/env python3
"""Retrieve results from the DuckDuckGo zero-click API in simple HTML format."""
import json as jsonlib
import logging
import re
import urllib.request, urllib.error, urllib.parse
__version__ = (1, 0, 0)
def results2html(results, results_priority=None, max_number_of_results=None,
                 ignore_incomplete=True, always_show_related=False,
                 header_start_level=1, hide_headers=False, hide_signature=False):
    """Render a Results object as an HTML fragment.

    Sections are emitted in *results_priority* order; 'related' topics are
    dropped whenever any other section is complete, unless
    *always_show_related* is set. Returns '' when nothing is renderable.
    """
    if not results:
        return ''
    if not results_priority:
        results_priority = ['answer', 'abstract', 'definition', 'results',
                            'infobox', 'redirect', 'related']
    if not always_show_related:
        non_related = [name for name in results_priority if name != 'related']
        if any(results.get(name).is_complete() for name in non_related):
            results_priority = non_related
    header_fmt = '<h{level:d}>{title}</h{level:d}>'
    para_fmt = '<p>{contents}</p>'
    rendered = []
    emitted = 0
    top_items = [results.get(name) for name in results_priority]
    for level, item in _iterchildren(header_start_level, top_items):
        body = item.as_html()
        usable = body and (not ignore_incomplete or item.is_complete())
        # A header is shown for any named item that renders something
        # itself or has children that might.
        if not hide_headers and item.name and (usable or item.children()):
            rendered.append(header_fmt.format(title=item.name, level=level))
        if usable:
            rendered.append(para_fmt.format(contents=body))
            emitted += 1
            if max_number_of_results and emitted >= max_number_of_results:
                break
    rendered = [chunk for chunk in rendered if chunk]
    if not rendered:
        return ''
    if not hide_signature:
        rendered.append('<footer><small>Results from DuckDuckGo</small></footer>')
    return ''.join(rendered).strip()
def search(query, useragent='duckduckgo2html', **kwargs):
    """Query the DuckDuckGo zero-click API and return a Results object.

    Extra keyword arguments override the default API parameters. Returns
    None after logging on HTTP/URL errors; any other exception is logged
    and re-raised.
    """
    params = {
        'q': query,
        'format': 'json',
        'pretty': '1',
        'no_redirect': '1',
        'no_html': '1',
        'skip_disambig': '0',
    }
    params.update(kwargs)
    enc_params = urllib.parse.urlencode(params)
    url = 'http://api.duckduckgo.com/?' + enc_params
    try:
        request = urllib.request.Request(url, headers={'User-Agent': useragent})
        response = urllib.request.urlopen(request)
        json = jsonlib.loads(response.read().decode('utf-8'))
        response.close()
        return Results(json)
    except urllib.error.HTTPError as err:
        logging.error('Query failed with HTTPError code %s', err.code)
    except urllib.error.URLError as err:
        logging.error('Query failed with URLError %s', err.reason)
    except Exception:
        logging.error('Unhandled exception')
        raise
    return None
def _iterchildren(start_level, children):
for item in children:
grandchildren = item.children()
yield start_level, item
if grandchildren:
for subitem in _iterchildren(start_level+1, grandchildren):
yield subitem
def _html_url(url, display=None):
if not display:
display = url
return '<a href="{0}">{1}</a>'.format(url, display)
class Results(object):
    """Top-level wrapper around one DuckDuckGo zero-click JSON response."""

    def __init__(self, json):
        # Keep a pretty-printed copy of the raw response for debugging.
        self.json = jsonlib.dumps(json, indent=2)
        self.type = json.get('Type')
        self.answer = Answer(json)
        self.results = _ResultList('Results', json.get('Results', []))
        self.related = _ResultList('Related Topics', json.get('RelatedTopics', []))
        self.abstract = Abstract(json)
        self.definition = Definition(json)
        self.redirect = Redirect(json)
        self.infobox = Infobox(json)

    def get(self, name):
        """Return the named section, or an empty placeholder item when the
        section is missing/falsy."""
        if hasattr(self, name) and getattr(self, name):
            return getattr(self, name)
        return _ResultItemBase()
class _ResultItemBase(object):
"""Base class for results"""
def __init__(self, name=None):
self.name = name
def is_complete(self):
return False
def children(self):
return []
def as_html(self):
return ''
class _ResultList(_ResultItemBase):
    """A list of results"""

    def __init__(self, name, items):
        super().__init__(name)
        self.items = [Result(x) for x in items]

    def children(self):
        # The list renders nothing itself; its Result children do.
        return self.items
class Result(_ResultItemBase):
    """A single result entry, possibly carrying nested sub-topics."""

    def __init__(self, json):
        super().__init__(json.get('Name', '') if json else '')
        # Disambiguation entries nest further results under 'Topics'.
        self.topics = [Result(elem) for elem in json.get('Topics', [])]
        self.html = json.get('Result', '') if json else ''
        self.text = json.get('Text', '') if json else ''
        self.url = json.get('FirstURL', '') if json else ''

    def is_complete(self):
        return True if self.text else False

    def children(self):
        return self.topics

    def as_html(self):
        if self.html:
            # Insert a space after 'a>' where none follows, so the link
            # and the trailing text do not run together.
            return Result._rex_sub.sub('a> ', self.html)
        elif self.text:
            return self.text

    # Matches 'a>' not already followed by a space.
    _rex_sub = re.compile(r'a>(?! )')
class Abstract(_ResultItemBase):
    """Topic-summary section built from the 'Abstract*' response fields."""

    def __init__(self, json):
        super().__init__('Abstract')
        # NOTE(review): indexes directly (KeyError if absent), unlike
        # Results which uses .get — presumably these keys are always
        # present in API responses; confirm against the API docs.
        self.html = json['Abstract']
        self.text = json['AbstractText']
        self.url = json['AbstractURL']
        self.source = json['AbstractSource']
        self.heading = json['Heading']

    def is_complete(self):
        return True if self.html or self.text else False

    def as_html(self):
        # "<b>heading</b> - body - <a>source</a>", skipping missing parts.
        html_list = []
        if self.heading:
            html_list.append('<b>{0}</b>'.format(self.heading))
        if self.html:
            html_list.append(self.html)
        elif self.text:
            html_list.append(self.text)
        if self.url:
            html_list.append(_html_url(self.url, self.source))
        return ' - '.join(html_list)
class Answer(_ResultItemBase):
    """Instant-answer section ('Answer'/'AnswerType' response fields)."""

    def __init__(self, json):
        super().__init__('Answer')
        self.text = json['Answer']
        self.type = json['AnswerType']
        self.url = None  # answers carry no link

    def is_complete(self):
        return True if self.text else False

    def as_html(self):
        # Preserve line breaks as <br>; strip carriage returns.
        return self.text.replace('\n', '<br>').replace('\r', '')
class Definition(_ResultItemBase):
    """Dictionary-style definition section ('Definition*' response fields)."""

    def __init__(self, json):
        super().__init__('Definition')
        self.text = json['Definition']
        self.url = json['DefinitionURL']
        self.source = json['DefinitionSource']

    def is_complete(self):
        return True if self.text else False

    def as_html(self):
        """Render "text - <a>source</a>", or whichever part is available."""
        if self.text and self.url:
            return self.text + ' - ' + _html_url(self.url, self.source)
        elif self.text:
            return self.text
        elif self.url:
            return _html_url(self.url, self.source)
        # Bug fix: previously this method fell through and returned None
        # when both text and url were empty, unlike the other as_html()
        # implementations which return strings; return '' so callers can
        # join/format results without None checks.
        return ''
class Redirect(_ResultItemBase):
    """Bang-redirect section ('Redirect' response field)."""

    def __init__(self, json):
        super().__init__('Redirect')
        self.url = json['Redirect']

    def is_complete(self):
        return True if self.url else False

    def as_html(self):
        # Returns None (treated as "nothing" by callers) when no url is set.
        return _html_url(self.url) if self.url else None
class Infobox(_ResultItemBase):
    """Structured key/value data from the 'Infobox' response section."""

    class Content(object):
        # One infobox row; only rows with data_type 'string' are rendered.
        def __init__(self, json):
            self.data_type = json.get('data_type', '') if json else ''
            self.label = json.get('label', '') if json else ''
            self.value = json.get('value', '') if json else ''

        def as_html(self):
            # Returns None (filtered out by Infobox.as_html) for
            # non-string or incomplete rows.
            if self.data_type == 'string' and self.label and self.value:
                return '<b>{0}</b> {1}'.format(self.label, self.value)

    def __init__(self, json):
        super().__init__('Infobox')
        infobox = json.get('Infobox') if json.get('Infobox') else {}
        self.meta = infobox.get('meta', [])
        self.content = [Infobox.Content(x) for x in infobox.get('content', [])]

    def is_complete(self):
        return True if self.content else False

    def as_html(self):
        # One row per line, skipping rows that rendered to nothing.
        contents = [x.as_html() for x in self.content]
        return '<br>'.join(x for x in contents if x)
if __name__ == '__main__':
    # CLI entry point: query from argv, else one query per stdin line.
    import argparse
    import sys
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'query',
        nargs='*',
        help='the search query')
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s v{0}.{1}.{2}'.format(*__version__))
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s: %(filename)s: %(message)s')
    if args.query:
        queries = [' '.join(args.query)]
    elif not sys.stdin.isatty():
        # Piped input: treat every line as a separate query.
        queries = sys.stdin.read().splitlines()
    else:
        parser.print_help()
        sys.exit(1)
    for query in queries:
        html = results2html(search(query))
        if html:
            print(html)
        else:
            logging.warning('No results found')
| nsubiron/SublimeSuricate | lib/thirdparty/duckduckgo2html.py | Python | gpl-3.0 | 8,822 |
#
# PicoTCP test library
# Author: Maarten Vandersteegen
#
import os
import time
from ctypes import *
#-----------------------------------------------------------------#
# Custom C data types #
#-----------------------------------------------------------------#
class pico_ip4(Structure):
    """Mirror of the C ``struct pico_ip4``: one packed 32-bit IPv4 address."""
    _pack_ = 1
    _fields_ = [("addr", c_uint)]
class pico_icmp4_stats(Structure):
    """Mirror of the C ``struct pico_icmp4_stats`` passed to the ping
    callback (destination, payload size, sequence, round-trip time, TTL,
    error code)."""
    _fields_ = [("dst", pico_ip4),
                ("size", c_ulong),
                ("seq", c_ulong),
                ("time", c_ulonglong),
                ("ttl", c_ulong),
                ("err", c_int)]
#-----------------------------------------------------------------#
# Test library #
#-----------------------------------------------------------------#
class PicoTCP(object):
    """Robot Framework test library driving a PicoTCP shared library
    through ctypes."""
    # One shared instance for the whole Robot Framework test run.
    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(self, dll="target/libpicotcp.so"):
        """Load device under test

        ``dll`` is resolved relative to the current working directory.
        """
        print "Loading dll %s"%dll
        self.dut = CDLL(os.getcwd() + "/" + dll)
        self.ping_stats = []

    def stack_init(self, ip, netmask, tun="tun0"):
        """Initialize PicoTCP

        Brings the stack up on the given tun device; asserts on failure.
        """
        r = self.dut.picotcp_init(c_char_p(ip), c_char_p(netmask),
                                  c_char_p(tun))
        assert r == 0, "Initialize failed, rc = %d"%r

    def get_ping_stats(self):
        """Retreive the ping stats

        Returns the list of per-reply dicts collected by the last ping().
        """
        return self.ping_stats

    def stack_tick(self, period, interval=0.005):
        """Tick the stack for some period of time

        Calls pico_stack_tick() every ``interval`` seconds for ``period``
        seconds (wall clock).
        """
        start = time.time()
        while time.time() < (start + period):
            self.dut.pico_stack_tick()
            time.sleep(interval)

    def ping(self, dest, count, interval, timeout, size):
        """Run ping client

        Sends ``count`` pings of ``size`` bytes to ``dest`` and ticks the
        stack long enough for replies; results land in self.ping_stats.
        """
        self.ping_stats = []

        def cb_ping(s):
            """Ping client callback as closure to be able to
            access the self object without the need to provide
            it as an argument
            """
            print "Callback called"
            stats = {}
            stats["err"] = s.contents.err
            host = create_string_buffer(30)
            self.dut.pico_ipv4_to_string(host, s.contents.dst.addr)
            stats["host"] = host.value
            stats["size"] = s.contents.size
            stats["seq"] = s.contents.seq
            stats["time"] = s.contents.time
            stats["ttl"] = s.contents.ttl
            self.ping_stats.append(stats)

        # C signature: void(struct pico_icmp4_stats *s)
        # NOTE: ``cb`` must stay referenced (it does, via this local) while
        # the ping runs, or ctypes would garbage-collect the thunk.
        Callback = CFUNCTYPE(None, POINTER(pico_icmp4_stats))
        cb = Callback(cb_ping)
        self.dut.pico_icmp4_ping(c_char_p(dest),
                                 c_int(count),
                                 c_int(int(interval*1000)),
                                 c_int(int(timeout*1000)),
                                 c_int(size), cb)
        # Keep ticking slightly past the timeout so all replies arrive.
        self.stack_tick(timeout + 1)
if __name__ == "__main__":
    # Manual smoke test: bring the stack up on tun0, configure the host
    # side of the tunnel, then ping the host address and dump the stats.
    dut = PicoTCP("../target/libpicotcp.so")
    dut.stack_init("192.168.3.3", "255.255.255.0")
    os.system("ifconfig tun0 inet 192.168.3.1 netmask 255.255.255.0")
    dut.ping("192.168.3.1", 10, 1, 15, 64)
    print dut.get_ping_stats()
| maartenvds/robot-framework-examples | python-c/lib/PicoTCP.py | Python | apache-2.0 | 3,265 |
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from .abbr import AbbrExtension
from .align import AlignExtension
from .codehilite import CodeHiliteExtension
from .comments import CommentsExtension
from .customblock import CustomBlockExtension
from .delext import DelExtension
from .emoticons import EmoticonExtension
from .fenced_code import FencedCodeExtension
from .footnotes import FootnoteExtension
from .french_typography import FrenchTypographyExtension
from .grid_tables import GridTableExtension
from .header_dec import DownHeaderExtension
from .kbd import KbdExtension
from .mathjax import MathJaxExtension
from .ping import PingExtension
from .smart_legend import SmartLegendExtension
from .subsuperscript import SubSuperscriptExtension
from .tables import TableExtension
from .title_anchor import TitleAnchorExtension
from .urlize import UrlizeExtension
from .video import VideoExtension
class ZdsExtension(Extension):
""" Add various extensions to Markdown class."""
def __init__(self, *args, **kwargs):
    """Declare the extension's configurable options.

    Each entry maps an option name to ``[default value, help string]``,
    as required by the markdown Extension base class.
    """
    self.config = {
        'inline': [False, ''],        # read back in extendZMarkdown
        'emoticons': [{}, ''],        # passed to EmoticonExtension
        'js_support': [False, ''],    # passed to VideoExtension
        'ping_url': [None, ''],       # passed to PingExtension
        'marker_key': ["", 'Unique key for the extract used in reference elements'],
        'enable_titles': [False, ''], # gates TitleAnchorExtension
    }
    super(ZdsExtension, self).__init__(*args, **kwargs)
def _create_common_extension(self):
    """Build the extensions shared by inline and full rendering modes."""
    # create extensions :
    sub_ext = SubSuperscriptExtension()  # Sub and Superscript support
    del_ext = DelExtension()  # Del support
    urlize_ext = UrlizeExtension()  # Autolink support
    typo_ext = FrenchTypographyExtension()  # French typography
    return [sub_ext,  # Subscript support
            del_ext,  # Del support
            urlize_ext,  # Autolink support
            typo_ext]
def _create_non_inline_extension(self):
mathjax_ext = MathJaxExtension() # MathJax support
kbd_ext = KbdExtension() # Keyboard support
emo_ext = EmoticonExtension(emoticons=self.emoticons) # smileys support
customblock_ext = CustomBlockExtension(classes={
"s(ecret)?": "spoiler",
"i(nformation)?": "information ico-after",
"q(uestion)?": "question ico-after",
"a(ttention)?": "warning ico-after",
"e(rreur)?": "error ico-after",
}) # CustomBlock support
align_ext = AlignExtension() # Right align and center support
video_ext = VideoExtension(js_support=self.js_support) # Video support
gridtable_ext = GridTableExtension() # Grid Table support
comment_ext = CommentsExtension(start_tag="<--COMMENT", end_tag="COMMENT-->") # Comment support
legend_ext = SmartLegendExtension() # Smart Legend support
dheader_ext = DownHeaderExtension(offset=2) # Offset header support
ping_ext = PingExtension(ping_url=self.ping_url) # Ping support
exts = [AbbrExtension(), # Abbreviation support, included in python-markdown
FootnoteExtension(unique_prefix=self.marker_key),
# Footnotes support, included in python-markdown
TableExtension(), # Tables support, included in python-markdown
# Extended syntaxe for code block support, included in python-markdown
CodeHiliteExtension(linenums=True, guess_lang=False),
customblock_ext, # CustomBlock support
kbd_ext, # Kbd support
emo_ext, # Smileys support
video_ext, # Video support
gridtable_ext, # Grid tables support
align_ext, # Right align and center support
dheader_ext, # Down Header support
mathjax_ext, # Mathjax support
FencedCodeExtension(),
comment_ext, # Comment support
legend_ext, # Legend support
ping_ext, # Ping support
]
if self.enable_titles:
title_anchor_ext = TitleAnchorExtension(link_position="after", marker_key=self.marker_key)
exts.append(title_anchor_ext)
return exts
def extendZMarkdown(self, md, md_globals):
""" Register extension instances. """
config = self.getConfigs()
self.inline = config.get("inline", True)
self.emoticons = config.get("emoticons", {})
self.js_support = config.get("js_support", False)
self.ping_url = config.get('ping_url', None)
if self.ping_url is None:
self.ping_url = lambda _: None
self.marker_key = config.get("marker_key", "")
self.enable_titles = config.get("enable_titles", True)
md.inline = self.inline
# Define used ext
exts = self._create_common_extension()
if not self.inline:
exts.extend(self._create_non_inline_extension())
md.registerExtensions(exts, {})
if self.inline:
md.inlinePatterns.pop("image_link")
md.inlinePatterns.pop("image_reference")
md.inlinePatterns.pop("reference")
md.inlinePatterns.pop("short_reference")
md.inlinePatterns.pop("linebreak")
def makeExtension(*args, **kwargs):
    """Entry point used by python-markdown to instantiate the extension."""
    extension = ZdsExtension(*args, **kwargs)
    return extension
| zestedesavoir/Python-ZMarkdown | zmarkdown/extensions/zds.py | Python | bsd-3-clause | 5,411 |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for describing firewall rules."""
from googlecloudsdk.compute.lib import base_classes
class Describe(base_classes.GlobalDescriber):
  """Describe a Google Compute Engine firewall rule.
  *{command}* displays all data associated with a Google Compute
  Engine firewall rule in a project.
  """
  # NOTE: the class docstring above is rendered as gcloud help text
  # (*{command}* is substituted), so its wording is user-facing.
  @staticmethod
  def Args(parser):
    # Register the standard describer flags plus the --fields selector.
    cli = Describe.GetCLIGenerator()
    base_classes.GlobalDescriber.Args(parser, 'compute.firewalls', cli,
                                      'compute.firewall-rules')
    base_classes.AddFieldsFlag(parser, 'firewalls')
  @property
  def service(self):
    # API service object used to fetch the resource.
    return self.compute.firewalls
  @property
  def resource_type(self):
    # Collection name used by the base describer machinery.
    return 'firewalls'
| wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/firewall_rules/describe.py | Python | apache-2.0 | 750 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFCONFIGClusterResolver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python import framework
from tensorflow.python.client import session
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager.context import LogicalDevice
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
# Shorthand alias for the mock library bundled with the TF test framework.
mock = test.mock
@test_util.run_all_in_graph_and_eager_modes
class TFConfigClusterResolverTest(test.TestCase):
  """Tests for cluster/task resolution driven by the TF_CONFIG env var.

  Each test writes a JSON document into os.environ['TF_CONFIG'] and checks
  what TFConfigClusterResolver derives from it (cluster spec, master address,
  task type/index, rpc layer, accelerator counts).
  """
  def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
    # Compare the spec against the proto through every round-trip
    # representation: directly, via ClusterSpec(spec), via its cluster_def,
    # and via its dict form.
    self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
    self.assertProtoEquals(
        expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
  def testNormalClusterSpecRead(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    expected_proto = """
    job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
                     tasks { key: 1 value: 'ps1:2222' } }
    job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
                         tasks { key: 1 value: 'worker1:2222' }
                         tasks { key: 2 value: 'worker2:2222' } }
    """
    actual_cluster_spec = cluster_resolver.cluster_spec()
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
  def testAutomaticMasterRead(self):
    # With no overrides, master() resolves to this task's own address.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('ps0:2222', cluster_resolver.master())
  def testSpecifiedTaskTypeAndIndexMasterRead(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('worker1:2222', cluster_resolver.master('worker', 1))
  def testSessionMasterRead(self):
    # An explicit "session_master" entry takes precedence over the task address.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "session_master": "sessionmaster:2222",
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('sessionmaster:2222', cluster_resolver.master())
  def testRpcLayerRead(self):
    # "rpc_layer" prefixes the master address with a protocol scheme.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('grpc://ps0:2222', cluster_resolver.master())
  def testTaskTypeIndexRpcRead(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('ps', cluster_resolver.task_type)
    self.assertEqual(0, cluster_resolver.task_id)
    self.assertEqual('grpc', cluster_resolver.rpc_layer)
  def testParameterOverrides(self):
    # Constructor arguments and attribute assignment both override TF_CONFIG.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": 1
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver(task_type='ps', task_id=0)
    self.assertEqual('grpc://ps0:2222', cluster_resolver.master())
    self.assertEqual('ps', cluster_resolver.task_type)
    self.assertEqual(0, cluster_resolver.task_id)
    cluster_resolver.task_type = 'worker'
    cluster_resolver.task_id = 1
    cluster_resolver.rpc_layer = 'test'
    self.assertEqual('test://worker1:2222', cluster_resolver.master())
    self.assertEqual('worker', cluster_resolver.task_type)
    self.assertEqual(1, cluster_resolver.task_id)
    self.assertEqual('test', cluster_resolver.rpc_layer)
  def testTaskTypeCastToString(self):
    # A numeric JSON task type must be normalized to a string.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "123456": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": 123456,
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('123456', cluster_resolver.task_type)
  def testTaskIndexCastToInteger(self):
    # A string JSON task index must be normalized to an integer.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": "1"
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual(1, cluster_resolver.task_id)
  def testTaskIndexOverride(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "worker": ["worker0:2222", "worker1:2222"]
      },
      "task": {
        "type": "worker",
        "index": "0"
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver(task_id=1)
    self.assertEqual(1, cluster_resolver.task_id)
  def testZeroItemsInClusterSpecMasterRead(self):
    # Empty TF_CONFIG: no cluster, so no master address.
    os.environ['TF_CONFIG'] = """
    {}
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('', cluster_resolver.master())
  def testOneItemInClusterSpecMasterRead(self):
    # Single-worker cluster with no task section also yields no master.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "worker": ["worker0:2222"]
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('', cluster_resolver.master())
  @mock.patch.object(framework.config, 'list_logical_devices')
  @mock.patch.object(session.BaseSession, 'list_devices')
  def testNumAcceleratorsFilterTasksByEnvVar(self, mock_list_devices,
                                             mock_eager_list_devices):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "worker1": ["w10:2222"],
        "worker2": ["w21:2222", "w22:2222", "w23:2222", "w24:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "worker1",
        "index": "0"
      }
    }
    """
    # Mix devices from two jobs; num_accelerators must count only the ones
    # belonging to the task selected by TF_CONFIG (or by explicit override).
    devices = [
        LogicalDevice('/job:worker1/task:0/device:TPU:0', 'TPU'),
        LogicalDevice('/job:worker1/task:0/device:TPU:1', 'TPU'),
        LogicalDevice('/job:worker1/task:0/device:GPU:0', 'GPU'),
        LogicalDevice('/job:worker1/task:0/device:GPU:1', 'GPU'),
        LogicalDevice('/job:worker2/task:1/device:TPU:2', 'TPU'),
        LogicalDevice('/job:worker2/task:2/device:TPU:3', 'TPU'),
        LogicalDevice('/job:worker2/task:3/device:GPU:2', 'GPU'),
        LogicalDevice('/job:worker2/task:4/device:GPU:3', 'GPU'),
    ]
    device_list = [
        session._DeviceAttributes(d.name, d.device_type, 1024, 0)
        for d in devices
    ]
    mock_eager_list_devices.return_value = devices
    mock_list_devices.return_value = device_list
    resolver = TFConfigClusterResolver()
    # By default we read from TF_CONFIG
    self.assertEqual(resolver.num_accelerators(), {'TPU': 2, 'GPU': 2})
    # Override still works when we want it to
    self.assertEqual(resolver.num_accelerators(task_type='worker2', task_id=3),
                     {'GPU': 1})
# Run the tests via the TensorFlow test runner when executed directly.
if __name__ == '__main__':
  test.main()
| karllessard/tensorflow | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver_test.py | Python | apache-2.0 | 9,199 |
import _plotly_utils.basevalidators
class BValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the `b` (bottom) field of `layout.title.pad`."""

    def __init__(self, plotly_name="b", parent_name="layout.title.pad", **kwargs):
        # Pop validator metadata out of kwargs so callers may override it.
        edit_type = kwargs.pop("edit_type", "layoutstyle")
        role = kwargs.pop("role", "style")
        super(BValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/layout/title/pad/_b.py | Python | mit | 440 |
from django import template
from django.template.loader import render_to_string
from django.utils import six
from django.utils.html import mark_safe
import re
# Library instance that exposes this module's filters to Django templates.
register = template.Library()
@register.filter
def messages_style(messages):
    """Template filter: render Django messages with the theme's snippet."""
    context = {'messages': messages}
    return render_to_string('tag-messages-snippet.html', context)
| nirvaris/nirvaris-theme-default | themedefault/templatetags/theme_messages_tags.py | Python | mit | 339 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: make RuleGroup.action nullable."""
    def forwards(self, orm):
        # Changing field 'RuleGroup.action'
        db.alter_column('rcal_rulegroup', 'action', self.gf('django.db.models.fields.SmallIntegerField')(null=True))
    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'RuleGroup.action'
        raise RuntimeError("Cannot reverse this migration. 'RuleGroup.action' and its values cannot be restored.")
    # Frozen ORM snapshot generated by South at migration time; historical
    # record -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'rcal.event': {
            'Meta': {'object_name': 'Event'},
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rcal.Resource']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'rcal.resource': {
            'Meta': {'object_name': 'Resource'},
            'color': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'rcal.rule': {
            'Meta': {'ordering': "('id',)", 'object_name': 'Rule'},
            'check': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rule_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rcal.RuleGroup']"})
        },
        'rcal.rulegroup': {
            'Meta': {'object_name': 'RuleGroup'},
            'action': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }
complete_apps = ['rcal'] | apollo13/django-rcal | rcal/migrations/0002_chg_field_rulegroup_action.py | Python | bsd-3-clause | 5,667 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Serve uploaded media (dev only), then route: Django admin, the SPA index
# page, the REST API, and a catch-all that returns the SPA shell so
# client-side routing can handle any other path.
# NOTE(review): `patterns()` with dotted-string views is deprecated since
# Django 1.8 and removed in 1.10 -- confirm the project's Django version
# before modernising this to a plain list of url() entries.
urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'apps.sampleapp.views.index', name='index'),
    url(r'^api/', include('apps.sampleapp.urls')),
    url(r'^.*', 'apps.sampleapp.views.index') #catch-all
)
) | biomassives/django-angular-docker-seed | backend/urls.py | Python | unlicense | 469 |
import os
import click
from keep import cli, utils
@click.command('run', short_help='Executes a saved command.',
               context_settings=dict(ignore_unknown_options=True))
@click.argument('pattern')
@click.argument('arguments', nargs=-1, type=click.UNPROCESSED)
@click.option('--safe', is_flag=True, help='Ignore missing arguments')
@cli.pass_context
def cli(ctx, pattern, arguments, safe):
    """Executes a saved command."""
    # Find saved commands matching the pattern; None means no commands exist.
    matches = utils.grep_commands(pattern)
    if matches == []:
        click.echo('No saved commands matches the pattern {}'.format(pattern))
        return
    if not matches:
        click.echo("No commands to run, Add one by 'keep new'. ")
        return
    chosen = utils.select_command(matches)
    if chosen < 0:
        return
    cmd, desc = matches[chosen]
    pcmd = utils.create_pcmd(cmd)
    raw_params, params, defaults = utils.get_params_in_pcmd(pcmd)
    # Fill parameters from the positional arguments first; prompt for the
    # rest, unless --safe was given (then fall back to defaults or skip).
    remaining = list(arguments)
    filled = {}
    for raw, name, default in zip(raw_params, params, defaults):
        if remaining:
            value = remaining.pop(0)
            click.echo("{}: {}".format(name, value))
            filled[raw] = value
        elif safe:
            if default:
                filled[raw] = default
        else:
            value = click.prompt("Enter value for '{}'".format(name),
                                 default=default if default else None)
            filled[raw] = value
    click.echo("\n")
    final_cmd = utils.substitute_pcmd(pcmd, filled, safe)
    command = "$ {} :: {}".format(final_cmd, desc)
    if click.confirm("Execute\n\t{}\n\n?".format(command), default=True):
        os.system(final_cmd)
| paci4416/keep | keep/commands/cmd_run.py | Python | mit | 1,761 |
import base64
import os
from django.core.paginator import Paginator
from cda.integration import render_cda
from l2vi.integration import gen_cda_xml, send_cda_xml
import collections
from integration_framework.views import get_cda_data
from utils.response import status_response
from hospitals.models import Hospitals
import operator
import re
import time
from datetime import datetime, time as dtime, timedelta
from operator import itemgetter
import pytz
import simplejson as json
from dateutil.relativedelta import relativedelta
from django.contrib.auth.decorators import login_required
from django.core.files.base import ContentFile
from django.db import transaction
from django.db.models import Q, Prefetch
from django.http import HttpRequest
from django.http import JsonResponse
from django.utils import dateformat
from django.utils import timezone
from api import sql_func
from api.dicom import search_dicom_study
from api.patients.views import save_dreg
from api.sql_func import get_fraction_result, get_field_result
from api.stationar.stationar_func import forbidden_edit_dir, desc_to_data
from api.views import get_reset_time_vars
from appconf.manager import SettingManager
from clients.models import Card, DocumentType, Individual, DispensaryReg, BenefitReg
from directions.models import (
DirectionDocument,
DocumentSign,
Napravleniya,
Issledovaniya,
NumberGenerator,
Result,
ParaclinicResult,
Recipe,
MethodsOfTaking,
ExternalOrganization,
MicrobiologyResultCulture,
MicrobiologyResultCultureAntibiotic,
DirectionToUserWatch,
IstochnikiFinansirovaniya,
DirectionsHistory,
MonitoringResult,
TubesRegistration,
DirectionParamsResult,
IssledovaniyaFiles,
)
from directory.models import Fractions, ParaclinicInputGroups, ParaclinicTemplateName, ParaclinicInputField, HospitalService, Researches
from laboratory import settings
from laboratory import utils
from laboratory.decorators import group_required
from laboratory.settings import DICOM_SERVER, TIME_ZONE
from laboratory.utils import current_year, strdatetime, strdate, strtime, tsdatetime, start_end_year, strfdatetime, current_time
from pharmacotherapy.models import ProcedureList, ProcedureListTimes, Drugs, FormRelease, MethodsReception
from results.sql_func import get_not_confirm_direction, get_laboratory_results_by_directions
from results.views import result_normal, result_print
from rmis_integration.client import Client, get_direction_full_data_cache
from slog.models import Log
from statistics_tickets.models import VisitPurpose, ResultOfTreatment, Outcomes, Place
from users.models import DoctorProfile
from utils.common import non_selected_visible_type, none_if_minus_1, values_from_structure_data
from utils.dates import normalize_date, date_iter_range, try_strptime
from utils.dates import try_parse_range
from utils.xh import check_float_is_valid, short_fio_dots
from .sql_func import get_history_dir, get_confirm_direction, filter_direction_department, get_lab_podr, filter_direction_doctor, get_confirm_direction_patient_year, get_patient_contract
from api.stationar.stationar_func import hosp_get_hosp_direction, hosp_get_text_iss
from forms.forms_func import hosp_get_operation_data
from medical_certificates.models import ResearchesCertificate, MedicalCertificates
from utils.data_verification import data_parse
from utils.expertise import get_expertise
@login_required
@group_required("ะะตัะฐัะธะน ะฒัะฐั", "ะัะฐั-ะปะฐะฑะพัะฐะฝั", "ะะฟะตัะฐัะพั ะปะตัะฐัะตะณะพ ะฒัะฐัะฐ", "ะะฐะฟะพะปะฝะตะฝะธะต ะผะพะฝะธัะพัะธะฝะณะพะฒ", "ะกะฒะธะดะตัะตะปัััะฒะพ ะพ ัะผะตััะธ-ะดะพัััะฟ")
def directions_generate(request):
    """Create one or more directions (referrals) for a patient card.

    The POST body carries the card pk, requested services, financing source
    and extra parameters. card_pk == -1 means "the hospital itself is the
    patient" and lazily creates/caches a pseudo-card on the Hospitals record.
    Returns JSON with an ok flag, created direction ids and an optional message.
    """
    result = {"ok": False, "directions": [], "directionsStationar": [], "message": ""}
    if request.method == "POST":
        p = json.loads(request.body)
        card_pk = p.get("card_pk")
        card = None
        if card_pk == -1:
            hospital: Hospitals = request.user.doctorprofile.get_hospital()
            if hospital.client:
                card = hospital.client
            else:
                # No pseudo-card yet: create one via the TFOMS import bridge
                # and cache it on the hospital for subsequent requests.
                card = Individual.import_from_tfoms(
                    {
                        "enp": f"1010{hospital.code_tfoms}",
                        "family": "ะะพะปัะฝะธัะฐ",
                        "given": hospital.safe_short_title[:120],
                        "patronymic": "",
                        "gender": 'ะผ',
                        "birthdate": '2000-01-01',
                    },
                    need_return_card=True,
                )
                hospital.client = card
                hospital.save()
            if card:
                card_pk = card.pk
        else:
            card = Card.objects.get(pk=card_pk)
        if card.base.forbidden_create_napr:
            result["message"] = "ะะปั ะดะฐะฝะฝะพะณะพ ัะธะฟะฐ ะบะฐัั ะฝะตะปัะทั ัะพะทะดะฐัั ะฝะฐะฟัะฐะฒะปะตะฝะธั"
            return JsonResponse(result)
        fin_source = p.get("fin_source", -1)
        # The financing source may arrive as an int pk, a digit string, or a
        # non-numeric code; only the first two are converted to int.
        fin_source_pk = int(fin_source) if (isinstance(fin_source, int) or str(fin_source).isdigit()) else fin_source
        args = [
            card_pk,
            p.get("diagnos"),
            fin_source_pk,
            p.get("history_num"),
            p.get("ofname_pk"),
            request.user.doctorprofile,
            p.get("researches"),
            p.get("comments"),
            p.get("for_rmis"),
            p.get("rmis_data", {}),
        ]
        kwargs = dict(
            vich_code=p.get("vich_code", ""),
            count=p.get("count", 1),
            discount=p.get("discount", 0),
            parent_iss=p.get("parent_iss", None),
            parent_slave_hosp=p.get("parent_slave_hosp", None),
            counts=p.get("counts", {}),
            localizations=p.get("localizations", {}),
            service_locations=p.get("service_locations", {}),
            direction_purpose=p.get("direction_purpose", "NONE"),
            external_organization=p.get("external_organization", "NONE"),
            direction_form_params=p.get("direction_form_params", {}),
            current_global_direction_params=p.get("current_global_direction_params", {}),
            hospital_department_override=p.get("hospital_department_override", -1),
        )
        # Each iteration creates an independent copy of the requested
        # directions; stop at the first failure and surface its message.
        for _ in range(p.get("directions_count", 1)):
            rc = Napravleniya.gen_napravleniya_by_issledovaniya(*args, **kwargs)
            result["ok"] = rc["r"]
            if "message" in rc:
                result["message"] = rc["message"]
            result["directions"].extend(rc["list_id"])
            result["directionsStationar"].extend(rc["list_stationar_id"])
            if not result["ok"]:
                break
        if result["ok"]:
            # Assign accession numbers to the newly created directions.
            for pk in result["directions"]:
                d: Napravleniya = Napravleniya.objects.get(pk=pk)
                d.fill_acsn()
    return JsonResponse(result)
@login_required
@group_required("ะะตัะฐัะธะน ะฒัะฐั", "ะัะฐั-ะปะฐะฑะพัะฐะฝั", "ะัะฐั ะบะพะฝััะปััะฐัะธะน")
def add_additional_issledovaniye(request):
    """Attach extra services (Issledovaniya) to an existing direction.

    Services the direction already contains are skipped; returns the pks of
    the newly created rows, or a failure response when nothing was added.
    """
    body = json.loads(request.body)
    direction_pk = body.get("direction_pk", None)
    researches = body.get("researches", None)
    doc_who_adds = request.user.doctorprofile
    # Services already present on this direction must not be duplicated.
    existing = set(Issledovaniya.objects.values_list("research_id", flat=True).filter(napravleniye_id=direction_pk))
    created_pks = []
    with transaction.atomic():
        for research_pk in researches:
            if research_pk in existing:
                continue
            new_iss = Issledovaniya(napravleniye_id=direction_pk, research_id=research_pk, doc_add_additional=doc_who_adds)
            new_iss.save()
            created_pks.append(new_iss.pk)
    if created_pks:
        return status_response(True, data={"pks": created_pks})
    return status_response(False, "ะะฟะตัะฐัะธั ะฝะต ะฒัะฟะพะปะฝะตะฝะฐ")
@login_required
def directions_history(request):
    """Return a patient's directions history, folded one row per direction.

    The raw SQL helper yields one row per study (issledovaniye); consecutive
    rows of the same direction are folded into a single summary dict.  The
    requested "type" selects the filter; type 5 is a separate branch that
    lists personal contracts instead of directions.
    """
    # SQL-query
    res = {"directions": []}
    request_data = json.loads(request.body)
    pk = request_data.get("patient", -1)
    req_status = request_data.get("type", 4)
    iss_pk = request_data.get("iss_pk", None)
    for_slave_hosp = request_data.get("forHospSlave", False)
    services = request_data.get("services", [])
    services = list(map(int, services or []))
    date_start, date_end = try_parse_range(request_data["date_from"], request_data["date_to"])
    date_start = datetime.combine(date_start, dtime.min)
    date_end = datetime.combine(date_end, dtime.max)
    user_creater = -1
    patient_card = -1
    final_result = []
    parent_obj = {"iss_id": "", "parent_title": "", "parent_is_hosp": "", "parent_is_doc_refferal": ""}
    # status: 4 - issued by the current user, 0 - issued only, 1 - material
    # received by the laboratory, 2 - result confirmed, 3 - the patient's
    # directions, -1 - cancelled
    if req_status == 4:
        user_creater = request.user.doctorprofile.pk
    if req_status in [0, 1, 2, 3, 5]:
        patient_card = pk
    if req_status == 5:
        # Type 5: list the patient's personal contracts, one summary row per
        # contract id, folding consecutive rows the same way as directions.
        # contracts = PersonContract.objects.filter(patient_card_id=pk, create_at__gte=date_start, create_at__lte=date_end).order_by('-create_at')
        # for i in contracts:
        # print(i.dir_list, i.pk, i.patient_card_id, i.num_contract, i.create_at)
        patient_contract = get_patient_contract(date_start, date_end, patient_card)
        count = 0
        last_contract = None
        temp_data = {
            'pk': "",
            'status': "",
            'researches': "",
            "researches_pks": "",
            'date': "",
            'cancel': False,
            'checked': False,
            'pacs': False,
            'has_hosp': False,
            'has_descriptive': False,
            'maybe_onco': False,
            'is_application': False,
            'lab': "",
            'parent': parent_obj,
            'is_expertise': False,
            'expertise_status': False,
            'person_contract_pk': "",
            'person_contract_dirs': "",
        }
        for i in patient_contract:
            if i.id != last_contract and count != 0:
                # New contract started: flush the accumulated row and reset.
                final_result.append(temp_data.copy())
                temp_data = {
                    'pk': "",
                    'status': "",
                    'researches': "",
                    "researches_pks": "",
                    'date': "",
                    'cancel': False,
                    'checked': False,
                    'pacs': False,
                    'has_hosp': False,
                    'has_descriptive': False,
                    'maybe_onco': False,
                    'is_application': False,
                    'lab': "",
                    'parent': parent_obj,
                    'is_expertise': False,
                    'expertise_status': False,
                    'person_contract_pk': "",
                    'person_contract_dirs': "",
                }
            temp_data['pk'] = i.id
            if temp_data['researches']:
                temp_data['researches'] = f"{temp_data['researches']} | {i.title}"
            else:
                temp_data['researches'] = f"{i.title}"
            temp_data['cancel'] = i.cancel
            temp_data['date'] = i.date_create
            temp_data['status'] = i.sum_contract
            temp_data['person_contract_dirs'] = i.dir_list
            last_contract = i.id
            count += 1
        final_result.append(temp_data.copy())
        res['directions'] = final_result
        return JsonResponse(res)
    is_service = False
    if services:
        is_service = True
    if not is_service:
        services = [-1]
    is_parent = False
    if iss_pk:
        is_parent = True
    result_sql = get_history_dir(date_start, date_end, patient_card, user_creater, services, is_service, iss_pk, is_parent, for_slave_hosp)
    # napravleniye_id, cancel, iss_id, tubesregistration_id, res_id, res_title, date_create,
    # doc_confirmation_id, time_recive, ch_time_save, podr_title, is_hospital, maybe_onco, can_has_pacs,
    # is_slave_hospital, is_treatment, is_stom, is_doc_refferal, is_paraclinic, is_microbiology, parent_id, study_instance_uid, parent_slave_hosp_id
    researches_pks = []
    researches_titles = ''
    last_dir, dir, status, date, cancel, pacs, has_hosp, has_descriptive = None, None, None, None, None, None, None, None
    maybe_onco, is_application, is_expertise, expertise_status = False, False, False, False
    parent_obj = {"iss_id": "", "parent_title": "", "parent_is_hosp": "", "parent_is_doc_refferal": ""}
    person_contract_pk = -1
    # Sentinel so the very first flush (before any real row) never matches a
    # requested status.
    status_set = {-2}
    lab = set()
    lab_title = None
    person_contract_dirs = ""
    type_service = request_data.get("type_service", None)
    for i in result_sql:
        # Row-level filters: skip slave-hospital rows and rows not matching
        # the requested service kind.
        if i[14]:
            continue
        elif type_service == 'is_paraclinic' and not i[18]:
            continue
        elif type_service == 'is_doc_refferal' and not i[17]:
            continue
        elif type_service == 'is_lab' and (i[11] or i[14] or i[15] or i[16] or i[17] or i[18] or i[19]):
            continue
        if i[0] != last_dir:
            # New direction started: flush the previous one if its folded
            # status matches the requested filter, then reset accumulators.
            status = min(status_set)
            if len(lab) > 0:
                lab_title = ', '.join(lab)
            if (req_status == 2 and status == 2) or (req_status in [3, 4] and status != -2) or (req_status == 1 and status == 1) or (req_status == 0 and status == 0):
                final_result.append(
                    {
                        'pk': dir,
                        'status': status,
                        'researches': researches_titles,
                        "researches_pks": researches_pks,
                        'date': date,
                        'cancel': cancel,
                        'checked': False,
                        'pacs': pacs,
                        'has_hosp': has_hosp,
                        'has_descriptive': has_descriptive,
                        'maybe_onco': maybe_onco,
                        'is_application': is_application,
                        'lab': lab_title,
                        'parent': parent_obj,
                        'is_expertise': is_expertise,
                        'expertise_status': expertise_status,
                        'person_contract_pk': person_contract_pk,
                        'person_contract_dirs': person_contract_dirs,
                    }
                )
            person_contract_pk = -1
            person_contract_dirs = ""
            dir = i[0]
            expertise_data = get_expertise(dir)
            is_expertise = False
            expertise_status = False
            if expertise_data.get('status') != 'empty':
                is_expertise = True
                expertise_status = 2 if expertise_data.get('status') == 'ok' else 0
            researches_titles = ''
            date = i[6]
            status_set = set()
            researches_pks = []
            pacs = None
            maybe_onco = False
            is_application = False
            parent_obj = {"iss_id": "", "parent_title": "", "parent_is_hosp": "", "parent_is_doc_refferal": ""}
            if i[13]:
                # Resolve the DICOM viewer link from the stored study UID,
                # or search the PACS by direction number as a fallback.
                if i[21]:
                    pacs = f'{DICOM_SERVER}/osimis-viewer/app/index.html?study={i[21]}'
                else:
                    pacs = search_dicom_study(int(dir))
            has_hosp = False
            if i[11]:
                has_hosp = True
            lab = set()
        if researches_titles:
            researches_titles = f'{researches_titles} | {i[5]}'
        else:
            researches_titles = i[5]
        # Per-study status: 0 issued, 1 material received/saved, 2 confirmed,
        # -1 cancelled; the direction's folded status is the minimum of these.
        status_val = 0
        has_descriptive = False
        if i[8] or i[9]:
            status_val = 1
        if i[7] or i[24]:
            status_val = 2
        if i[1]:
            status_val = -1
        status_set.add(status_val)
        researches_pks.append(i[4])
        if i[12]:
            maybe_onco = True
        if i[20]:
            parent_obj = get_data_parent(i[20])
        last_dir = dir
        cancel = i[1]
        title_podr = i[10]
        if title_podr is None:
            title_podr = ''
        if title_podr not in lab:
            lab.add(title_podr)
        if i[14] or i[15] or i[16] or i[17] or i[18] or i[19] or i[23]:
            has_descriptive = True
        if i[24]:
            is_application = True
        if i[26]:
            person_contract_pk = i[26]
            person_contract_dirs = i[27]
    # Flush the last accumulated direction after the loop.
    status = min(status_set)
    if len(lab) > 0:
        lab_title = ', '.join(lab)
    if (req_status == 2 and status == 2) or (req_status in [3, 4] and status != -2) or (req_status == 1 and status == 1) or (req_status == 0 and status == 0):
        final_result.append(
            {
                'pk': dir,
                'status': status,
                'researches': researches_titles,
                "researches_pks": researches_pks,
                'date': date,
                'cancel': cancel,
                'checked': False,
                'pacs': pacs,
                'has_hosp': has_hosp,
                'has_descriptive': has_descriptive,
                'maybe_onco': maybe_onco,
                'is_application': is_application,
                'lab': lab_title,
                'parent': parent_obj,
                'is_expertise': is_expertise,
                'expertise_status': expertise_status,
                'person_contract_pk': person_contract_pk,
                'person_contract_dirs': person_contract_dirs,
            }
        )
    res['directions'] = final_result
    return JsonResponse(res)
def get_data_parent(parent_id):
    """Return summary info about a parent Issledovaniya for UI display.

    The dict mirrors the shape expected by the directions history view:
    issledovaniye id, its direction pk, research title/flags and whether
    the parent has been confirmed.
    """
    parent = Issledovaniya.objects.get(pk=parent_id)
    research = parent.research
    return {
        "iss_id": parent_id,
        "pk": parent.napravleniye_id,
        "parent_title": research.title,
        "parent_is_hosp": research.is_hospital,
        "parent_is_doc_refferal": research.is_doc_refferal,
        "is_confirm": bool(parent.time_confirmation),
    }
@login_required
@group_required("ะัะฐั ััะฐัะธะพะฝะฐัะฐ")
def hosp_set_parent(request):
    """List recent hospital directions of a patient as candidates for a parent link."""
    # Build a whole-day window: [today - days_before_hosp, today].
    today = utils.current_time()
    days_ago = SettingManager.get("days_before_hosp", default='30', default_type='i')
    date_start = datetime.combine(today + relativedelta(days=-days_ago), dtime.min)
    date_end = datetime.combine(today, dtime.max)
    request_data = json.loads(request.body)
    patient_card = request_data.get("patient", -1)
    # Signature: get_history_dir(date_start, date_end, patient_card, user_creater,
    #                            services, is_service, iss_pk, is_parent, for_slave_hosp)
    result_sql = get_history_dir(date_start, date_end, patient_card, -1, [-1], False, None, False, False)
    # Row layout (partial): [0] napravleniye_id, [2] iss_id, [5] res_title,
    # [6] date_create, [11] is_hospital.
    res = {"directions": []}
    for row in result_sql:
        if not row[11]:
            # Not a hospital research — skip.
            continue
        if forbidden_edit_dir(row[0]):
            continue
        res['directions'].append({'dir_num': row[0], 'iss_id': row[2], 'researche_titles': row[5], 'date': row[6]})
    return JsonResponse(res)
@login_required
@group_required("ะัะฐั ััะฐัะธะพะฝะฐัะฐ")
def update_parent(request):
    """Attach slave directions to a parent Issledovaniya (or detach with parent == -1).

    Forbids touching hospital researches unless the user is in the
    history-hierarchy management group. Every change is written to the Log.
    """
    request_data = json.loads(request.body)
    parent = request_data.get("parent")
    slave_dirs = request_data.get("slave_dirs", [])
    g = [str(x) for x in request.user.groups.all()]
    # Fix: the group title literal was broken across two source lines,
    # which is not a valid Python string literal; rejoined here.
    forbidden = "ะฃะฟัะฐะฒะปะตะฝะธะต ะธะตัะฐัั ะธะตะน ะธััะพัะธะธ" not in g
    iss = Issledovaniya.objects.filter(napravleniye__in=slave_dirs)
    for r in iss:
        if r.research.is_hospital and forbidden:
            return JsonResponse({"ok": False, "message": "ะะตั ะฟัะฐะฒ ะดะปั ััะฐัะธะพะฝะฐัะฝะพะณะพ ะธะทะผะตะฝะตะฝะธั"})
    parent_iss = None
    if parent is not None and parent > -1:
        parent_iss = Issledovaniya.objects.get(pk=parent)
        Napravleniya.objects.filter(pk__in=slave_dirs).update(parent=parent_iss)
    if parent == -1:
        # Sentinel -1 means "detach from any parent".
        Napravleniya.objects.filter(pk__in=slave_dirs).update(parent=None)
    dir_parent = ""
    if parent_iss:
        dir_parent = parent_iss.napravleniye.pk
    for i in slave_dirs:
        Log(key=i, type=5003, body=json.dumps({"dir": i, "parent_dir": dir_parent, "parent_iss_id": parent}), user=request.user.doctorprofile).save()
    return JsonResponse({"ok": True, "message": ""})
@login_required
def directions_rmis_directions(request):
    """Fetch active RMIS directions for a card that have not been imported locally yet."""
    pk = json.loads(request.body).get("pk")
    rows = []
    if pk and Card.objects.filter(pk=pk, base__is_rmis=True).exists():
        rmis_client = Client(modules=["directions", "services"])
        numbers = rmis_client.directions.get_individual_active_directions(Card.objects.get(pk=pk).number)
        for number in numbers:
            # Skip directions that already exist on our side.
            if Napravleniya.objects.filter(rmis_number=number).exists():
                continue
            full_data = rmis_client.directions.get_direction_full_data(number)
            if full_data:
                rows.append(full_data)
    return JsonResponse({"rows": rows})
@login_required
def directions_rmis_direction(request):
    """Return full RMIS data for one direction, preferring the local cache."""
    pk = json.loads(request.body).get("pk")
    data = {}
    if pk and not Napravleniya.objects.filter(rmis_number=pk).exists():
        cached = get_direction_full_data_cache(pk)
        if cached:
            data = cached
        else:
            # Cache miss — ask the RMIS web service directly.
            data = Client(modules=["directions", "services"]).directions.get_direction_full_data(pk)
    return JsonResponse(data)
@login_required
def directions_cancel(request):
    """Toggle the cancel flag of a direction and record the change in the Log."""
    response = {"cancel": False}
    pk = json.loads(request.body).get("pk", -1)
    direction = Napravleniya.objects.filter(pk=pk).first()
    if direction is not None:
        direction.cancel = not direction.cancel
        direction.save()
        response["cancel"] = direction.cancel
        Log(key=pk, type=5002, body="ะดะฐ" if direction.cancel else "ะฝะตั", user=request.user.doctorprofile).save()
    return JsonResponse(response)
@login_required
def directions_results(request):
    """Return the results of one direction as a nested research -> fractions structure.

    Expects JSON body {"pk": direction_id, "force": bool}. If any research in the
    direction is descriptive and "force" is not set, only the `desc` flag is
    returned so the client can redirect to the descriptive-results view.
    """
    result = {"ok": False, "desc": False, "direction": {"pk": -1, "doc": "", "date": ""}, "client": {}, "full": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    if Napravleniya.objects.filter(pk=pk).exists():
        napr = Napravleniya.objects.get(pk=pk)
        dates = {}
        # Early exit for descriptive researches unless the caller forces a full build.
        for iss in Issledovaniya.objects.filter(napravleniye=napr):
            if iss.research.desc:
                result["desc"] = True
                if not request_data.get("force", False):
                    return JsonResponse(result)
        # Histogram of save dates — the most frequent one becomes the direction date.
        for iss in Issledovaniya.objects.filter(napravleniye=napr, time_save__isnull=False):
            if iss.time_save:
                dt = str(dateformat.format(iss.time_save, settings.DATE_FORMAT))
                if dt not in dates.keys():
                    dates[dt] = 0
                dates[dt] += 1
        maxdate = ""
        if dates != {}:
            maxdate = max(dates.items(), key=operator.itemgetter(1))[0]
        else:
            # Nothing saved yet: fall back to the confirmation date of the first issledovaniye.
            iss = Issledovaniya.objects.filter(napravleniye=napr)[0]
            if iss.time_confirmation:
                maxdate = str(dateformat.format(iss.time_confirmation, settings.DATE_FORMAT))
        iss_list = Issledovaniya.objects.filter(napravleniye=napr)
        t = 0
        if not iss_list.filter(time_confirmation__isnull=True).exists() or iss_list.filter(deferred=False).exists():
            result["direction"]["pk"] = napr.pk
            result["full"] = False
            result["ok"] = True
            result["pacs"] = None if not iss_list[0].research.podrazdeleniye or not iss_list[0].research.podrazdeleniye.can_has_pacs else search_dicom_study(pk)
            if iss_list.filter(time_confirmation__isnull=False).exists():
                result["direction"]["doc"] = iss_list.filter(time_confirmation__isnull=False)[0].doc_confirmation_fio
                if iss_list.filter(time_confirmation__isnull=True, deferred=False).exists():
                    result["direction"]["doc"] = result["direction"]["doc"] + " (ะฒัะฟะพะปะฝะตะฝะพ ะฝะต ะฟะพะปะฝะพัััั)"
                else:
                    result["full"] = True
            else:
                result["direction"]["doc"] = "ะะต ะฟะพะดัะฒะตัะถะดะตะฝะพ"
            result["direction"]["date"] = maxdate
            result["client"]["sex"] = napr.client.individual.sex
            result["client"]["fio"] = napr.client.individual.fio()
            result["client"]["age"] = napr.client.individual.age_s(direction=napr)
            result["client"]["cardnum"] = napr.client.number_with_type()
            result["client"]["dr"] = napr.client.individual.bd()
            result["results"] = collections.OrderedDict()
            isses = []
            for issledovaniye in iss_list.order_by("tubes__id", "research__sort_weight"):
                # The tubes join can duplicate rows — keep each issledovaniye once.
                if issledovaniye.pk in isses:
                    continue
                isses.append(issledovaniye.pk)
                t += 1
                # Composite ordering key: counter, direction group, sort weight, research id.
                kint = "%s_%s_%s_%s" % (
                    t,
                    "-1" if not issledovaniye.research.direction else issledovaniye.research.direction_id,
                    issledovaniye.research.sort_weight,
                    issledovaniye.research_id,
                )
                result["results"][kint] = {"title": issledovaniye.research.title, "fractions": collections.OrderedDict(), "sort": issledovaniye.research.sort_weight, "tube_time_get": ""}
                if not issledovaniye.deferred or issledovaniye.time_confirmation:
                    for isstube in issledovaniye.tubes.all():
                        if isstube.time_get:
                            result["results"][kint]["tube_time_get"] = str(dateformat.format(isstube.time_get_local, settings.DATE_FORMAT))
                            break
                    results = Result.objects.filter(issledovaniye=issledovaniye).order_by("fraction__sort_weight")  # Fetch stored results
                    n = 0
                    for res in results:  # Iterate over stored results
                        # NOTE: 'pk' is reused here as the fraction key, shadowing the direction id above.
                        pk = res.fraction.sort_weight
                        if not pk or pk <= 0:
                            pk = res.fraction_id
                        if res.fraction.render_type == 0:
                            # Plain fraction: value + units + sex-specific references.
                            if pk not in result["results"][kint]["fractions"].keys():
                                result["results"][kint]["fractions"][pk] = {}
                            result["results"][kint]["fractions"][pk]["result"] = result_normal(res.value)
                            result["results"][kint]["fractions"][pk]["title"] = res.fraction.title
                            result["results"][kint]["fractions"][pk]["units"] = res.get_units()
                            refs = res.get_ref(full=True)
                            ref_m = refs["m"]
                            ref_f = refs["f"]
                            if isinstance(ref_m, str):
                                ref_m = json.loads(ref_m)
                            if isinstance(ref_f, str):
                                ref_f = json.loads(ref_f)
                            result["results"][kint]["fractions"][pk]["ref_m"] = ref_m
                            result["results"][kint]["fractions"][pk]["ref_f"] = ref_f
                        else:
                            # Tabular (bacteriology) fraction: value is JSON with culture rows.
                            try:
                                tmp_results = json.loads("{}" if not res.value else res.value).get("rows", {})
                            except Exception:
                                tmp_results = {}
                            n = 0
                            for row in tmp_results.values():
                                n += 1
                                tmp_pk = "%d_%d" % (pk, n)
                                if tmp_pk not in result["results"][kint]["fractions"].keys():
                                    result["results"][kint]["fractions"][tmp_pk] = {}
                                result["results"][kint]["fractions"][tmp_pk]["title"] = "ะัะดะตะปะตะฝะฝะฐั ะบัะปััััะฐ"
                                result["results"][kint]["fractions"][tmp_pk]["result"] = row["title"]
                                result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                                # Antibiotic sensitivity sub-rows of this culture.
                                for subrow in row["rows"].values():
                                    if "null" in subrow["value"]:
                                        continue
                                    n += 1
                                    tmp_pk = "%d_%d" % (pk, n)
                                    if tmp_pk not in result["results"][kint]["fractions"].keys():
                                        result["results"][kint]["fractions"][tmp_pk] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["title"] = subrow["title"]
                                    result["results"][kint]["fractions"][tmp_pk]["result"] = subrow["value"]
                                    result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                    result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                                # S/R/I legend row appended after each culture block.
                                n += 1
                                tmp_pk = "%d_%d" % (pk, n)
                                if tmp_pk not in result["results"][kint]["fractions"].keys():
                                    result["results"][kint]["fractions"][tmp_pk] = {}
                                result["results"][kint]["fractions"][tmp_pk]["title"] = "S - ััะฒััะฒะธัะตะปะตะฝ; R - ัะตะทะธััะตะฝัะตะฝ; I - ะฟัะพะผะตะถััะพัะฝะฐั ััะฒััะฒะธัะตะปัะฝะพััั;"
                                result["results"][kint]["fractions"][tmp_pk]["result"] = ""
                                result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                                result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                    # Trailing laboratory comment, rendered as one extra pseudo-fraction.
                    if issledovaniye.lab_comment and issledovaniye.lab_comment != "":
                        n += 1
                        tmp_pk = "%d_%d" % (pk, n)
                        if tmp_pk not in result["results"][kint]["fractions"].keys():
                            result["results"][kint]["fractions"][tmp_pk] = {}
                        result["results"][kint]["fractions"][tmp_pk]["title"] = "ะะพะผะผะตะฝัะฐัะธะน"
                        result["results"][kint]["fractions"][tmp_pk]["result"] = issledovaniye.lab_comment.replace("\n", "<br/>")
                        result["results"][kint]["fractions"][tmp_pk]["ref_m"] = {}
                        result["results"][kint]["fractions"][tmp_pk]["ref_f"] = {}
                        result["results"][kint]["fractions"][tmp_pk]["units"] = ""
                else:
                    # Deferred and unconfirmed: show every fraction as postponed.
                    fr_list = Fractions.objects.filter(research=issledovaniye.research)
                    for fr in fr_list:
                        pk = fr.sort_weight
                        if not pk or pk <= 0:
                            pk = fr.pk
                        if pk not in result["results"][kint]["fractions"].keys():
                            result["results"][kint]["fractions"][pk] = {}
                        result["results"][kint]["fractions"][pk]["result"] = "ะพัะปะพะถะตะฝ"  # Value
                        result["results"][kint]["fractions"][pk]["title"] = fr.title  # Fraction title
                        result["results"][kint]["fractions"][pk]["units"] = fr.get_unit_str()  # Units of measurement
                        ref_m = {"": ""}  # fr.ref_m
                        ref_f = {"": ""}  # fr.ref_f
                        if not isinstance(ref_m, str):
                            ref_m = json.loads(ref_m)
                        if not isinstance(ref_f, str):
                            ref_f = json.loads(ref_f)
                        result["results"][kint]["fractions"][pk]["ref_m"] = ref_m  # Reference (male)
                        result["results"][kint]["fractions"][pk]["ref_f"] = ref_f  # Reference (female)
    return JsonResponse(result)
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะะพัะตัะตะฝะธั ะฟะพ ะฝะฐะฟัะฐะฒะปะตะฝะธัะผ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน")
def directions_services(request):
    """Return the services of one direction (paraclinic/referral/microbiology/cytology/histology).

    Accepts either a plain direction pk or a 4600000000000-prefixed barcode value.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    # Barcode form: strip the EAN prefix and the trailing check digit.
    if pk >= 4600000000000:
        pk -= 4600000000000
        pk //= 10
    f = False
    dn = Napravleniya.objects.filter(pk=pk)
    if dn.exists():
        n = dn[0]
        # NOTE(review): this existence check is not restricted to direction `n`
        # (no napravleniye filter) — confirm whether that is intentional.
        if Issledovaniya.objects.filter(
            Q(research__is_paraclinic=True) | Q(research__is_doc_refferal=True) | Q(research__is_microbiology=True) | Q(research__is_citology=True) | Q(research__is_gistology=True)
        ).exists():
            # cdid: confirming doctor id, ctime: now, ctp: confirmation timestamp, rt: reset window (s).
            cdid, ctime, ctp, rt = get_reset_time_vars(n)
            response["ok"] = True
            researches = []
            has_microbiology = False
            receive_datetime = None
            for i in (
                Issledovaniya.objects.filter(napravleniye=n)
                .filter(
                    Q(research__is_paraclinic=True) | Q(research__is_doc_refferal=True) | Q(research__is_microbiology=True) | Q(research__is_citology=True) | Q(research__is_gistology=True)
                )
                .distinct()
            ):
                researches.append(
                    {
                        "pk": i.pk,
                        "title": i.research.title,
                        "department": "" if not i.research.podrazdeleniye else i.research.podrazdeleniye.get_title(),
                        "is_microbiology": i.research.is_microbiology,
                        "comment": i.localization.title if i.localization else i.comment,
                        "tube": {"title": i.research.microbiology_tube.title, "color": i.research.microbiology_tube.color, "pk": i.pk} if i.research.is_microbiology else None,
                    }
                )
                if i.research.is_microbiology:
                    has_microbiology = True
            if has_microbiology:
                receive_datetime = None if not n.time_microbiology_receive else strdatetime(n.time_microbiology_receive)
            response["direction_data"] = {
                "date": strdate(n.data_sozdaniya),
                "client": n.client.individual.fio(full=True),
                "card": n.client.number_with_type(),
                "diagnos": n.diagnos,
                "has_microbiology": has_microbiology,
                "receive_datetime": receive_datetime,
                "doc": "" if not n.doc else "{}, {}".format(n.doc.get_fio(), n.doc.podrazdeleniye.title),
                "imported_from_rmis": n.imported_from_rmis,
                "imported_org": "" if not n.imported_org else n.imported_org.title,
                "visit_who_mark": "" if not n.visit_who_mark else "{}, {}".format(n.visit_who_mark.get_fio(), n.visit_who_mark.podrazdeleniye.title),
                "fin_source": "" if not n.istochnik_f else "{} - {}".format(n.istochnik_f.base.title, n.istochnik_f.title),
            }
            response["researches"] = researches
            response["loaded_pk"] = pk
            response["visit_status"] = n.visit_date is not None
            response["visit_date"] = "" if not n.visit_date else strdatetime(n.visit_date)
            # Reset is allowed within the time window for the confirming doctor,
            # or for superusers / the dedicated reset group — and only after a visit.
            response["allow_reset_confirm"] = bool(
                (
                    (ctime - ctp < rt and cdid == request.user.doctorprofile.pk)
                    or request.user.is_superuser
                    or "ะกะฑัะพั ะฟะพะดัะฒะตัะถะดะตะฝะธะน ัะตะทัะปััะฐัะพะฒ" in [str(x) for x in request.user.groups.all()]
                )
                and n.visit_date
            )
            f = True
    if not f:
        response["message"] = "ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฝะฐะนะดะตะฝะพ"
    return JsonResponse(response)
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะะพัะตัะตะฝะธั ะฟะพ ะฝะฐะฟัะฐะฒะปะตะฝะธัะผ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน")
def directions_mark_visit(request):
    """Mark a direction as visited, or cancel the mark within the allowed time window.

    JSON body: {"pk": direction_id, "cancel": bool}. Cancelling is allowed only
    within `visit_reset_time_min` minutes for the marking doctor (superusers and
    the reset group bypass the window). Every attempt is logged.
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -1)
    cancel = request_data.get("cancel", False)
    dn = Napravleniya.objects.filter(pk=pk)
    f = False
    if dn.exists():
        n = dn[0]
        if Issledovaniya.objects.filter(Q(research__is_paraclinic=True) | Q(research__is_doc_refferal=True)).exists():
            if not cancel:
                n.visit_date = timezone.now()
                n.visit_who_mark = request.user.doctorprofile
                n.save()
                cdid, ctime, ctp, rt = get_reset_time_vars(n)
                allow_reset_confirm = bool(
                    (
                        (ctime - ctp < rt and cdid == request.user.doctorprofile.pk)
                        or request.user.is_superuser
                        or "ะกะฑัะพั ะฟะพะดัะฒะตัะถะดะตะฝะธะน ัะตะทัะปััะฐัะพะฒ" in [str(x) for x in request.user.groups.all()]
                    )
                    and n.visit_date
                )
                response["visit_status"] = n.visit_date is not None
                response["visit_date"] = strdatetime(n.visit_date)
                response["allow_reset_confirm"] = allow_reset_confirm
                response["ok"] = True
            else:
                # Recompute the reset window from the visit mark itself.
                ctp = int(0 if not n.visit_date else int(time.mktime(timezone.localtime(n.visit_date).timetuple())))
                ctime = int(time.time())
                cdid = -1 if not n.visit_who_mark else n.visit_who_mark_id
                rtm = SettingManager.get("visit_reset_time_min", default="20.0", default_type='f')
                rt = rtm * 60
                allow_reset_confirm = bool(
                    (
                        (ctime - ctp < rt and cdid == request.user.doctorprofile.pk)
                        or request.user.is_superuser
                        or "ะกะฑัะพั ะฟะพะดัะฒะตัะถะดะตะฝะธะน ัะตะทัะปััะฐัะพะฒ" in [str(x) for x in request.user.groups.all()]
                    )
                    and n.visit_date
                )
                if allow_reset_confirm:
                    response["ok"] = True
                    response["visit_status"] = None
                    response["visit_date"] = ''
                    response["allow_reset_confirm"] = False
                    n.visit_date = None
                    n.visit_who_mark = None
                    n.save()
                else:
                    response["message"] = "ะัะผะตะฝะฐ ะฟะพัะตัะตะฝะธั ะฒะพะทะผะพะถะฝะฐ ัะพะปัะบะพ ะฒ ัะตัะตะฝะธะธ {} ะผะธะฝ.".format(rtm)
            # Fix: on a rejected cancel, response["visit_date"] was never set and the
            # Log call raised KeyError; use .get() with an empty default instead.
            Log(key=pk, type=5001, body=json.dumps({"ะะพัะตัะตะฝะธะต": "ะพัะผะตะฝะฐ" if cancel else "ะดะฐ", "ะะฐัะฐ ะธ ะฒัะตะผั": response.get("visit_date", "")}), user=request.user.doctorprofile).save()
            f = True
    if not f:
        response["message"] = "ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฝะฐะนะดะตะฝะพ"
    return JsonResponse(response)
@group_required("ะะพะปััะฐัะตะปั ะฑะธะพะผะฐัะตัะธะฐะปะฐ ะผะธะบัะพะฑะธะพะปะพะณะธะธ")
def directions_receive_material(request):
    """Mark (or unmark) the microbiology material of a direction as received."""
    response = {"ok": False, "message": ""}
    payload = json.loads(request.body)
    pk = payload.get("pk", -1)
    cancel = payload.get("cancel", False)
    direction = Napravleniya.objects.filter(pk=pk).first()
    if direction is None:
        response["message"] = "ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฝะฐะนะดะตะฝะพ"
        return JsonResponse(response)
    if cancel:
        # Clear the receive mark entirely.
        direction.time_microbiology_receive = None
        direction.doc_microbiology_receive = None
        direction.save()
        response["ok"] = True
        response["receive_datetime"] = None
    elif direction.time_microbiology_receive:
        response["message"] = "ะะฐัะตัะธะฐะป ัะถะต ะฟัะธะฝัั"
    else:
        direction.time_microbiology_receive = timezone.now()
        direction.doc_microbiology_receive = request.user.doctorprofile
        direction.save()
        response["ok"] = True
        response["receive_datetime"] = strdatetime(direction.time_microbiology_receive)
    return JsonResponse(response)
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะะพัะตัะตะฝะธั ะฟะพ ะฝะฐะฟัะฐะฒะปะตะฝะธัะผ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน")
def directions_visit_journal(request):
    """Journal of visits marked by the current doctor within a date range."""
    date_start, date_end = try_parse_range(json.loads(request.body)["date"])
    visits = Napravleniya.objects.filter(
        visit_date__range=(date_start, date_end),
        visit_who_mark=request.user.doctorprofile,
    ).order_by("-visit_date")
    data = [
        {"pk": v.pk, "client": v.client.individual.fio(full=True), "card": v.client.number_with_type(), "datetime": strdatetime(v.visit_date)}
        for v in visits
    ]
    return JsonResponse({"data": data})
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะะพัะตัะตะฝะธั ะฟะพ ะฝะฐะฟัะฐะฒะปะตะฝะธัะผ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน")
def directions_recv_journal(request):
    """Journal of microbiology materials received by the current doctor within a date range."""
    response = {"data": []}
    date_start, date_end = try_parse_range(json.loads(request.body)["date"])
    received = Napravleniya.objects.filter(
        time_microbiology_receive__range=(date_start, date_end),
        doc_microbiology_receive=request.user.doctorprofile,
    ).order_by("-time_microbiology_receive")
    for direction in received:
        # Collect the tubes of every microbiology service in this direction.
        tubes = [
            {
                "color": iss.research.microbiology_tube.color,
                "title": iss.research.microbiology_tube.title,
            }
            for iss in Issledovaniya.objects.filter(napravleniye=direction, research__microbiology_tube__isnull=False)
        ]
        response["data"].append(
            {
                "pk": direction.pk,
                "client": direction.client.individual.fio(full=True),
                "datetime": strdatetime(direction.time_microbiology_receive),
                "tubes": tubes,
            }
        )
    return JsonResponse(response)
@login_required
def directions_last_result(request):
    """Return the latest state for (individual, research): a confirmed result,
    an unconfirmed direction, or a paraclinic visit — whichever is most recent.

    i — latest confirmed issledovaniye; u — latest unconfirmed one;
    v — latest unconfirmed paraclinic one with a visit date.
    """
    response = {"ok": False, "data": {}, "type": "result", "has_last_result": False}
    request_data = json.loads(request.body)
    individual = request_data.get("individual", -1)
    research = request_data.get("research", -1)
    parent_iss = request_data.get("parentIss", None)
    # NOTE: `filter` shadows the builtin; kept as-is to avoid touching behavior.
    filter = {
        "napravleniye__client__individual__pk": individual,
        "research__pk": research,
    }
    if parent_iss:
        filter["napravleniye__parent__pk"] = parent_iss
    i = Issledovaniya.objects.filter(**filter, time_confirmation__isnull=False).order_by("-time_confirmation").first()
    u = Issledovaniya.objects.filter(**filter, time_confirmation__isnull=True).order_by("-napravleniye__data_sozdaniya").first()
    v = (
        Issledovaniya.objects.filter(**filter, research__is_paraclinic=True, time_confirmation__isnull=True, napravleniye__visit_date__isnull=False)
        .order_by("-napravleniye__visit_date")
        .first()
    )
    if i:
        # Confirmed result exists and is newer than any pending direction.
        if not u or i.time_confirmation >= u.napravleniye.data_sozdaniya:
            response["ok"] = True
            if v and v.napravleniye.visit_date > i.time_confirmation:
                response["type"] = "visit"
                # NOTE(review): `u` may be None on this path (the branch condition
                # allows `not u`), so `u.napravleniye_id` would raise — confirm
                # whether this should read `v.napravleniye_id`.
                response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(v.napravleniye.visit_date), "is_desc": i.research.desc, "ts": tsdatetime(v.napravleniye.visit_date)}
                response["has_last_result"] = True
                response["last_result"] = {
                    "direction": i.napravleniye_id,
                    "datetime": strdate(i.time_confirmation),
                    "ts": tsdatetime(i.time_confirmation),
                    "is_desc": i.research.desc,
                    "is_doc_referral": i.research.is_doc_referral,
                    "is_paraclinic": i.research.is_paraclinic,
                }
            else:
                response["data"] = {
                    "direction": i.napravleniye_id,
                    "datetime": strdate(i.time_confirmation),
                    "is_desc": i.research.desc,
                    "is_doc_referral": i.research.is_doc_referral,
                    "ts": tsdatetime(i.time_confirmation),
                    "is_paraclinic": i.research.is_paraclinic,
                }
        elif u:
            # Pending direction is newer than the confirmed result: report the
            # pending one, but still expose the confirmed result as last_result.
            response["ok"] = True
            if v and v.napravleniye.visit_date > u.napravleniye.data_sozdaniya:
                response["type"] = "visit"
                response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(v.napravleniye.visit_date), "is_desc": i.research.desc, "ts": tsdatetime(v.napravleniye.visit_date)}
            else:
                response["type"] = "direction"
                response["data"] = {
                    "direction": u.napravleniye_id,
                    "datetime": strdate(u.napravleniye.data_sozdaniya),
                    "is_desc": i.research.desc,
                    "ts": tsdatetime(u.napravleniye.data_sozdaniya),
                }
            response["has_last_result"] = True
            response["last_result"] = {
                "direction": i.napravleniye_id,
                "datetime": strdate(i.time_confirmation),
                "is_doc_referral": i.research.is_doc_referral,
                "ts": tsdatetime(i.time_confirmation),
                "is_paraclinic": i.research.is_paraclinic,
            }
    elif u:
        # No confirmed result at all: report the pending direction / visit only.
        response["ok"] = True
        if v and v.napravleniye.visit_date > u.napravleniye.data_sozdaniya:
            response["type"] = "visit"
            response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(v.napravleniye.visit_date), "ts": tsdatetime(v.napravleniye.visit_date)}
        else:
            response["type"] = "direction"
            response["data"] = {"direction": u.napravleniye_id, "datetime": strdate(u.napravleniye.data_sozdaniya), "ts": tsdatetime(u.napravleniye.data_sozdaniya)}
    return JsonResponse(response)
@login_required
def directions_results_report(request):
    """Build a flat, sorted report of confirmed results for an individual.

    JSON body: {"individual": pk, "params": [{"pk", "is_paraclinic"}, ...],
    "date_start", "date_end"}. Each param is either a paraclinic input group
    (descriptive fields joined into one value) or a lab fraction (numeric value
    with norm/reference evaluation).
    """
    import re

    data = []
    request_data = json.loads(request.body)
    individual_pk = request_data.get("individual", -1)
    Log(key=str(individual_pk), type=20000, body=json.dumps(request_data), user=request.user.doctorprofile).save()
    params = request_data.get("params", [])
    date_start, date_end = try_parse_range(request_data.get("date_start"), request_data.get("date_end"))
    if Individual.objects.filter(pk=individual_pk).exists():
        # Fix: this was bound to `i`, which the paraclinic loop below reused as its
        # loop variable — after one paraclinic param, the fraction branch filtered
        # by an Issledovaniya instead of the Individual. Renamed to avoid shadowing.
        individual = Individual.objects.get(pk=individual_pk)
        for param in params:
            ppk = param["pk"]
            if param["is_paraclinic"]:
                if ParaclinicInputGroups.objects.filter(pk=ppk).exists():
                    g = ParaclinicInputGroups.objects.get(pk=ppk)
                    # NOTE(review): this query is not restricted to `individual` or to
                    # the requested date range — confirm whether that is intentional.
                    for iss in Issledovaniya.objects.filter(research__paraclinicinputgroups=g, time_confirmation__isnull=False):
                        res = []
                        for r in ParaclinicResult.objects.filter(field__group=g, issledovaniye=iss).order_by("field__order"):
                            if r.value == "":
                                continue
                            field_title = r.field.get_title(force_type=r.get_field_type())
                            res.append((field_title + ": " if field_title != "" else "") + r.value)
                        if len(res) == 0:
                            continue
                        paramdata = {
                            "research": iss.research_id,
                            "pk": ppk,
                            "order": g.order,
                            "date": strdate(iss.time_confirmation),
                            "timestamp": tsdatetime(iss.time_confirmation),
                            "value": "; ".join(res),
                            "units": "",
                            "is_norm": "normal",
                            "not_norm_dir": "",
                            "active_ref": None,
                            "direction": iss.napravleniye_id,
                        }
                        data.append(paramdata)
            else:
                if Fractions.objects.filter(pk=ppk).exists():
                    f = Fractions.objects.get(pk=ppk)
                    for r in Result.objects.filter(issledovaniye__napravleniye__client__individual=individual, fraction=f, issledovaniye__time_confirmation__range=(date_start, date_end)):
                        if r.value == "":
                            continue
                        is_norm, ref_sign = r.get_is_norm()
                        not_norm_dir = ""
                        active_ref = r.calc_normal(fromsave=False, only_ref=True)
                        # Direction of deviation only makes sense for purely numeric values.
                        if isinstance(active_ref, str) and re.match(r"^\d+(\.\d+)?$", r.value.replace(",", ".").strip()):
                            r1, r2 = r.calc_normal(fromsave=False, only_ref=False)
                            if r1 and r2:
                                if r1 == 'not_normal':
                                    not_norm_dir = {'<': "n_down", ">": "n_up"}.get(r2, "")
                        paramdata = {
                            "research": f.research_id,
                            "pk": ppk,
                            "order": f.sort_weight,
                            "date": strdate(r.issledovaniye.time_confirmation),
                            "timestamp": tsdatetime(r.issledovaniye.time_confirmation),
                            "value": r.value,
                            "units": r.get_units(),
                            "is_norm": is_norm,
                            "not_norm_dir": not_norm_dir,
                            "active_ref": active_ref,
                            "direction": r.issledovaniye.napravleniye_id,
                        }
                        data.append(paramdata)
    # Stable multi-key ordering: research, then group order, then param pk,
    # then newest-first within a param (sorts applied in reverse significance).
    data.sort(key=itemgetter("timestamp"), reverse=True)
    data.sort(key=itemgetter("pk"))
    data.sort(key=itemgetter("order"))
    data.sort(key=itemgetter("research"))
    return JsonResponse({"data": data})
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะัะฐั ััะฐัะธะพะฝะฐัะฐ", "t, ad, p", "ะะฐะฟะพะปะฝะตะฝะธะต ะผะพะฝะธัะพัะธะฝะณะพะฒ", "ะกะฒะธะดะตัะตะปัััะฒะพ ะพ ัะผะตััะธ-ะดะพัััะฟ")
def directions_paraclinic_form(request):
TADP = SettingManager.get("tadp", default='ะขะตะผะฟะตัะฐัััะฐ', default_type='s')
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("pk", -1) or -1
by_issledovaniye = request_data.get("byIssledovaniye", False)
force_form = request_data.get("force", False)
without_issledovaniye = request_data.get("withoutIssledovaniye", None)
if pk >= 4600000000000:
pk -= 4600000000000
pk //= 10
add_fr = {}
f = False
g = [str(x) for x in request.user.groups.all()]
is_without_limit_paraclinic = "ะะฐัะฐะบะปะธะฝะธะบะฐ ะฑะตะท ะพะณัะฐะฝะธัะตะฝะธะน" in g
if not request.user.is_superuser and not is_without_limit_paraclinic:
add_fr = dict(research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
if by_issledovaniye:
if Issledovaniya.objects.filter(pk=pk, research__is_microbiology=True).exists():
pk = Issledovaniya.objects.get(pk=pk).napravleniye_id
else:
pk = -1
dn = (
Napravleniya.objects.filter(pk=pk)
.select_related('client', 'client__base', 'client__individual', 'doc', 'doc__podrazdeleniye')
.prefetch_related(
Prefetch(
'issledovaniya_set',
queryset=(
Issledovaniya.objects.all()
if force_form
else Issledovaniya.objects.filter(
Q(research__is_paraclinic=True, **add_fr)
| Q(research__is_doc_refferal=True)
| Q(research__is_treatment=True)
| Q(research__is_stom=True)
| Q(research__is_microbiology=True)
| Q(research__is_citology=True)
| Q(research__is_gistology=True)
| Q(research__is_form=True)
| Q(research__is_monitoring=True)
| Q(research__is_expertise=True)
)
)
.select_related('research', 'research__microbiology_tube', 'research__podrazdeleniye')
.prefetch_related(
Prefetch(
'research__paraclinicinputgroups_set',
queryset=ParaclinicInputGroups.objects.filter(hide=False)
.order_by("order")
.prefetch_related(Prefetch('paraclinicinputfield_set', queryset=ParaclinicInputField.objects.filter(hide=False).order_by("order"))),
),
Prefetch('recipe_set', queryset=Recipe.objects.all().order_by('pk')),
)
.distinct(),
)
)
)
d = None
if dn.exists():
d: Napravleniya = dn[0]
df = d.issledovaniya_set.all()
if df.exists():
response["ok"] = True
response["has_doc_referral"] = False
response["has_expertise"] = False
response["has_paraclinic"] = False
response["has_microbiology"] = False
response["has_citology"] = False
response["has_gistology"] = False
response["has_monitoring"] = False
response["card_internal"] = d.client.base.internal_type
response["hospital_title"] = d.hospital_title
card_documents = d.client.get_card_documents()
snils_types = [x.pk for x in DocumentType.objects.filter(title='ะกะะะะก')]
snils_numbers = {x: card_documents[x] for x in snils_types if card_documents.get(x)}
response["patient"] = {
"fio_age": d.client.individual.fio(full=True),
"fio": d.client.individual.fio(),
"age": d.client.individual.age(),
"sex": d.client.individual.sex.lower(),
"card": d.client.number_with_type(),
"card_pk": d.client_id,
"pk": d.client_id,
"individual_pk": d.client.individual_id,
"has_dreg": DispensaryReg.objects.filter(date_end__isnull=True, card=d.client).exists(),
"has_benefit": BenefitReg.objects.filter(date_end__isnull=True, card=d.client).exists(),
"doc": "" if not d.doc else (d.doc.get_fio(dots=True) + ", " + d.doc.podrazdeleniye.title),
"imported_from_rmis": d.imported_from_rmis,
"imported_org": "" if not d.imported_org else d.imported_org.title,
"base": d.client.base_id,
"main_diagnosis": d.client.main_diagnosis,
"has_snils": bool(snils_numbers),
}
response["direction"] = {
"pk": d.pk,
"date": strdate(d.data_sozdaniya),
"all_confirmed": d.is_all_confirm(),
"diagnos": d.diagnos,
"fin_source": d.fin_title,
"fin_source_id": d.istochnik_f_id,
"tube": None,
"amd": d.amd_status,
"amd_number": d.amd_number,
"hospitalTFOMSCode": d.get_hospital_tfoms_id(),
}
response["researches"] = []
tube = None
medical_certificates = []
tmp_certificates = []
i: Issledovaniya
for i in df:
if i.research.is_doc_refferal:
response["has_doc_referral"] = True
if i.research.is_expertise:
response["has_expertise"] = True
if i.research.is_paraclinic or i.research.is_citology or i.research.is_gistology:
response["has_paraclinic"] = True
if i.research.is_microbiology:
response["has_microbiology"] = True
if i.research.is_citology:
response["has_citology"] = True
if i.research.is_gistology:
response["has_gistology"] = True
if i.research.is_monitoring:
response["has_monitoring"] = True
if i.research.microbiology_tube:
tube = {
"type": i.research.microbiology_tube.title,
"color": i.research.microbiology_tube.color,
"get": i.get_visit_date(force=True),
"n": d.microbiology_n,
"pk": i.pk,
}
transfer_d = Napravleniya.objects.filter(parent_auto_gen=i, cancel=False).first()
forbidden_edit = forbidden_edit_dir(d.pk)
more_forbidden = "ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ" not in g and "ะัะฐั ะบะพะฝััะปััะฐัะธะน" not in g and "ะัะฐั ััะฐัะธะพะฝะฐัะฐ" not in g and "t, ad, p" in g
cert_researches = ResearchesCertificate.objects.filter(research=i.research)
general_certificate = MedicalCertificates.objects.filter(general=True)
for cert in cert_researches:
if cert.medical_certificate.certificate_form not in tmp_certificates:
tmp_certificates.append(cert.medical_certificate.certificate_form)
medical_certificates.append({"form": cert.medical_certificate.certificate_form, "title": cert.medical_certificate.title})
for cert in general_certificate:
medical_certificates.append({"form": cert.certificate_form, "title": cert.title})
iss = {
"pk": i.pk,
"research": {
"pk": i.research_id,
"title": i.research.title,
"version": i.pk * 10000,
"is_paraclinic": i.research.is_paraclinic or i.research.is_citology or i.research.is_gistology,
"is_doc_refferal": i.research.is_doc_refferal,
"is_microbiology": i.research.is_microbiology,
"is_treatment": i.research.is_treatment,
"is_stom": i.research.is_stom,
"is_monitoring": i.research.is_monitoring,
"wide_headers": i.research.wide_headers,
"comment": i.localization.title if i.localization else i.comment,
"groups": [],
"can_transfer": i.research.can_transfer,
"is_extract": i.research.is_extract,
"transfer_direction": None if not transfer_d else transfer_d.pk,
"transfer_direction_iss": [] if not transfer_d else [r.research.title for r in Issledovaniya.objects.filter(napravleniye=transfer_d.pk)],
"r_type": i.research.r_type,
"show_more_services": i.research.show_more_services and not i.research.is_form and not i.research.is_microbiology,
"enabled_add_files": i.research.enabled_add_files,
},
"pacs": None if not i.research.podrazdeleniye or not i.research.podrazdeleniye.can_has_pacs else search_dicom_study(d.pk),
"examination_date": i.get_medical_examination(),
"templates": [],
"saved": i.time_save is not None,
"confirmed": i.time_confirmation is not None,
"confirmed_at": None if not i.time_confirmation else time.mktime(timezone.localtime(i.time_confirmation).timetuple()),
"allow_reset_confirm": i.allow_reset_confirm(request.user) and (not more_forbidden or TADP in i.research.title),
"more": [x.research_id for x in Issledovaniya.objects.filter(parent=i)],
"sub_directions": [],
"recipe": [],
"lab_comment": i.lab_comment,
"forbidden_edit": forbidden_edit,
"maybe_onco": i.maybe_onco,
"work_by": None,
"tube": tube,
"procedure_list": [],
"is_form": i.research.is_form,
"children_directions": [
{"pk": x.pk, "services": [y.research.get_title() for y in Issledovaniya.objects.filter(napravleniye=x)]} for x in Napravleniya.objects.filter(parent=i)
],
"parentDirection": None
if not i.napravleniye.parent
else {
"pk": i.napravleniye.parent.napravleniye_id,
"service": i.napravleniye.parent.research.get_title(),
"is_hospital": i.napravleniye.parent.research.is_hospital,
},
"whoSaved": None if not i.doc_save or not i.time_save else f"{i.doc_save}, {strdatetime(i.time_save)}",
"whoConfirmed": (None if not i.doc_confirmation or not i.time_confirmation else f"{i.doc_confirmation}, {strdatetime(i.time_confirmation)}"),
"whoExecuted": None if not i.time_confirmation or not i.executor_confirmation else str(i.executor_confirmation),
"countFiles": IssledovaniyaFiles.objects.filter(issledovaniye_id=i.pk).count(),
}
if i.research.is_microbiology:
conclusion_default = []
culture_default = []
iss["microbiology"] = {
"bacteries": [],
"conclusion": i.microbiology_conclusion or "",
"conclusionTemplates": [x for x in [*i.research.bac_conclusion_templates.split('|'), *conclusion_default] if x],
"cultureCommentsTemplates": [x for x in [*i.research.bac_culture_comments_templates.split('|'), *culture_default] if x],
}
for br in MicrobiologyResultCulture.objects.filter(issledovaniye=i):
bactery = {
"resultPk": br.pk,
"bacteryPk": br.culture.pk,
"bacteryTitle": br.culture.title,
"bacteryGroupTitle": br.culture.group_culture.title if br.culture.group_culture else '',
"koe": br.koe,
"comments": br.comments,
"antibiotics": [],
"selectedGroup": {},
"selectedAntibiotic": {},
"selectedSet": {},
}
for ar in MicrobiologyResultCultureAntibiotic.objects.filter(result_culture=br):
bactery["antibiotics"].append(
{
"pk": ar.antibiotic.pk,
"amount": ar.antibiotic_amount,
"resultPk": ar.pk,
"sri": ar.sensitivity,
"dia": ar.dia,
}
)
iss["microbiology"]["bacteries"].append(bactery)
if not force_form:
for sd in Napravleniya.objects.filter(parent=i):
iss["sub_directions"].append(
{
"pk": sd.pk,
"cancel": sd.cancel,
"researches": [x.research.title for x in Issledovaniya.objects.filter(napravleniye=sd)],
}
)
for procedure in ProcedureList.objects.filter(diary=d, cancel=False).distinct():
drug = procedure.drug
procedure_times = ProcedureListTimes.objects.filter(prescription=procedure).order_by("-times_medication")
times = []
if procedure_times.exists():
pt_orig = procedure_times[0]
for pt in ProcedureListTimes.objects.filter(prescription=procedure, times_medication__date=pt_orig.times_medication.date()).order_by('times_medication'):
t = pt.times_medication.astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%H:%M")
if t not in times:
times.append(t)
date_start = procedure.date_start.strftime("%Y-%m-%d")
date_end = procedure.date_end.strftime("%d.%m.%Y")
count_days = len(list(date_iter_range(procedure.date_start, procedure.date_end)))
iss["procedure_list"].append(
{
"pk": procedure.pk,
"drug": str(drug),
"drugPk": drug.pk,
"form_release": procedure.form_release_id or -1,
"method": procedure.method_id or -1,
"dosage": procedure.dosage,
"units": procedure.units,
"comment": procedure.comment,
"timesSelected": list(reversed(times)),
"dateStart": date_start,
"step": procedure.step or 1,
"dateEnd": date_end,
"countDays": count_days,
}
)
if not force_form and iss["research"]["is_doc_refferal"]:
iss = {
**iss,
"purpose": i.purpose_id or -1,
"place": i.place_id or -1,
"fin_source": i.fin_source_id or ((i.napravleniye.istochnik_f_id or -1) if i.napravleniye else -1),
"first_time": i.first_time,
"result": i.result_reception_id or -1,
"outcome": i.outcome_illness_id or -1,
"diagnos": i.diagnos,
"purpose_list": non_selected_visible_type(VisitPurpose),
"fin_source_list": non_selected_visible_type(IstochnikiFinansirovaniya, {"base": i.napravleniye.client.base}) if i.napravleniye else [],
"place_list": non_selected_visible_type(Place),
"result_list": non_selected_visible_type(ResultOfTreatment),
"outcome_list": non_selected_visible_type(Outcomes),
}
if not force_form:
for rp in i.recipe_set.all():
iss["recipe"].append(
{
"pk": rp.pk,
"prescription": rp.drug_prescription,
"taking": rp.method_of_taking,
"comment": rp.comment,
}
)
ParaclinicTemplateName.make_default(i.research)
rts = ParaclinicTemplateName.objects.filter(research=i.research, hide=False)
for rt in rts.order_by('title'):
iss["templates"].append(
{
"pk": rt.pk,
"title": rt.title,
}
)
for group in i.research.paraclinicinputgroups_set.all():
g = {
"pk": group.pk,
"order": group.order,
"title": group.title,
"show_title": group.show_title,
"hide": group.hide,
"display_hidden": False,
"fields": [],
"visibility": group.visibility,
}
for field in group.paraclinicinputfield_set.all():
result_field: ParaclinicResult = ParaclinicResult.objects.filter(issledovaniye=i, field=field).first()
field_type = field.field_type if not result_field else result_field.get_field_type()
values_to_input = ([] if not field.required or field_type not in [10, 12] or i.research.is_monitoring else ['- ะะต ะฒัะฑัะฐะฝะพ']) + json.loads(field.input_templates)
value = (
((field.default_value if field_type not in [3, 11, 13, 14, 30] else '') if not result_field else result_field.value)
if field_type not in [1, 20]
else (get_default_for_field(field_type) if not result_field else result_field.value)
)
if field_type in [2, 32, 33, 34, 36] and isinstance(value, str) and value.startswith('%'):
value = ''
elif field_type in [10, 12] and not value and len(values_to_input) > 0 and field.required:
value = values_to_input[0]
g["fields"].append(
{
"pk": field.pk,
"order": field.order,
"lines": field.lines,
"title": field.title,
"hide": field.hide,
"values_to_input": values_to_input,
"value": value,
"field_type": field_type,
"default_value": field.default_value,
"visibility": field.visibility,
"required": field.required,
"helper": field.helper,
"controlParam": field.control_param,
}
)
iss["research"]["groups"].append(g)
if not without_issledovaniye or iss['pk'] not in without_issledovaniye:
response["researches"].append(iss)
if not force_form and response["has_doc_referral"]:
response["anamnesis"] = d.client.anamnesis_of_life
d1, d2 = start_end_year()
disp_data = sql_func.dispensarization_research(d.client.individual.sex, d.client.individual.age_for_year(), d.client_id, d1, d2)
status_disp = 'finished'
if not disp_data:
status_disp = 'notneed'
else:
for disp_row in disp_data:
if not disp_row[4]:
status_disp = 'need'
break
response["status_disp"] = status_disp
response["disp_data"] = disp_data
response["medical_certificates"] = medical_certificates
f = True
hospital = d and d.get_hospital()
hospital_access = not hospital or hospital == request.user.doctorprofile.hospital or request.user.is_superuser
# TODO: ะดะปั ะฟะพะปะฝะพะณะพ ะทะฐะฟัะตัะฐ ะดะพัััะฟะฐ ะธะท ะดััะณะธั
ะพัะณะฐะฝะธะทะฐัะธะน ัะฑัะฐัั response.get("has_monitoring") (ัะฐะบ ะฟัะพะฒะตััะตััั ัะพะปัะบะพ ะดะปั ะผะพะฝะธัะพัะธะฝะณะพะฒ)
if response.get("has_monitoring") and not hospital_access:
return status_response(False, "ะะตั ะดะพัััะฟะฐ")
if not f:
response["message"] = "ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฝะฐะนะดะตะฝะพ"
return JsonResponse(response)
def get_default_for_field(field_type):
    """Return the default value for a paraclinic input field type.

    Type 1 is a date field and type 20 is a time field; both default to the
    current moment rendered in the matching format.  Every other field type
    defaults to an empty string.
    """
    format_by_type = {1: '%Y-%m-%d', 20: '%H:%M'}
    fmt = format_by_type.get(field_type)
    if fmt is None:
        return ''
    return strfdatetime(current_time(), fmt)
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะัะฐั ััะฐัะธะพะฝะฐัะฐ", "t, ad, p")
def directions_anesthesia_result(request):
response = {"ok": False, "message": ""}
rb = json.loads(request.body)
temp_result = rb.get("temp_result", {})
research_data = rb.get("research_data", {})
action = rb.get("action", "add")
result = ParaclinicResult.anesthesia_value_save(research_data['iss_pk'], research_data['field_pk'], temp_result, action)
if result:
response = {"ok": True, "message": ""}
return JsonResponse(response)
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะัะฐั ััะฐัะธะพะฝะฐัะฐ", "t, ad, p")
def directions_anesthesia_load(request):
rb = json.loads(request.body)
research_data = rb.get("research_data", '')
if research_data is None:
return JsonResponse({'data': 'ะัะธะฑะบะฐ ะฒั
ะพะดะฝัั
ะดะฐะฝะฝัั
'})
anesthesia_data = ParaclinicResult.anesthesia_value_get(research_data['iss_pk'], research_data["field_pk"])
tb_data = []
row_category = {}
if anesthesia_data:
result = eval(anesthesia_data)
if isinstance(result, dict):
cols_template = [''] * (len(result['times']) + 1)
times_row = ['ะะฐัะฐะผะตัั']
times_row.extend(result['times'])
times_row.append('ะกัะผะผะฐ')
def made_structure(type):
for i in result[type]:
sum = 0
current_param = ['' for i in cols_template]
current_param[0] = i
for k, v in result[i].items():
if k in times_row:
index = times_row.index(k)
current_param[index] = v
if type in ['potent_drugs', 'narcotic_drugs'] and v:
v = v.replace(',', '.')
if check_float_is_valid(v):
sum += float(v)
current_param.append(round(sum, 4) or '')
current_param_temp = set([current_param[i] for i in range(1, len(current_param))])
if len(current_param_temp) == 1 and '' in current_param_temp:
continue
tb_data.append(current_param)
row_category[len(tb_data) - 1] = type
tb_data.append(times_row)
made_structure('patient_params')
made_structure('potent_drugs')
made_structure('narcotic_drugs')
return JsonResponse({'data': tb_data, 'row_category': row_category})
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะัะฐั ััะฐัะธะพะฝะฐัะฐ", "t, ad, p", "ะะฐะฟะพะปะฝะตะฝะธะต ะผะพะฝะธัะพัะธะฝะณะพะฒ", "ะกะฒะธะดะตัะตะปัััะฒะพ ะพ ัะผะตััะธ-ะดะพัััะฟ")
def directions_paraclinic_result(request):
TADP = SettingManager.get("tadp", default='ะขะตะผะฟะตัะฐัััะฐ', default_type='s')
response = {
"ok": False,
"message": "",
"execData": {
"whoSaved": None,
"whoConfirmed": None,
"whoExecuted": None,
},
}
rb = json.loads(request.body)
request_data = rb.get("data", {})
pk = request_data.get("pk", -1)
stationar_research = request_data.get("stationar_research", -1)
with_confirm = rb.get("with_confirm", False)
visibility_state = rb.get("visibility_state", {})
v_g = visibility_state.get("groups", {})
v_f = visibility_state.get("fields", {})
recipe = request_data.get("recipe", [])
procedure_list = request_data.get("procedure_list", [])
tube = request_data.get("direction", {}).get("tube", {})
force = rb.get("force", False)
diss = Issledovaniya.objects.filter(pk=pk, time_confirmation__isnull=True)
if (
force
or diss.filter(
Q(research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
| Q(research__is_doc_refferal=True)
| Q(research__is_treatment=True)
| Q(research__is_gistology=True)
| Q(research__is_stom=True)
| Q(research__is_microbiology=True)
| Q(research__is_form=True)
| Q(research__is_monitoring=True)
| Q(research__is_expertise=True)
).exists()
or request.user.is_staff
):
iss = Issledovaniya.objects.get(pk=pk)
g = [str(x) for x in request.user.groups.all()]
tadp = TADP in iss.research.title
more_forbidden = "ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ" not in g and "ะัะฐั ะบะพะฝััะปััะฐัะธะน" not in g and "ะัะฐั ััะฐัะธะพะฝะฐัะฐ" not in g and "t, ad, p" in g
if not iss.research.is_expertise and (forbidden_edit_dir(iss.napravleniye_id) or (more_forbidden and not tadp)):
response["message"] = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะทะฐะฟัะตัะตะฝะพ"
return JsonResponse(response)
if procedure_list:
with transaction.atomic():
for proc_data in procedure_list:
if not iss.napravleniye or not iss.napravleniye.parent or proc_data.get('remove'):
continue
user_timezone = pytz.timezone(TIME_ZONE)
history = iss.napravleniye.parent.napravleniye
diary = iss.napravleniye
card = iss.napravleniye.client
drug = Drugs.objects.get(pk=proc_data["drugPk"])
form_release = FormRelease.objects.filter(pk=proc_data["form_release"]).first()
if not form_release:
response["message"] = f"ะฃ ะฝะฐะทะฝะฐัะตะฝะธั {drug} ะฝะต ะทะฐะฟะพะปะฝะตะฝะฐ ัะพัะผะฐ ะฒัะฟััะบะฐ"
return JsonResponse(response)
method = MethodsReception.objects.filter(pk=proc_data["method"]).first()
if not form_release:
response["message"] = f"ะฃ ะฝะฐะทะฝะฐัะตะฝะธั {drug} ะฝะต ะทะฐะฟะพะปะฝะตะฝ ัะฟะพัะพะฑ ะฟัะธัะผะฐ"
return JsonResponse(response)
dosage = proc_data["dosage"]
if not form_release:
response["message"] = f"ะฃ ะฝะฐะทะฝะฐัะตะฝะธั {drug} ะฝะต ะทะฐะฟะพะปะฝะตะฝะฐ ะดะพะทะธัะพะฒะบะฐ"
return JsonResponse(response)
units = proc_data.get("units", "")
if not units:
response["message"] = f"ะฃ ะฝะฐะทะฝะฐัะตะฝะธั {drug} ะฝะต ะฒัะฑัะฐะฝั ะตะดะธะฝะธัั ะธะทะผะตัะตะฝะธั"
return JsonResponse(response)
times = proc_data["timesSelected"]
if not times:
response["message"] = f"ะฃ ะฝะฐะทะฝะฐัะตะฝะธั {drug} ะฝะต ะฒัะฑัะฐะฝะพ ะฝะธ ะพะดะฝะพะณะพ ะฒัะตะผะตะฝะธ ะฟัะธัะผะฐ"
return JsonResponse(response)
comment = proc_data.get("comment", "")
date_start = try_strptime(proc_data['dateStart'], ('%d.%m.%Y', '%Y-%m-%d')).astimezone(user_timezone)
step = int(proc_data['step'])
if step < 1:
step = 1
elif step > 5:
step = 5
date_end = try_strptime(proc_data['dateEnd'], ('%d.%m.%Y', '%Y-%m-%d')).astimezone(user_timezone)
parent_child_data = rb.get('parent_child_data', None)
if proc_data.get('isNew') and parent_child_data:
iss_hosp = Issledovaniya.objects.get(napravleniye_id=parent_child_data['current_direction'])
proc_obj = ProcedureList(
research=iss_hosp.research,
history=history,
diary=diary,
card=card,
drug=drug,
form_release=form_release,
method=method,
dosage=dosage,
units=units,
comment=comment,
date_start=date_start,
step=step,
date_end=date_end,
doc_create=request.user.doctorprofile,
)
proc_obj.save()
else:
proc_obj = ProcedureList.objects.get(pk=proc_data["pk"])
proc_obj.form_release = form_release
proc_obj.method = method
proc_obj.dosage = dosage
proc_obj.units = units
proc_obj.comment = comment
proc_obj.date_start = date_start
proc_obj.step = step
proc_obj.date_end = date_end
proc_obj.cancel = False
proc_obj.who_cancel = None
proc_obj.save()
ProcedureListTimes.objects.filter(prescription=proc_obj, executor__isnull=True).delete()
for date in date_iter_range(date_start, date_end, step=step):
for pc_time in times:
times_medication = datetime.strptime(f"{date:%Y-%m-%d} {pc_time}", '%Y-%m-%d %H:%M').astimezone(user_timezone)
if not ProcedureListTimes.objects.filter(prescription=proc_obj, times_medication=times_medication).exists():
ProcedureListTimes.objects.create(prescription=proc_obj, times_medication=times_medication)
recipe_no_remove = []
for r in recipe:
if r.get("remove", False):
continue
if r.get("isNew", False):
rn = Recipe(issledovaniye=iss, drug_prescription=r["prescription"], method_of_taking=r["taking"], comment=r["comment"])
rn.save()
else:
rn = Recipe.objects.get(pk=r["pk"])
MethodsOfTaking.dec(rn.drug_prescription, rn.method_of_taking)
rn.drug_prescription = r["prescription"]
rn.method_of_taking = r["taking"]
rn.comment = r["comment"]
rn.save()
if rn.method_of_taking:
MethodsOfTaking.inc(rn.drug_prescription, rn.method_of_taking)
recipe_no_remove.append(rn.pk)
Recipe.objects.filter(issledovaniye=iss).exclude(pk__in=recipe_no_remove).delete()
if tube:
iss.napravleniye.microbiology_n = tube.get("n", "")
iss.napravleniye.save()
count = 0
date_death = None
for group in request_data["research"]["groups"]:
if not v_g.get(str(group["pk"]), True):
ParaclinicResult.objects.filter(issledovaniye=iss, field__group__pk=group["pk"]).delete()
continue
for field in group["fields"]:
if not v_f.get(str(field["pk"]), True):
ParaclinicResult.objects.filter(issledovaniye=iss, field__pk=field["pk"]).delete()
continue
if not ParaclinicInputField.objects.filter(pk=field["pk"]).exists():
continue
f = ParaclinicInputField.objects.get(pk=field["pk"])
if f.title == "ะะฐัะฐ ัะผะตััะธ":
date_death = datetime.strptime(field["value"], "%Y-%m-%d").date()
if f.field_type == 21:
continue
if not ParaclinicResult.objects.filter(issledovaniye=iss, field=f).exists():
f_result = ParaclinicResult(issledovaniye=iss, field=f, value="")
else:
f_result = ParaclinicResult.objects.filter(issledovaniye=iss, field=f)[0]
f_result.value = field["value"]
f_result.field_type = f.field_type
if f.field_type in [27, 28, 29, 32, 33, 34, 35]:
try:
val = json.loads(field["value"])
except:
val = {}
f_result.value_json = val
f_result.save()
if iss.research.is_monitoring:
if not MonitoringResult.objects.filter(issledovaniye=iss, research=iss.research, napravleniye=iss.napravleniye, field_id=field["pk"]).exists():
monitoring_result = MonitoringResult.objects.filter(issledovaniye=iss, research=iss.research, napravleniye=iss.napravleniye)[0]
monitoring_result.group_id = group['pk']
monitoring_result.group_order = group['order']
monitoring_result.field_order = field['order']
monitoring_result.field_id = field["pk"]
monitoring_result.value_text = ""
if count > 0:
monitoring_result.pk = None
else:
monitoring_result: MonitoringResult = MonitoringResult.objects.filter(issledovaniye=iss, research=iss.research, napravleniye=iss.napravleniye, field_id=field["pk"])[
0
]
monitoring_result.value_text = ""
if field['field_type'] == 18 or field['field_type'] == 3 or field['field_type'] == 19:
monitoring_result.value_aggregate = field["value"]
else:
monitoring_result.value_aggregate = None
monitoring_result.value_text = field["value"]
monitoring_result.field_type = field['field_type']
monitoring_result.save()
if f.field_type in [16, 17] and iss.napravleniye.parent and iss.napravleniye.parent.research.is_hospital:
try:
val = json.loads(str(field["value"]))
except Exception:
val = None
if f.field_type == 16:
if with_confirm:
if isinstance(val, list):
iss.napravleniye.parent.aggregate_lab = val
elif isinstance(val, dict) and val.get("directions"):
iss.napravleniye.parent.aggregate_lab = val["directions"]
else:
iss.napravleniye.parent.aggregate_lab = None
else:
iss.napravleniye.parent.aggregate_lab = None
elif f.field_type == 17:
if with_confirm:
if isinstance(val, list):
iss.napravleniye.parent.aggregate_desc = val
elif isinstance(val, dict) and val.get("directions"):
iss.napravleniye.parent.aggregate_desc = val["directions"]
else:
iss.napravleniye.parent.aggregate_desc = None
else:
iss.napravleniye.parent.aggregate_desc = None
iss.napravleniye.parent.save()
count += 1
iss.doc_save = request.user.doctorprofile
iss.time_save = timezone.now()
if iss.research.is_doc_refferal:
iss.medical_examination = request_data.get("examination_date") or timezone.now().date()
if with_confirm:
work_by = request_data.get("work_by")
if work_by and isinstance(work_by, str) and work_by.isdigit():
iss.doc_confirmation_id = work_by
iss.executor_confirmation = request.user.doctorprofile
else:
iss.doc_confirmation = request.user.doctorprofile
iss.time_confirmation = timezone.now()
if iss.napravleniye:
iss.napravleniye.qr_check_token = None
iss.napravleniye.save(update_fields=['qr_check_token'])
if date_death:
client_obj = iss.napravleniye.client
client_obj.death_date = date_death
client_obj.save()
if not iss.napravleniye.visit_who_mark or not iss.napravleniye.visit_date:
iss.napravleniye.visit_who_mark = request.user.doctorprofile
iss.napravleniye.visit_date = timezone.now()
iss.napravleniye.save()
if iss.research.is_microbiology:
mb = request_data.get("microbiology", {})
if mb:
iss.microbiology_conclusion = mb.get('conclusion')
has_bacteries = []
has_anti = []
for br in mb.get('bacteries', []):
if br['resultPk'] == -1:
bactery = MicrobiologyResultCulture(issledovaniye=iss, culture_id=br['bacteryPk'], koe=br['koe'])
else:
bactery = MicrobiologyResultCulture.objects.get(pk=br['resultPk'])
bactery.culture_id = br['bacteryPk']
bactery.koe = br['koe']
bactery.comments = br.get('comments', '')
bactery.save()
has_bacteries.append(bactery.pk)
for ar in br['antibiotics']:
if ar['resultPk'] == -1:
anti = MicrobiologyResultCultureAntibiotic(
result_culture=bactery,
antibiotic_id=ar['pk'],
sensitivity=ar['sri'],
dia=ar['dia'],
antibiotic_amount=ar.get('amount', ''),
)
else:
anti = MicrobiologyResultCultureAntibiotic.objects.get(pk=ar['resultPk'])
anti.antibiotic_id = ar['pk']
anti.sensitivity = ar['sri']
anti.dia = ar['dia']
anti.antibiotic_amount = ar.get('amount', '')
anti.save()
has_anti.append(anti.pk)
MicrobiologyResultCulture.objects.filter(issledovaniye=iss).exclude(pk__in=has_bacteries).delete()
MicrobiologyResultCultureAntibiotic.objects.filter(result_culture__issledovaniye=iss).exclude(pk__in=has_anti).delete()
iss.purpose_id = none_if_minus_1(request_data.get("purpose"))
iss.place_id = none_if_minus_1(request_data.get("place"))
iss.first_time = request_data.get("first_time", False)
iss.result_reception_id = none_if_minus_1(request_data.get("result"))
iss.outcome_illness_id = none_if_minus_1(request_data.get("outcome"))
iss.fin_source_id = none_if_minus_1(request_data.get("fin_source"))
iss.maybe_onco = request_data.get("maybe_onco", False)
iss.diagnos = request_data.get("diagnos", "")
iss.lab_comment = request_data.get("lab_comment", "")
if stationar_research != -1:
iss.gen_direction_with_research_after_confirm_id = stationar_research
iss.save()
more = request_data.get("more", [])
h = []
for m in more:
if not Issledovaniya.objects.filter(parent=iss, doc_save=request.user.doctorprofile, research_id=m):
i = Issledovaniya.objects.create(parent=iss, research_id=m)
i.doc_save = request.user.doctorprofile
i.time_save = timezone.now()
i.creator = request.user.doctorprofile
if with_confirm:
work_by = request_data.get("work_by")
if work_by and isinstance(work_by, str) and work_by.isdigit():
i.doc_confirmation_id = work_by
i.executor_confirmation = request.user.doctorprofile
else:
i.doc_confirmation = request.user.doctorprofile
i.time_confirmation = timezone.now()
if i.napravleniye:
i.napravleniye.qr_check_token = None
i.napravleniye.save(update_fields=['qr_check_token'])
i.save()
h.append(i.pk)
else:
for i2 in Issledovaniya.objects.filter(parent=iss, doc_save=request.user.doctorprofile, research_id=m):
i2.time_save = timezone.now()
if with_confirm:
work_by = request_data.get("work_by")
if work_by and isinstance(work_by, str) and work_by.isdigit():
i2.doc_confirmation_id = work_by
i2.executor_confirmation = request.user.doctorprofile
else:
i2.doc_confirmation = request.user.doctorprofile
i2.time_confirmation = timezone.now()
if i2.napravleniye:
i2.napravleniye.qr_check_token = None
i2.napravleniye.save(update_fields=['qr_check_token'])
i2.save()
h.append(i2.pk)
Issledovaniya.objects.filter(parent=iss).exclude(pk__in=h).delete()
response["ok"] = True
response["amd"] = iss.napravleniye.amd_status
response["amd_number"] = iss.napravleniye.amd_number
response["confirmed_at"] = None if not iss.time_confirmation else time.mktime(timezone.localtime(iss.time_confirmation).timetuple())
response["execData"] = {
"whoSaved": None if not iss.doc_save or not iss.time_save else f"{iss.doc_save}, {strdatetime(iss.time_save)}",
"whoConfirmed": (None if not iss.doc_confirmation or not iss.time_confirmation else f"{iss.doc_confirmation}, {strdatetime(iss.time_confirmation)}"),
"whoExecuted": None if not iss.time_confirmation or not iss.executor_confirmation else str(iss.executor_confirmation),
}
Log(key=pk, type=13, body="", user=request.user.doctorprofile).save()
if with_confirm:
if iss.napravleniye:
iss.napravleniye.send_task_result()
if stationar_research != -1:
iss.gen_after_confirm(request.user)
transfer_d = Napravleniya.objects.filter(parent_auto_gen=iss, cancel=False).first()
response["transfer_direction"] = None if not transfer_d else transfer_d.pk
response["transfer_direction_iss"] = [] if not transfer_d else [r.research.title for r in Issledovaniya.objects.filter(napravleniye=transfer_d.pk)]
if iss.maybe_onco:
card_pk = iss.napravleniye.client.pk
dstart_onco = strdate(current_time(only_date=True))
dispensery_onco = json.dumps(
{'card_pk': card_pk, 'pk': -1, 'data': {'date_start': dstart_onco, 'date_end': '', 'why_stop': '', 'close': False, 'diagnos': 'U999 ะะฝะบะพะฟะพะดะพะทัะตะฝะธะต', 'illnes': ''}}
)
dispensery_obj = HttpRequest()
dispensery_obj._body = dispensery_onco
dispensery_obj.user = request.user
save_dreg(dispensery_obj)
parent_child_data = rb.get('parent_child_data', None)
if parent_child_data:
parent = int(parent_child_data['parent_iss'])
if parent > -1:
parent_iss = Issledovaniya.objects.get(pk=parent)
Napravleniya.objects.filter(pk=parent_child_data['current_direction']).update(parent=parent_iss, cancel=False)
if parent == -1:
Napravleniya.objects.filter(pk=parent_child_data['current_direction']).update(parent=None)
parent = int(parent_child_data.get('current_iss', -1))
child = int(parent_child_data.get('child_iss', -1))
if parent > -1 and child > -1:
parent_iss = Issledovaniya.objects.get(pk=parent)
child_iss = Issledovaniya.objects.values_list('napravleniye_id').get(pk=child)
child_direction = Napravleniya.objects.get(pk=child_iss[0])
if child_direction.parent:
Napravleniya.objects.filter(pk=child_iss[0]).update(parent=parent_iss, cancel=False)
Log(key=pk, type=14, body="", user=request.user.doctorprofile).save()
forbidden_edit = forbidden_edit_dir(iss.napravleniye_id)
response["forbidden_edit"] = forbidden_edit or more_forbidden
response["soft_forbidden"] = not forbidden_edit
return JsonResponse(response)
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะัะฐั ััะฐัะธะพะฝะฐัะฐ", "t, ad, p")
def directions_paraclinic_confirm(request):
TADP = SettingManager.get("tadp", default='ะขะตะผะฟะตัะฐัััะฐ', default_type='s')
response = {"ok": False, "message": ""}
request_data = json.loads(request.body)
pk = request_data.get("iss_pk", -1)
diss = Issledovaniya.objects.filter(pk=pk, time_confirmation__isnull=True)
if diss.filter(
Q(research__podrazdeleniye=request.user.doctorprofile.podrazdeleniye)
| Q(research__is_doc_refferal=True)
| Q(research__is_treatment=True)
| Q(research__is_slave_hospital=True)
| Q(research__is_stom=True)
).exists():
iss = Issledovaniya.objects.get(pk=pk)
g = [str(x) for x in request.user.groups.all()]
tadp = TADP in iss.research.title
more_forbidden = "ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ" not in g and "ะัะฐั ะบะพะฝััะปััะฐัะธะน" not in g and "ะัะฐั ััะฐัะธะพะฝะฐัะฐ" not in g and "t, ad, p" in g
if forbidden_edit_dir(iss.napravleniye_id) or (more_forbidden and not tadp):
response["message"] = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะทะฐะฟัะตัะตะฝะพ"
return JsonResponse(response)
t = timezone.now()
if not iss.napravleniye.visit_who_mark or not iss.napravleniye.visit_date:
iss.napravleniye.visit_who_mark = request.user.doctorprofile
iss.napravleniye.visit_date = t
iss.napravleniye.save()
iss.doc_confirmation = request.user.doctorprofile
if iss.napravleniye:
iss.napravleniye.qr_check_token = None
iss.napravleniye.save(update_fields=['qr_check_token'])
iss.time_confirmation = t
iss.save()
iss.gen_after_confirm(request.user)
for i in Issledovaniya.objects.filter(parent=iss):
i.doc_confirmation = request.user.doctorprofile
i.time_confirmation = t
i.save()
if i.napravleniye:
i.napravleniye.qr_check_token = None
i.napravleniye.save(update_fields=['qr_check_token'])
if iss.napravleniye:
iss.napravleniye.send_task_result()
response["ok"] = True
response["amd"] = iss.napravleniye.amd_status
response["amd_number"] = iss.napravleniye.amd_number
response["forbidden_edit"] = forbidden_edit_dir(iss.napravleniye_id)
response["confirmed_at"] = None if not iss.time_confirmation else time.mktime(timezone.localtime(iss.time_confirmation).timetuple())
Log(key=pk, type=14, body=json.dumps(request_data), user=request.user.doctorprofile).save()
return JsonResponse(response)
@group_required(
    "ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะกะฑัะพั ะฟะพะดัะฒะตัะถะดะตะฝะธะน ัะตะทัะปััะฐัะพะฒ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะัะฐั ััะฐัะธะพะฝะฐัะฐ", "ะกะฑัะพั ะฟะพะดัะฒะตัะถะดะตะฝะธั ะฟะตัะตะฒะพะดะฝะพะณะพ ัะฟะธะบัะธะทะฐ", "ะกะฑัะพั ะฟะพะดัะฒะตัะถะดะตะฝะธั ะฒัะฟะธัะบะธ", "t, ad, p"
)
def directions_paraclinic_confirm_reset(request):
    """Reset the confirmation of a paraclinic result (and its children).

    Expects JSON body with ``iss_pk``.  On success clears the confirming
    doctor/executor/time on the issledovaniye and all child services, drops
    EDS/AMD state on the direction and logs the previous confirming doctor.
    """
    TADP = SettingManager.get("tadp", default='ะขะตะผะฟะตัะฐัััะฐ', default_type='s')
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    pk = request_data.get("iss_pk", -1)
    if Issledovaniya.objects.filter(pk=pk).exists():
        iss: Issledovaniya = Issledovaniya.objects.get(pk=pk)
        is_transfer = iss.research.can_transfer
        is_extract = iss.research.is_extract
        g = [str(x) for x in request.user.groups.all()]
        tadp = TADP in iss.research.title
        # "t, ad, p"-only users may reset nothing but TADP (e.g. temperature) sheets.
        more_forbidden = "ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ" not in g and "ะัะฐั ะบะพะฝััะปััะฐัะธะน" not in g and "ะัะฐั ััะฐัะธะพะฝะฐัะฐ" not in g and "t, ad, p" in g
        allow_reset = iss.allow_reset_confirm(request.user) and (not more_forbidden or tadp)
        if not allow_reset:
            response["message"] = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะทะฐะฟัะตัะตะฝะพ. ะะฐะฟัะพัะธัะต ัะฑัะพั ะฟะพะดัะฒะตัะถะดะตะฝะธั ั ะฐะดะผะธะฝะธัััะฐัะพัะฐ"
            return JsonResponse(response)
        # The guard above already returned when the reset is disallowed, so the
        # old `if allow_reset: ... else: ...` re-check (whose else-branch with the
        # "ัะฐะทัะตัะตะฝ ะฒ ัะตัะตะฝะธะธ N ะผะธะฝัั" message was unreachable) has been removed.
        predoc = {"fio": iss.doc_confirmation_fio, "pk": iss.doc_confirmation_id, "direction": iss.napravleniye_id}
        iss.doc_confirmation = iss.executor_confirmation = iss.time_confirmation = None
        iss.n3_odii_uploaded_task_id = None
        iss.save()
        transfer_d = Napravleniya.objects.filter(parent_auto_gen=iss, cancel=False).first()
        if transfer_d:
            # NOTE(review): nothing is modified before this save; it presumably
            # exists to fire save-signals/timestamps on the auto-generated
            # transfer direction — confirm before removing.
            transfer_d.save()
        if iss.napravleniye.result_rmis_send:
            # The result was already exported to RMIS — retract it there too.
            c = Client()
            c.directions.delete_services(iss.napravleniye, request.user.doctorprofile)
        response["ok"] = True
        for i in Issledovaniya.objects.filter(parent=iss):
            i.doc_confirmation = None
            i.executor_confirmation = None
            i.time_confirmation = None
            i.save()
        if iss.napravleniye:
            n: Napravleniya = iss.napravleniye
            n.need_resend_amd = False
            n.eds_total_signed = False
            n.eds_total_signed_at = None
            n.vi_id = None
            n.save(update_fields=['eds_total_signed', 'eds_total_signed_at', 'need_resend_amd', 'vi_id'])
        Log(key=pk, type=24, body=json.dumps(predoc), user=request.user.doctorprofile).save()
        response["amd"] = iss.napravleniye.amd_status
        response["amd_number"] = iss.napravleniye.amd_number
        response["is_transfer"] = is_transfer
        response["is_extract"] = is_extract
        if is_transfer:
            response["forbidden_edit"] = forbidden_edit_dir(iss.napravleniye_id)
    return JsonResponse(response)
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะะฐะฟะพะปะฝะตะฝะธะต ะผะพะฝะธัะพัะธะฝะณะพะฒ", "ะกะฒะธะดะตัะตะปัััะฒะพ ะพ ัะผะตััะธ-ะดะพัััะฟ")
def directions_paraclinic_history(request):
response = {"directions": []}
request_data = json.loads(request.body)
date_start, date_end = try_parse_range(request_data["date"])
has_dirs = []
for direction in (
Napravleniya.objects.filter(
Q(issledovaniya__doc_save=request.user.doctorprofile)
| Q(issledovaniya__doc_confirmation=request.user.doctorprofile)
| Q(issledovaniya__executor_confirmation=request.user.doctorprofile)
)
.filter(Q(issledovaniya__time_confirmation__range=(date_start, date_end)) | Q(issledovaniya__time_save__range=(date_start, date_end)))
.order_by("-issledovaniya__time_save", "-issledovaniya__time_confirmation")
):
if direction.pk in has_dirs:
continue
has_dirs.append(direction.pk)
d = {
"pk": direction.pk,
"date": strdate(direction.data_sozdaniya),
"patient": direction.client.individual.fio(full=True, direction=direction),
"card": direction.client.number_with_type(),
"iss": [],
"all_confirmed": True,
"all_saved": True,
"amd": direction.amd_status,
"amd_number": direction.amd_number,
}
for i in Issledovaniya.objects.filter(napravleniye=direction).order_by("pk"):
iss = {"title": i.research.get_title(), "saved": i.time_save is not None, "confirmed": i.time_confirmation is not None}
d["iss"].append(iss)
if not iss["saved"]:
d["all_saved"] = False
if not iss["confirmed"]:
d["all_confirmed"] = False
response["directions"].append(d)
return JsonResponse(response)
def directions_patient_history(request):
    """Return earlier confirmed studies of the same research (or same speciality)
    for the patient of the given study, newest first.

    With ``isSameParent`` and a hospital service of site_type 1, candidates are
    narrowed to the same parent direction.
    """
    request_data = json.loads(request.body)
    current_iss = Issledovaniya.objects.get(pk=request_data["pk"])

    # Studies of the same research; widen to the whole speciality when set.
    research_pks = [current_iss.research.pk]
    if current_iss.research.speciality:
        same_speciality = list(Researches.objects.values_list('pk', flat=True).filter(speciality=current_iss.research.speciality))
        if len(same_speciality) > 0:
            research_pks.extend(same_speciality)

    candidates = Issledovaniya.objects.filter(
        time_confirmation__isnull=False, research_id__in=research_pks, napravleniye__client__individual=current_iss.napravleniye.client.individual
    )

    hosp_service = HospitalService.objects.filter(slave_research=current_iss.research).first()
    site_type = hosp_service.site_type if hosp_service else -1
    if request_data.get("isSameParent", False) and site_type == 1:
        candidates = candidates.filter(napravleniye__parent=current_iss.napravleniye.parent)

    data = []
    for prev in candidates.order_by('-time_confirmation').exclude(pk=request_data["pk"]):
        data.append({"pk": prev.pk, "direction": prev.napravleniye_id, "date": strdate(prev.time_confirmation) + ' ' + prev.research.short_title + ' (' + prev.doc_confirmation.get_fio() + ')'})
    return JsonResponse({"data": data})
def directions_data_by_fields(request):
    """Copy confirmed protocol field values of study ``pk`` into the field layout
    of study ``pk_dest`` (defaults to ``pk`` itself).

    Same research: values are keyed by the source field pk directly. Different
    research: values are mapped through the shared ``attached`` marker. Fields of
    type 30 (generated numbers) are never copied.

    Returns JSON {"data": {field_pk: value, ...}} — empty when the source study
    is not confirmed.
    """
    data = {}
    request_data = json.loads(request.body)
    i = Issledovaniya.objects.get(pk=request_data["pk"])
    pk_dest = request_data.get("pk_dest", request_data["pk"])
    i_dest = Issledovaniya.objects.get(pk=pk_dest)
    if i.time_confirmation:
        if i.research == i_dest.research:
            # Same research: field pks match one-to-one; copy values directly.
            for field in ParaclinicInputField.objects.filter(group__research=i.research, group__hide=False, hide=False):
                if ParaclinicResult.objects.filter(issledovaniye=i, field=field).exists() and field.field_type != 30:
                    data[field.pk] = ParaclinicResult.objects.filter(issledovaniye=i, field=field)[0].value
        else:
            # Different research: map each source value to the first destination
            # field carrying the same "attached" marker.
            for field in ParaclinicInputField.objects.filter(group__research=i.research, group__hide=False, hide=False):
                if ParaclinicResult.objects.filter(issledovaniye=i, field=field).exists():
                    for field_dest in ParaclinicInputField.objects.filter(group__research=i_dest.research, group__hide=False, hide=False):
                        if field_dest.attached and field_dest.attached == field.attached and field_dest.field_type != 30:
                            data[field_dest.pk] = ParaclinicResult.objects.filter(issledovaniye=i, field=field)[0].value
                            break
    # BUG FIX: the original returned None (invalid for a Django view) when the
    # source study was not confirmed; now a JSON response is always returned.
    return JsonResponse({"data": data})
@login_required
def last_fraction_result(request):
    """Return the most recent result of a single lab fraction for a patient.

    Request body: {"clientPk": ..., "fractionPk": ...}. Response result is None
    when the patient has no confirmed value for that fraction.
    """
    payload = json.loads(request.body)
    rows = get_fraction_result(payload["clientPk"], int(payload["fractionPk"]))
    result = None
    if rows:
        newest = rows[0]
        result = {"direction": newest[1], "date": newest[4], "value": newest[5]}
    return JsonResponse({"result": result})
@login_required
def last_field_result(request):
    """Resolve a protocol-field placeholder to a value for the given patient.

    ``fieldPk`` is either a ``%...`` macro (patient/mother demographics,
    documents, addresses, direction-related data, aggregates) or one or more
    numeric field pks combined with ``|`` (OR), ``&`` (AND) or ``@`` (field
    group) that are looked up via ``field_get_link_data``.

    Response: {"result": {...} or None}.
    """
    # "null" is the default so .find() below never sees a missing key.
    request_data = {"fieldPk": "null", **json.loads(request.body)}
    client_pk = request_data["clientPk"]
    logical_or, logical_and, logical_group_or = False, False, False
    field_is_link, field_is_aggregate_operation, field_is_aggregate_proto_description = False, False, False
    field_pks, operations_data, aggregate_data = None, None, None
    result = None
    c = Card.objects.get(pk=client_pk)
    data = c.get_data_individual()
    # Mother's data is only available when linked on the card; the %mother_*
    # branches below assume the link exists (otherwise mother_data is None).
    mother_obj = None
    mother_data = None
    if c.mother:
        mother_obj = c.mother
        mother_data = mother_obj.get_data_individual()
    if request_data["fieldPk"].find('%work_place') != -1:
        # Free-text work place wins over the dictionary-backed one.
        if c.work_place:
            work_place = c.work_place
        elif c.work_place_db:
            work_place = c.work_place_db.title
        else:
            work_place = ""
        result = {"value": work_place}
    elif request_data["fieldPk"].find('%hospital') != -1:
        num_dir = get_current_direction(request_data["iss_pk"])
        hosp_title = Napravleniya.objects.get(pk=num_dir).hospital_title
        result = {"value": hosp_title}
    elif request_data["fieldPk"].find('%parent_dir_data') != -1:
        num_dir = get_current_direction(request_data["iss_pk"])
        iss_parent = Napravleniya.objects.get(pk=num_dir).parent
        research = iss_parent.research.title
        direction_num = iss_parent.napravleniye_id
        patient_data = f"ะะฐัะธะตะฝั-{data['fio']}. ะ/ั-{data['born']}. ะะพะปะธั-{data['enp']}. ะกะะะะก-{data['snils']}." f"\nะะพะบัะผะตะฝั-{research} โ-{direction_num}"
        result = {"value": patient_data}
    elif request_data["fieldPk"].find('%main_address') != -1:
        result = {"value": c.main_address}
    elif request_data["fieldPk"].find('%mother_full_main_address') != -1:
        result = {"value": mother_obj.main_address_full}
    elif request_data["fieldPk"].find('%full_main_address') != -1:
        result = {"value": c.main_address_full}
    elif request_data["fieldPk"].find('%docprofile') != -1:
        result = {"value": request.user.doctorprofile.get_full_fio()}
    elif request_data["fieldPk"].find('%doc_position') != -1:
        result = {"value": request.user.doctorprofile.get_position()}
    elif request_data["fieldPk"].find('%patient_fio') != -1:
        result = {"value": data['fio']}
    elif request_data["fieldPk"].find('%patient_family') != -1:
        result = {"value": data['family']}
    elif request_data["fieldPk"].find('%mother_family') != -1:
        result = {"value": mother_data['family']}
    elif request_data["fieldPk"].find('%mother_name') != -1:
        result = {"value": mother_data['name']}
    elif request_data["fieldPk"].find('%mother_patronymic') != -1:
        result = {"value": mother_data['patronymic']}
    elif request_data["fieldPk"].find('%patient_born') != -1:
        result = {"value": data['born']}
    elif request_data["fieldPk"].find('%mother_born') != -1:
        result = {"value": mother_data['born']}
    elif request_data["fieldPk"].find('%snils') != -1:
        result = {"value": data['snils']}
    elif request_data["fieldPk"].find('%mother_snils') != -1:
        result = {"value": mother_data['snils']}
    elif request_data["fieldPk"].find('%polis_enp') != -1:
        result = {"value": data['enp']}
    elif request_data["fieldPk"].find('%mother_polis_enp') != -1:
        result = {"value": mother_data['enp']}
    elif request_data["fieldPk"].find('%tfoms-attachment') != -1:
        # External TFOMS lookup: returns early with its own status payload.
        tfoms_data = c.individual.match_tfoms()
        if not tfoms_data or not isinstance(tfoms_data, dict):
            return status_response(False, 'ะะฐัะธะตะฝั ะฝะต ะฝะฐะนะดะตะฝ ะฒ ะฑะฐะทะต ะขะคะะะก', {'value': '000000 โ ะฝะต ะฝะฐะนะดะตะฝะพ'})
        idt = tfoms_data['idt']
        from tfoms.integration import get_attachment_by_idt

        attachment_data = get_attachment_by_idt(idt)
        if not attachment_data or not isinstance(attachment_data, dict) or not attachment_data.get('unit_code') or not attachment_data.get('area_name'):
            return status_response(False, 'ะะต ะฝะฐะนะดะตะฝะพ ะฟัะธะบัะตะฟะปะตะฝะธะต ะฟะฐัะธะตะฝัะฐ ะฟะพ ะฑะฐะทะต ะขะคะะะก', {'value': '000000 โ ะฝะต ะฝะฐะนะดะตะฝะพ'})
        return status_response(True, data={'value': f'{attachment_data["unit_code"]} โ {attachment_data["area_name"]}'})
    elif request_data["fieldPk"].find('%document_type') != -1:
        if data['passport_num']:
            result = {"value": "1-ะะฐัะฟะพัั ะณัะฐะถะดะฐะฝะธะฝะฐ ะ ะพััะธะนัะบะพะน ะคะตะดะตัะฐัะธะธ"}
        elif not data['passport_num'] and data['bc_num']:
            result = {"value": "6-ะกะฒะธะดะตัะตะปัััะฒะพ ะพ ัะพะถะดะตะฝะธะธ"}
    elif request_data["fieldPk"].find('%mother_document_type') != -1:
        if mother_data['passport_num']:
            result = {"value": "1-ะะฐัะฟะพัั ะณัะฐะถะดะฐะฝะธะฝะฐ ะ ะพััะธะนัะบะพะน ะคะตะดะตัะฐัะธะธ"}
    elif request_data["fieldPk"].find('%doc_serial') != -1:
        # Passport first; falls back to the birth certificate.
        # NOTE(review): this elif tests passport_serial while the if tests
        # passport_num — looks inconsistent with %document_type; confirm intent.
        if data['passport_num']:
            result = {"value": data["passport_serial"]}
        elif not data['passport_serial'] and data['bc_num']:
            result = {"value": data["bc_serial"]}
    elif request_data["fieldPk"].find('%mother_passport_serial') != -1:
        if mother_data['passport_num']:
            result = {"value": mother_data["passport_serial"]}
    elif request_data["fieldPk"].find('%doc_number') != -1:
        if data['passport_num']:
            result = {"value": data["passport_num"]}
        elif not data['passport_serial'] and data['bc_num']:
            result = {"value": data["bc_num"]}
    elif request_data["fieldPk"].find('%mother_passport_num') != -1:
        if mother_data['passport_num']:
            result = {"value": mother_data["passport_num"]}
    elif request_data["fieldPk"].find('%doc_who_issue') != -1:
        if data['passport_num']:
            result = {"value": data["passport_issued"]}
        elif not data['passport_serial'] and data['bc_num']:
            result = {"value": data["bc_issued"]}
    elif request_data["fieldPk"].find('%mother_passport_who') != -1:
        if mother_data['passport_num']:
            result = {"value": mother_data["passport_issued"]}
    elif request_data["fieldPk"].find('%doc_date_issue') != -1:
        if data['passport_num']:
            result = {"value": data["passport_date_start"]}
        elif not data['passport_serial'] and data['bc_num']:
            result = {"value": data["bc_date_start"]}
    elif request_data["fieldPk"].find('%mother_passport_date_issue') != -1:
        if mother_data['passport_num']:
            result = {"value": mother_data["passport_date_start"]}
    elif request_data["fieldPk"].find('%fact_address') != -1:
        result = {"value": c.fact_address}
    elif request_data["fieldPk"].find('%full_fact_address') != -1:
        result = {"value": c.fact_address_full}
    elif request_data["fieldPk"].find('%phone') != -1:
        result = {"value": c.phone}
    elif request_data["fieldPk"].find('%current_manager') != -1:
        current_iss = request_data["iss_pk"]
        num_dir = Issledovaniya.objects.get(pk=current_iss).napravleniye_id
        hospital_manager = Napravleniya.objects.get(pk=num_dir).hospital.current_manager
        result = {"value": hospital_manager}
    elif request_data["fieldPk"].find('%work_position') != -1:
        # work_position stores "position;department" — take the first part.
        work_position = ""
        work_data = c.work_position.split(';')
        if len(work_data) >= 1:
            work_position = work_data[0]
        result = {"value": work_position.strip()}
    elif request_data["fieldPk"].find('%work_department') != -1:
        # Second part of "position;department", empty when absent.
        work_department = ""
        work_data = c.work_position.split(';')
        if len(work_data) >= 2:
            work_department = work_data[1]
        result = {"value": work_department.strip()}
    elif request_data["fieldPk"].find('%harmful_factor') != -1:
        result = {"value": c.harmful_factor}
    elif request_data["fieldPk"].find('%proto_operation') != -1:
        current_iss = request_data["iss_pk"]
        num_dir = Issledovaniya.objects.get(pk=current_iss).napravleniye_id
        # Get all hosp-type directions in the history chain.
        main_hosp_dir = hosp_get_hosp_direction(num_dir)[0]
        operations_data = hosp_get_operation_data(main_hosp_dir['direction'])
        field_is_aggregate_operation = True
    elif request_data["fieldPk"].find('%directionparam') != -1:
        # Format: "%directionparam:<field_id>".
        id_field = request_data["fieldPk"].split(":")
        current_iss = request_data["iss_pk"]
        num_dir = Issledovaniya.objects.get(pk=current_iss).napravleniye_id
        val = DirectionParamsResult.objects.values_list('value', flat=True).filter(napravleniye_id=num_dir, field_id=id_field[1]).first()
        result = {"value": val}
    elif request_data["fieldPk"].find('%prevDirectionFieldValue') != -1:
        # Format: "%prevDirectionFieldValue:<field_id>".
        _, field_id = request_data["fieldPk"].split(":")
        current_iss = request_data["iss_pk"]
        client_id = Issledovaniya.objects.get(pk=current_iss).napravleniye.client_id
        # NOTE(review): ascending order_by + first() picks the OLDEST confirmed
        # value, not the latest — confirm this is the intended semantics.
        val = (
            ParaclinicResult.objects.filter(field_id=field_id, issledovaniye__napravleniye__client=client_id)
            .exclude(issledovaniye_id=current_iss)
            .exclude(issledovaniye__time_confirmation__isnull=True)
            .order_by('issledovaniye__time_confirmation')
            .values_list('value', flat=True)
            .first()
        )
        result = {"value": val, "isJson": False if not val or not isinstance(val, str) else ((val.startswith("{") and val.endswith("}")) or (val.startswith("[") and val.endswith("]")))}
    elif request_data["fieldPk"].find('%proto_description') != -1 and 'iss_pk' in request_data:
        aggregate_data = hosp_get_text_iss(request_data['iss_pk'], True, 'desc')
        field_is_aggregate_proto_description = True
    elif request_data["fieldPk"].find("|") > -1:
        # OR-combined list of field pks; "@" marks whole-group tokens.
        field_is_link = True
        logical_or = True
        field_pks = request_data["fieldPk"].split('|')
        if request_data["fieldPk"].find('@') > -1:
            logical_group_or = True
    elif request_data["fieldPk"].find("&") > -1:
        # AND-combined list of field pks.
        field_is_link = True
        logical_and = True
        field_pks = request_data["fieldPk"].split('&')
    else:
        # A single numeric field pk behaves like a one-element OR list.
        field_pks = [request_data["fieldPk"]]
        logical_or = True
        field_is_link = True
    if field_is_link:
        result = field_get_link_data(field_pks, client_pk, logical_or, logical_and, logical_group_or)
    elif field_is_aggregate_operation:
        result = field_get_aggregate_operation_data(operations_data)
    elif field_is_aggregate_proto_description:
        result = field_get_aggregate_text_protocol_data(aggregate_data)
    return JsonResponse({"result": result})
def get_current_direction(current_iss):
    """Return the direction id the given study belongs to."""
    study = Issledovaniya.objects.get(pk=current_iss)
    return study.napravleniye_id
def field_get_link_data(field_pks, client_pk, logical_or, logical_and, logical_group_or):
    """Resolve linked protocol-field pks to the patient's latest values.

    ``field_pks`` may contain plain numeric pks and "<group_pk>@" tokens that
    expand to all fields of a group (which forces AND semantics inside that
    group). OR mode returns the first non-empty value found; AND mode
    concatenates "<title> - <value>;" pairs into one result.
    """
    result, value, temp_value = None, None, None
    for current_field_pk in field_pks:
        group_fields = [current_field_pk]
        logical_and_inside = logical_and
        logical_or_inside = logical_or
        if current_field_pk.find('@') > -1:
            # Group token: expand to the group's ordered field pks, AND them.
            group_fields = get_input_fields_by_group(current_field_pk)
            logical_and_inside = True
            logical_or_inside = False
        for field_pk in group_fields:
            if field_pk.isdigit():
                rows = get_field_result(client_pk, int(field_pk))
                if rows:
                    row = rows[0]
                    value = row[5]
                    # ISO dates are reformatted for display.
                    match = re.fullmatch(r'\d{4}-\d\d-\d\d', value)
                    if match:
                        value = normalize_date(value)
                    if logical_or_inside:
                        result = {"direction": row[1], "date": row[4], "value": value}
                        if value:
                            break
                    # NOTE(review): "row"/"value" here may carry over from a
                    # previous iteration when this field had no rows — confirm
                    # that is the intended fallback behavior.
                    if logical_and_inside:
                        r = ParaclinicInputField.objects.get(pk=field_pk)
                        titles = r.get_title()
                        if result is None:
                            result = {"direction": row[1], "date": row[4], "value": value}
                        else:
                            temp_value = result.get('value', ' ')
                            if value:
                                result["value"] = f"{temp_value} {titles} - {value};"
        # Stop early once a group produced a value (group-OR) or OR found one.
        if logical_group_or and temp_value or logical_or_inside and value:
            break
    return result
def field_get_aggregate_operation_data(operations_data):
    """Collapse a list of operation records into one numbered, multi-line value.

    Returns None for an empty list; otherwise a dict with empty direction/date
    and the concatenated text in "value" (entries after the first are appended
    on new lines, each terminated with ';').
    """
    result = None
    for number, op in enumerate(operations_data, start=1):
        line = (
            f"{number}) ะะฐะทะฒะฐะฝะธะต ะพะฟะตัะฐัะธะธ: {op['name_operation']}, ะัะพะฒะตะดะตะฝะฐ: {op['date']} {op['time_start']}-{op['time_end']}, ะะตัะพะด ะพะฑะตะทะฑะพะปะธะฒะฐะฝะธั: {op['anesthesia method']}, "
            f"ะัะปะพะถะฝะตะฝะธั: {op['complications']}, ะะฟะตัะธัะพะฒะฐะป: {op['doc_fio']}"
        )
        if result is None:
            result = {"direction": '', "date": '', "value": line}
        else:
            result["value"] = f"{result.get('value', ' ')}\n{line};"
    return result
def field_get_aggregate_text_protocol_data(data):
    """Flatten aggregated protocol descriptions into one text value.

    ``data`` is a list of {'title_research', 'result': [{'date', 'data': [...]}]}
    records as produced by hosp_get_text_iss; groups contribute their title and
    "title_field: value" pairs.
    """
    value = ''
    for research in data:
        value = f"{value}[{research['title_research']}]"
        for res in research['result']:
            value = f"{value}\n[{res.get('date', '')}]\n"
            if res.get('data', ''):
                for g in res['data']:
                    value = f"{value}{g.get('group_title', '')}"
                    group_fields = g.get('fields', '')
                    if group_fields:
                        for fied_data in group_fields:
                            value = f"{value}{fied_data['title_field']}: {fied_data['value']}"
                            # NOTE(review): newline emitted per field here; the
                            # flattened source is ambiguous about whether it was
                            # per-field or per-group — confirm against original.
                            value = f"{value}\n"
    result = {"direction": '', "date": '', "value": value}
    return result
def get_input_fields_by_group(group_pk):
    """Expand a '<group_pk>@' token into the ordered field ids of that group,
    returned as strings."""
    key = group_pk[:-1]  # strip the trailing group-marker character
    id_rows = ParaclinicInputField.objects.values_list('id').filter(group__pk=key).order_by('order')
    return [str(row[0]) for row in id_rows]
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน")
def send_amd(request):
    """Queue the selected directions for (re-)sending to AMD.

    Only directions currently in the 'error' or 'need' AMD state are touched;
    their number and error flag are reset.
    """
    payload = json.loads(request.body)
    for direction in Napravleniya.objects.filter(pk__in=payload["pks"]):
        if direction.amd_status not in ('error', 'need'):
            continue
        direction.need_resend_amd = True
        direction.amd_number = None
        direction.error_amd = False
        direction.save()
    return JsonResponse({"ok": True})
@group_required("ะฃะฟัะฐะฒะปะตะฝะธะต ะพัะฟัะฐะฒะบะพะน ะฒ ะะะ")
def reset_amd(request):
    """Clear the AMD send state (resend flag, number, error flag) for the
    selected directions."""
    payload = json.loads(request.body)
    for direction in Napravleniya.objects.filter(pk__in=payload["pks"]):
        direction.need_resend_amd = False
        direction.amd_number = None
        direction.error_amd = False
        direction.save()
    return JsonResponse({"ok": True})
def purposes(request):
    """Return direction purposes as pk/title pairs, prefixed with an empty
    "not selected" choice."""
    items = [{"pk": "NONE", "title": " โ ะะต ะฒัะฑัะฐะฝะพ"}]
    items.extend({"pk": p[0], "title": p[1]} for p in Napravleniya.PURPOSES)
    return JsonResponse({"purposes": items})
def external_organizations(request):
    """Return non-hidden external organizations, prefixed with an empty choice.

    When the request carries a truthy ``plain_response`` attribute, the raw dict
    is returned instead of a JsonResponse (internal callers).
    """
    orgs = [{"pk": "NONE", "title": " โ ะะต ะฒัะฑัะฐะฝะพ"}]
    orgs.extend({"pk": org.pk, "title": org.title} for org in ExternalOrganization.objects.filter(hide=False).order_by('pk'))
    payload = {"organizations": orgs}
    if hasattr(request, 'plain_response') and request.plain_response:
        return payload
    return JsonResponse(payload)
@login_required
def direction_in_favorites(request):
    """Query or toggle whether a direction is in the current doctor's watch list.

    With ``update`` and ``status`` in the body the watch entry is created or
    deleted; otherwise the current watch status is returned.
    """
    payload = json.loads(request.body)
    direction_pk = payload['pk']
    doc: DoctorProfile = request.user.doctorprofile
    if payload.get("update", False) and 'status' in payload:
        if payload.get("status", False):
            DirectionToUserWatch(doc=doc, direction_id=direction_pk).save()
        else:
            DirectionToUserWatch.objects.filter(doc=doc, direction_id=direction_pk).delete()
        return JsonResponse({"ok": True})
    watch_qs = DirectionToUserWatch.objects.filter(doc=doc, direction_id=direction_pk)
    return JsonResponse({"status": watch_qs.exists()})
@login_required
def all_directions_in_favorites(request):
    """List every direction on the current doctor's watch list with patient info."""
    doc: DoctorProfile = request.user.doctorprofile
    rows = []
    for watch in DirectionToUserWatch.objects.filter(doc=doc).order_by('pk'):
        rows.append(
            {
                "pk": watch.pk,
                "direction": watch.direction_id,
                "card": watch.direction.client.number_with_type(),
                "client": watch.direction.client.individual.fio(full=True),
            }
        )
    return JsonResponse({"data": rows})
@login_required
def directions_type_date(request):
    """Return fully-confirmed direction ids of the given kind for one calendar day.

    Scope is the user's department, or the user personally when ``by_doc`` is
    set. Directions containing any unconfirmed study are excluded.
    """
    podr = request.user.doctorprofile.podrazdeleniye
    doc_pk = request.user.doctorprofile.pk
    request_data = json.loads(request.body)
    is_lab = request_data.get('is_lab', False)
    is_paraclinic = request_data.get('is_paraclinic', False)
    is_doc_refferal = request_data.get('is_doc_refferal', False)
    by_doc = request_data.get('by_doc', False)
    date = request_data['date']
    date = normalize_date(date)
    d1 = datetime.strptime(date, '%d.%m.%Y')
    # Whole-day range: midnight to 23:59:59.999999.
    start_date = datetime.combine(d1, dtime.min)
    end_date = datetime.combine(d1, dtime.max)
    if not is_lab and not is_doc_refferal and not is_paraclinic:
        return JsonResponse({"results": []})
    if is_lab:
        lab_podr = get_lab_podr()
        lab_podr = [i[0] for i in lab_podr]
    else:
        lab_podr = [-1]  # sentinel: matches no real laboratory department
    confirm_direction = get_confirm_direction(start_date, end_date, lab_podr, is_lab, is_paraclinic, is_doc_refferal)
    if not confirm_direction:
        return JsonResponse({"results": []})
    confirm_direction = [i[0] for i in confirm_direction]
    if not by_doc:
        confirm_direction_department = filter_direction_department(confirm_direction, int(podr.pk))
    else:
        confirm_direction_department = filter_direction_doctor(confirm_direction, doc_pk)
    confirm_direction = [i[0] for i in confirm_direction_department]
    not_confirm_direction = get_not_confirm_direction(confirm_direction)
    not_confirm_direction = [i[0] for i in not_confirm_direction]
    # Keep only directions where every study is confirmed.
    result_direction = list(set(confirm_direction) - set(not_confirm_direction))
    return JsonResponse({"results": result_direction})
@login_required
@group_required("ะฃะฟัะฐะฒะปะตะฝะธะต ะธะตัะฐัั
ะธะตะน ะธััะพัะธะธ")
def change_owner_direction(request):
user = request.user.doctorprofile
request_data = json.loads(request.body)
new_card_number = request_data['new_card_number']
old_card_number = request_data['old_card_number']
directions = DirectionsHistory.move_directions(old_card_number, new_card_number, user)
directions = ', '.join([str(d.pk) for d in directions])
return JsonResponse({"directions": directions})
@login_required
def directions_result_year(request):
    """Return a patient's confirmed directions for a whole calendar year,
    grouped by direction with the list of research titles per direction."""
    request_data = json.loads(request.body)
    is_lab = request_data.get('isLab', False)
    is_paraclinic = request_data.get('isParaclinic', False)
    is_doc_refferal = request_data.get('isDocReferral', False)
    year = request_data['current_year']
    d1 = datetime.strptime(f'01.01.{year}', '%d.%m.%Y')
    start_date = datetime.combine(d1, dtime.min)
    d2 = datetime.strptime(f'31.12.{year}', '%d.%m.%Y')
    end_date = datetime.combine(d2, dtime.max)
    card_pk = request_data.get('card_pk', -1)
    # "and" binds tighter than "or": bail when no kind requested or no card given.
    if not is_lab and not is_doc_refferal and not is_paraclinic or card_pk == -1:
        return JsonResponse({"results": []})
    if is_lab:
        lab_podr = get_lab_podr()
        lab_podr = [i[0] for i in lab_podr]
    else:
        lab_podr = [-1]  # sentinel: matches no real laboratory department
    card_pk = int(card_pk)
    confirmed_directions = get_confirm_direction_patient_year(start_date, end_date, lab_podr, card_pk, is_lab, is_paraclinic, is_doc_refferal)
    if not confirmed_directions:
        return JsonResponse({"results": []})
    # Group rows by direction; the first row seen supplies the date.
    directions = {}
    for d in confirmed_directions:
        if d.direction not in directions:
            directions[d.direction] = {
                'dir': d.direction,
                'date': d.ch_time_confirmation,
                'researches': [],
            }
        directions[d.direction]['researches'].append(d.research_title)
    return JsonResponse({"results": list(directions.values())})
@login_required
def results_by_direction(request):
    """Collect confirmed results for one or more directions, grouped by
    direction and then by study (lab fractions or paraclinic/referral values)."""
    request_data = json.loads(request.body)
    is_lab = request_data.get('isLab', False)
    is_paraclinic = request_data.get('isParaclinic', False)
    is_doc_refferal = request_data.get('isDocReferral', False)
    direction = request_data.get('dir')
    directions = request_data.get('directions', [])
    # Single-direction callers pass "dir"; normalize to a list.
    if not directions and direction:
        directions = [direction]
    objs_result = {}
    if is_lab:
        direction_result = get_laboratory_results_by_directions(directions)
        for r in direction_result:
            if r.direction not in objs_result:
                objs_result[r.direction] = {'dir': r.direction, 'date': r.date_confirm, 'researches': {}}
            if r.iss_id not in objs_result[r.direction]['researches']:
                objs_result[r.direction]['researches'][r.iss_id] = {'title': r.research_title, 'fio': short_fio_dots(r.fio), 'dateConfirm': r.date_confirm, 'fractions': []}
            objs_result[r.direction]['researches'][r.iss_id]['fractions'].append({'title': r.fraction_title, 'value': r.value, 'units': r.units})
    if is_paraclinic or is_doc_refferal:
        results = desc_to_data(directions, force_all_fields=True)
        for i in results:
            # NOTE(review): "date" appears to hold "<date> <direction pk>" —
            # index 1 is used as the direction key below; confirm the format
            # produced by desc_to_data.
            direction_data = i['result'][0]["date"].split(' ')
            if direction_data[1] not in objs_result:
                objs_result[direction_data[1]] = {'dir': direction_data[1], 'date': direction_data[0], 'researches': {}}
            if i['result'][0]["iss_id"] not in objs_result[direction_data[1]]['researches']:
                objs_result[direction_data[1]]['researches'][i['result'][0]["iss_id"]] = {
                    'title': i['title_research'],
                    'fio': short_fio_dots(i['result'][0]["docConfirm"]),
                    'dateConfirm': direction_data[0],
                    'fractions': [],
                }
            values = values_from_structure_data(i['result'][0]["data"])
            objs_result[direction_data[1]]['researches'][i['result'][0]["iss_id"]]["fractions"].append({'value': values})
    return JsonResponse({"results": list(objs_result.values())})
def get_research_for_direction_params(pk):
    """Build the editable group/field structure of a research for direction params.

    ``pk`` may be a Researches instance or a pk (int/str). Returns {} when the
    pk is unusable; otherwise {"research": {...}} with nested visible groups and
    their visible fields, ordered by field order.
    """
    response = {}
    if isinstance(pk, Researches):
        research_obj = pk
    elif isinstance(pk, (int, str)) and int(pk) > -1:
        research_obj = Researches.objects.get(pk=int(pk))
    else:
        return response
    response["research"] = {
        "title": research_obj.title,
        # Synthetic version stamp derived from the pk.
        "version": research_obj.pk * 10000,
        "is_paraclinic": research_obj.is_paraclinic or research_obj.is_citology or research_obj.is_gistology,
        "is_doc_refferal": research_obj.is_doc_refferal,
        "is_microbiology": research_obj.is_microbiology,
        "is_treatment": research_obj.is_treatment,
        "is_stom": research_obj.is_stom,
        "wide_headers": research_obj.wide_headers,
        "groups": [],
        "show": False,
        "status": 'LOADED',
    }
    for group in research_obj.paraclinicinputgroups_set.all().filter(hide=False):
        g = {
            "pk": group.pk,
            "order": group.order,
            "title": group.title,
            "show_title": group.show_title,
            "hide": group.hide,
            "display_hidden": False,
            "fields": [],
            "visibility": group.visibility,
        }
        for field in group.paraclinicinputfield_set.all().filter(hide=False).order_by("order"):
            field_type = field.field_type
            g["fields"].append(
                {
                    "pk": field.pk,
                    "order": field.order,
                    "lines": field.lines,
                    "title": field.title,
                    "hide": field.hide,
                    # NOTE(review): types 10/12 presumably are select-like inputs
                    # that need an explicit empty choice — confirm the mapping.
                    "values_to_input": ([] if not field.required or field_type not in [10, 12] else ['- ะะต ะฒัะฑัะฐะฝะพ']) + json.loads(field.input_templates),
                    # NOTE(review): types 1/20 get a computed default, types
                    # 3/11/13/14 start empty — magic numbers mirror
                    # get_default_for_field; confirm semantics there.
                    "value": (field.default_value if field_type not in [3, 11, 13, 14] else '') if field_type not in [1, 20] else (get_default_for_field(field_type)),
                    "field_type": field_type,
                    "default_value": field.default_value,
                    "visibility": field.visibility,
                    "required": field.required,
                    "helper": field.helper,
                }
            )
        response["research"]["groups"].append(g)
    return response
@login_required
def tubes_for_get(request):
    """Return (and lazily create) the lab tubes of a direction, grouped by lab.

    Accepts a direction pk or a scanned GS1-style barcode; verifies the direction
    belongs to the requesting user's hospital; creates missing TubesRegistration
    rows for unconfirmed studies, honoring fraction absorption relations
    (fupper/flower) that merge fractions into a shared tube type.
    """
    parse_params = {
        'pk': str,
    }
    try:
        direction_pk = int(data_parse(request.body, parse_params)[0])
        # Scanned barcodes carry the 460... prefix and a trailing check digit.
        if direction_pk >= 4600000000000:
            direction_pk -= 4600000000000
            direction_pk //= 10
        direction = (
            Napravleniya.objects.select_related('hospital')
            .select_related('doc')
            .select_related('doc__podrazdeleniye')
            .select_related('imported_org')
            .select_related('client')
            .select_related('client__individual')
            .prefetch_related(
                Prefetch(
                    'issledovaniya_set',
                    Issledovaniya.objects.filter(research__fractions__isnull=False)
                    .select_related('research')
                    .select_related('research__podrazdeleniye')
                    .prefetch_related(
                        Prefetch('research__fractions_set', Fractions.objects.filter(hide=False).select_related('relation').prefetch_related('fupper').prefetch_related('flower'))
                    )
                    .prefetch_related(Prefetch('tubes', TubesRegistration.objects.select_related('type').select_related('doc_get').select_related('type__tube')))
                    .order_by("research__title"),
                )
            )
            .get(pk=direction_pk)
        )
    except Exception:  # narrowed from a bare "except:" so KeyboardInterrupt etc. propagate
        return status_response(False, "ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฝะฐะนะดะตะฝะพ")
    if direction.get_hospital() != request.user.doctorprofile.get_hospital():
        return status_response(False, "ะะฐะฟัะฐะฒะปะตะฝะธะต ะดะปั ะดััะณะพะน ะพัะณะฐะฝะธะทะฐัะธะธ")
    data = {}
    data["direction"] = {
        "pk": direction.pk,
        "cancel": direction.cancel,
        "date": str(dateformat.format(direction.data_sozdaniya.date(), settings.DATE_FORMAT)),
        "doc": {"fio": "" if not direction.doc else direction.doc.get_fio(), "otd": "" if not direction.doc else direction.doc.podrazdeleniye.title},
        "imported_from_rmis": direction.imported_from_rmis,
        "imported_org": "" if not direction.imported_org else direction.imported_org.title,
        "full_confirm": True,
        "has_not_completed": False,
    }
    data["tubes"] = {}
    tubes_buffer = {}
    # Index which fractions participate in absorption relations:
    # fuppers = absorbing fractions, flowers = absorbed fractions.
    fresearches = {}
    fuppers = {}
    flowers = {}
    iss_cached = list(direction.issledovaniya_set.all())
    for i in iss_cached:
        for fr in i.research.fractions_set.all():
            absor = fr.fupper.all()
            if absor.exists():
                fuppers[fr.pk] = True
                fresearches[fr.research_id] = True
                for absor_obj in absor:
                    flowers[absor_obj.flower_id] = True
                    fresearches[absor_obj.flower.research_id] = True
    for v in iss_cached:
        # BUG FIX: the confirmation checks in this loop previously read the
        # stale loop variable "i" (last item of the loop above) instead of the
        # current study "v".
        if data["direction"]["full_confirm"] and not v.time_confirmation:
            data["direction"]["full_confirm"] = False
        has_rels = {x.type_id: x for x in v.tubes.all()}
        new_tubes = []
        for val in v.research.fractions_set.all():
            vrpk = val.relation_id
            rel = val.relation
            # Confirmed studies never get new tubes — skip unknown relations.
            if vrpk not in has_rels and v.time_confirmation:
                continue
            # Absorbed fraction of an unconfirmed study: redirect to the
            # absorbing fraction's tube relation.
            if val.research_id in fresearches and val.pk in flowers and not v.time_confirmation:
                absor = val.flower.all().first()
                if absor.fupper_id in fuppers:
                    vrpk = absor.fupper.relation_id
                    rel = absor.fupper.relation
            if vrpk not in tubes_buffer:
                if vrpk not in has_rels:
                    ntube = TubesRegistration(type=rel)
                    ntube.save()
                    has_rels[vrpk] = ntube
                    new_tubes.append(ntube)
                else:
                    ntube = has_rels[vrpk]
                tubes_buffer[vrpk] = {"researches": set(), "labs": set(), "tube": ntube}
            else:
                ntube = tubes_buffer[vrpk]["tube"]
            tubes_buffer[vrpk]["researches"].add(v.research.title)
            podr = v.research.get_podrazdeleniye()
            if podr:
                tubes_buffer[vrpk]["labs"].add(podr.get_title())
        if new_tubes:
            v.tubes.add(*new_tubes)
    data["details"] = {}
    for key in tubes_buffer:
        v = tubes_buffer[key]
        tube = v["tube"]
        barcode = ""
        if tube.barcode:
            barcode = tube.barcode
        lab = '; '.join(sorted(v["labs"]))
        if lab not in data["tubes"]:
            data["tubes"][lab] = {}
        if tube.pk not in data["tubes"][lab]:
            tube_title = tube.type.tube.title
            tube_color = tube.type.tube.color
            status = tube.getstatus()
            data["tubes"][lab][tube.pk] = {
                "researches": list(v["researches"]),
                "status": status,
                "checked": True,
                "color": tube_color,
                "title": tube_title,
                "id": tube.pk,
                "barcode": barcode,
            }
            data['details'][tube.pk] = tube.get_details()
            if not data["direction"]["has_not_completed"] and not status:
                data["direction"]["has_not_completed"] = True
    if not data["tubes"]:
        return status_response(False, 'ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฒ ะปะฐะฑะพัะฐัะพัะธั')
    individual = direction.client.individual
    data["client"] = {
        "card": direction.client.number_with_type(),
        "fio": individual.fio(),
        "sex": individual.sex,
        "birthday": individual.bd(),
        "age": individual.age_s(direction=direction),
    }
    return status_response(True, data=data)
@login_required
def tubes_register_get(request):
    """Mark the given tubes as taken by the current doctor (if not yet taken)
    and return their details keyed by pk."""
    pks = data_parse(request.body, {'pks': list})[0]
    details = {}
    for tube_pk in pks:
        tube = TubesRegistration.objects.get(id=tube_pk)
        if not tube.doc_get and not tube.time_get:
            tube.set_get(request.user.doctorprofile)
        details[tube_pk] = tube.get_details()
    return status_response(True, data={'details': details})
@login_required
def tubes_for_confirm(request):
    """List not-yet-taken tubes from the current doctor's directions created in
    the last week, sorted by patient then tube pk."""
    res = {"rows": []}
    date_start = datetime.now() - timedelta(days=6)
    date_end = datetime.now()
    naps = Napravleniya.objects.filter(
        Q(data_sozdaniya__range=(date_start, date_end), doc_who_create=request.user.doctorprofile, cancel=False)
        | Q(data_sozdaniya__range=(date_start, date_end), doc=request.user.doctorprofile, cancel=False)
    )
    rows_by_tube = {}
    for nap in naps:
        for study in Issledovaniya.objects.filter(napravleniye=nap):
            for tube in study.tubes.filter(doc_get__isnull=True):
                rows_by_tube[tube.pk] = {
                    "direction": nap.pk,
                    "patient": nap.client.individual.fio(short=True, dots=True),
                    "title": tube.type.tube.title,
                    "pk": tube.pk,
                    "color": tube.type.tube.color,
                    "checked": True,
                }
    # Sort by pk first, then (stably) by patient — patient is the primary key.
    rows = sorted(rows_by_tube.values(), key=lambda row: row['pk'])
    res["rows"] = sorted(rows, key=lambda row: row['patient'])
    return JsonResponse(res)
@login_required
def tubes_get_history(request):
    """List tubes taken by the current doctor today, newest first, optionally
    narrowed to the given pks."""
    payload = json.loads(request.body)
    pks = payload.get('pks')
    res = {"rows": []}
    tubes = TubesRegistration.objects.filter(doc_get=request.user.doctorprofile).order_by('-time_get').exclude(time_get__lt=datetime.now().date())
    if pks:
        tubes = tubes.filter(pk__in=pks)
    for tube in tubes:
        studies = Issledovaniya.objects.filter(tubes__pk=tube.pk)
        res["rows"].append(
            {
                "pk": tube.pk,
                "direction": studies[0].napravleniye_id,
                "title": tube.type.tube.title,
                "color": tube.type.tube.color,
                "researches": ', '.join(str(s.research.title) for s in studies),
                "time": strtime(tube.time_get),
                "checked": True,
            }
        )
    return JsonResponse(res)
@login_required
def gen_number(request):
    """Generate the next number from a NumberGenerator for a protocol field.

    Locks the generator row (select_for_update inside an atomic block) so
    concurrent protocols cannot draw the same number; returned ("free") numbers
    are reused before the counter advances. Idempotent per field: an existing
    value is returned unchanged.
    """
    data = json.loads(request.body)
    key = data['numberKey']
    iss_pk = data['issPk']
    field_pk = data['fieldPk']
    with transaction.atomic():
        iss: Issledovaniya = Issledovaniya.objects.get(pk=iss_pk)
        if iss.time_confirmation:
            return status_response(False, 'ะัะพัะพะบะพะป ัะถะต ะฟะพะดัะฒะตัะดะถัะฝ')
        gen: NumberGenerator = NumberGenerator.objects.select_for_update().filter(key=key, year=current_year(), hospital=iss.napravleniye.get_hospital(), is_active=True).first()
        if not gen:
            return status_response(False, 'ะะบัะธะฒะฝัะน ะณะตะฝะตัะฐัะพั ะฝะฐ ัะตะบััะธะน ะณะพะด ะดะปั ะพัะณะฐะฝะธะทะฐัะธะธ ะฝะฐะฟัะฐะฒะปะตะฝะธั ะฝะต ะทะฐัะตะณะธัััะธัะพะฒะฐะฝ')
        field: ParaclinicResult = ParaclinicResult.objects.filter(issledovaniye=iss, field_id=field_pk).first()
        if not field:
            # Type 30 marks a generated-number field.
            field = ParaclinicResult.objects.create(issledovaniye=iss, field_id=field_pk, field_type=30)
        if field.field_type != 30:
            field.field_type = 30
            field.save()
        if field.value:
            # Idempotent: return the previously generated number unchanged.
            return status_response(True, 'ะะฝะฐัะตะฝะธะต ัะถะต ะฑัะปะพ ัะณะตะฝะตัะธัะพะฒะฐะฝะพ', {'number': field.value})
        next_value = None
        # Baseline: last issued number (or start-1 when nothing issued yet);
        # any freed number lower than that takes precedence.
        min_last_value = gen.last if gen.last else (gen.start - 1)
        if gen.free_numbers:
            min_last_value = min(min_last_value, *gen.free_numbers)
        if not gen.last or gen.last == min_last_value:
            # No smaller freed number available — advance the counter.
            next_value = min_last_value + 1
            if next_value > gen.end:
                return status_response(False, 'ะะฝะฐัะตะฝะธั ะณะตะฝะตัะฐัะพัะฐ ะทะฐะบะพะฝัะธะปะธัั')
            gen.last = next_value
        else:
            # Reuse the smallest freed number and drop it from the pool.
            next_value = min_last_value
            gen.free_numbers = [x for x in gen.free_numbers if x != next_value]
        gen.save(update_fields=['last', 'free_numbers'])
        total_free_numbers = len([x for x in gen.free_numbers if x <= gen.last]) + (gen.end - gen.last)
        total_numbers = (gen.end - gen.start) + 1
        number = str(next_value).zfill(gen.prepend_length)
        field.value = number
        field.save()
        return status_response(True, None, {'number': number, 'totalFreeNumbers': total_free_numbers, 'totalNumbers': total_numbers})
@login_required
def free_number(request):
    """Return a previously generated number back to the generator's free pool.

    Clears the field value and, when the number lies inside the generator's
    [start, end] range, appends it to ``free_numbers`` for reuse by gen_number.
    """
    data = json.loads(request.body)
    key = data['numberKey']
    iss_pk = data['issPk']
    field_pk = data['fieldPk']
    with transaction.atomic():
        iss: Issledovaniya = Issledovaniya.objects.get(pk=iss_pk)
        if iss.time_confirmation:
            return status_response(False, 'ะัะพัะพะบะพะป ัะถะต ะฟะพะดัะฒะตัะดะถัะฝ')
        gen: NumberGenerator = NumberGenerator.objects.select_for_update().filter(key=key, year=current_year(), hospital=iss.napravleniye.get_hospital(), is_active=True).first()
        if not gen:
            return status_response(False, 'ะะบัะธะฒะฝัะน ะณะตะฝะตัะฐัะพั ะฝะฐ ัะตะบััะธะน ะณะพะด ะดะปั ะพัะณะฐะฝะธะทะฐัะธะธ ะฝะฐะฟัะฐะฒะปะตะฝะธั ะฝะต ะทะฐัะตะณะธัััะธัะพะฒะฐะฝ')
        field: ParaclinicResult = ParaclinicResult.objects.filter(issledovaniye=iss, field_id=field_pk).first()
        # Perinatal death-certificate numbers use a dedicated field type (37);
        # every other key uses the generic generated-number type (30).
        if key == "deathPerinatalNumber":
            field_type = 37
        else:
            field_type = 30
        if not field:
            field = ParaclinicResult.objects.create(issledovaniye=iss, field_id=field_pk, field_type=field_type)
        if field.field_type != 30 and key == "deathFormNumber":
            field.field_type = 30
            field.save()
        if field.field_type != 37 and key == "deathPerinatalNumber":
            field.field_type = 37
            field.save()
        if not field.value:
            # Nothing to free.
            return status_response(True)
        value = int(field.value)
        field.value = ''
        field.save()
        if value >= gen.start and value <= gen.end:
            gen.free_numbers = [*gen.free_numbers, value]
            gen.save(update_fields=['free_numbers'])
    return status_response(True)
@login_required
def eds_required_signatures(request):
    """Report, per required document type, which EDS signatures exist and which are missing.

    Only fully confirmed directions belonging to the caller's hospital are accepted.
    """
    data = json.loads(request.body)
    pk = data['pk']
    direction: Napravleniya = Napravleniya.objects.get(pk=pk)
    if direction.get_hospital() != request.user.doctorprofile.get_hospital():
        return status_response(False, 'ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฒ ะฒะฐัั ะพัะณะฐะฝะธะทะฐัะธั!')
    if not direction.is_all_confirm():
        return status_response(False, 'ะะฐะฟัะฐะฒะปะตะฝะธะต ะดะพะปะถะฝะพ ะฑััั ะฟะพะดัะฒะตัะถะดะตะฝะพ!')
    rs = direction.required_signatures(fast=True, need_save=True)
    result = {'documents': []}
    ltc = direction.last_time_confirm()
    for r in rs['docTypes']:
        # Only the current (non-archived) document of the latest confirmation is relevant.
        dd: DirectionDocument = DirectionDocument.objects.filter(direction=direction, is_archive=False, last_confirmed_at=ltc, file_type=r.lower()).first()
        has_signatures = []
        empty_signatures = rs['signsRequired']
        if dd:
            for s in DocumentSign.objects.filter(document=dd):
                has_signatures.append(s.sign_type)
                # Remove each present signature from the "missing" list.
                empty_signatures = [x for x in empty_signatures if x != s.sign_type]
        status = len(empty_signatures) == 0
        result['documents'].append(
            {
                'type': r,
                'status': status,
                'has': has_signatures,
                'empty': empty_signatures,
            }
        )
    return JsonResponse(result)
@login_required
def eds_documents(request):
    """Build (or reuse) the EDS documents for a direction and return them.

    For each required document type of the latest confirmation a
    DirectionDocument is created if missing, stale documents are archived,
    the file is generated (PDF via result_print, CDA XML via the l2vi
    service or the local renderer) and the document is returned with its
    content (base64 for PDF, utf-8 text for CDA) and its signatures.
    """
    data = json.loads(request.body)
    pk = data['pk']
    direction: Napravleniya = Napravleniya.objects.get(pk=pk)
    if direction.get_hospital() != request.user.doctorprofile.get_hospital():
        return status_response(False, 'ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฒ ะฒะฐัั ะพัะณะฐะฝะธะทะฐัะธั!')
    if not direction.is_all_confirm():
        return status_response(False, 'ะะฐะฟัะฐะฒะปะตะฝะธะต ะดะพะปะถะฝะพ ะฑััั ะฟะพะดัะฒะตัะถะดะตะฝะพ!')
    required_signatures = direction.required_signatures(need_save=True)
    documents = []
    has_types = {}
    last_time_confirm = direction.last_time_confirm()
    d: DirectionDocument
    for d in DirectionDocument.objects.filter(direction=direction, last_confirmed_at=last_time_confirm, is_archive=False):
        has_types[d.file_type.lower()] = True
    # Create any missing documents for the latest confirmation and archive stale ones.
    for t in [x for x in required_signatures['docTypes'] if x.lower() not in has_types]:
        DirectionDocument.objects.create(direction=direction, last_confirmed_at=last_time_confirm, file_type=t.lower())
    DirectionDocument.objects.filter(direction=direction, is_archive=False).exclude(last_confirmed_at=last_time_confirm).update(is_archive=True)
    cda_eds_data = get_cda_data(pk)
    for d in DirectionDocument.objects.filter(direction=direction, last_confirmed_at=last_time_confirm):
        if not d.file:
            file = None
            filename = None
            if d.file_type.lower() != d.file_type:
                # Normalize legacy upper-case file types.
                d.file_type = d.file_type.lower()
                d.save()
            if d.file_type == DirectionDocument.PDF:
                # result_print expects an HttpRequest-like object; fake one with a namedtuple.
                request_tuple = collections.namedtuple('HttpRequest', ('GET', 'user', 'plain_response'))
                req = {
                    'GET': {
                        "pk": f'[{pk}]',
                        "split": '1',
                        "leftnone": '0',
                        "inline": '1',
                        "protocol_plain_text": '1',
                    },
                    'user': request.user,
                    'plain_response': True,
                }
                filename = f'{pk}-{last_time_confirm}.pdf'
                file = ContentFile(result_print(request_tuple(**req)), filename)
            elif d.file_type == DirectionDocument.CDA:
                if SettingManager.l2('l2vi'):
                    cda_data = gen_cda_xml(pk=pk)
                    cda_xml = cda_data.get('result', {}).get('content')
                else:
                    cda_xml = render_cda(service=cda_eds_data['title'], direction_data=cda_eds_data)
                # BUGFIX: the filename contained a mojibake dash ("โ"); use a
                # plain ASCII hyphen, matching the PDF filename above.
                filename = f"{pk}-{last_time_confirm}.cda.xml"
                if cda_xml:
                    file = ContentFile(cda_xml.encode('utf-8'), filename)
                else:
                    file = None
            if file:
                d.file.save(filename, file)
        signatures = {}
        has_signatures = DocumentSign.objects.filter(document=d)
        sgn: DocumentSign
        for sgn in has_signatures:
            signatures[sgn.sign_type] = {
                'pk': sgn.pk,
                'executor': str(sgn.executor),
                'signedAt': strfdatetime(sgn.signed_at),
                'signValue': sgn.sign_value,
            }
        # Required-but-missing signature slots are reported as None.
        for s in [x for x in required_signatures['signsRequired'] if x not in signatures]:
            signatures[s] = None
        file_content = None
        if d.file:
            if d.file_type == DirectionDocument.PDF:
                file_content = base64.b64encode(d.file.read()).decode('utf-8')
            elif d.file_type == DirectionDocument.CDA:
                file_content = d.file.read().decode('utf-8')
        document = {
            "pk": d.pk,
            "type": d.file_type.upper(),
            "fileName": os.path.basename(d.file.name) if d.file else None,
            "fileContent": file_content,
            "signatures": signatures,
            "vi_id": direction.vi_id,
        }
        documents.append(document)
    return JsonResponse({"documents": documents, "edsTitle": direction.get_eds_title(), "executors": direction.get_executors()})
@login_required
def eds_add_sign(request):
    """Attach an EDS signature of the given role to a direction document.

    Validates, in order: hospital ownership, confirmation state, non-empty
    signature, the caller's allowed roles, the role being required, document
    freshness, uniqueness of (document, role), and — for the 'ะัะฐั' role —
    that the caller is one of the executors.
    """
    data = json.loads(request.body)
    pk = data['pk']
    sign = data['sign']
    sign_type = data['mode']
    direction_document: DirectionDocument = DirectionDocument.objects.get(pk=pk)
    direction: Napravleniya = direction_document.direction
    if direction.get_hospital() != request.user.doctorprofile.get_hospital():
        return status_response(False, 'ะะฐะฟัะฐะฒะปะตะฝะธะต ะฝะต ะฒ ะฒะฐัั ะพัะณะฐะฝะธะทะฐัะธั!')
    if not direction.is_all_confirm():
        return status_response(False, 'ะะฐะฟัะฐะฒะปะตะฝะธะต ะดะพะปะถะฝะพ ะฑััั ะฟะพะดัะฒะตัะถะดะตะฝะพ!')
    if not sign:
        return status_response(False, 'ะะตะบะพััะตะบัะฝะฐั ะฟะพะดะฟะธัั!')
    user_roles = request.user.doctorprofile.get_eds_allowed_sign()
    if sign_type not in user_roles:
        return status_response(False, 'ะฃ ะฟะพะปัะทะพะฒะฐัะตะปั ะฝะตั ัะฐะบะพะน ัะพะปะธ!')
    required_signatures = direction.required_signatures(need_save=True)
    if sign_type not in required_signatures['signsRequired']:
        return status_response(False, 'ะะตะบะพััะตะบัะฝะฐั ัะพะปั!')
    last_time_confirm = direction.last_time_confirm()
    if direction_document.last_confirmed_at != last_time_confirm:
        # The document belongs to an older confirmation and is stale.
        return status_response(False, 'ะะพะบัะผะตะฝั ะฑัะป ะพะฑะฝะพะฒะปัะฝ. ะะฑะฝะพะฒะธัะต ัััะฐะฝะธัั!')
    if DocumentSign.objects.filter(document=direction_document, sign_type=sign_type).exists():
        return status_response(False, 'ะะพะบัะผะตะฝั ัะถะต ะฑัะป ะฟะพะดะฟะธัะฐะฝ ั ัะฐะบะพะน ัะพะปัั')
    executors = direction.get_executors()
    if sign_type == 'ะัะฐั' and request.user.doctorprofile.pk not in executors:
        return status_response(False, 'ะะพะดัะฒะตัะดะธัั ะผะพะถะตั ัะพะปัะบะพ ะธัะฟะพะปะฝะธัะตะปั')
    DocumentSign.objects.create(document=direction_document, sign_type=sign_type, executor=request.user.doctorprofile, sign_value=sign)
    # Recompute the cached "fully signed" flag on the direction.
    direction.get_eds_total_signed(forced=True)
    return status_response(True)
@login_required
def eds_to_sign(request):
    """Return a paginated list of directions with their EDS signing state.

    Filters (from the JSON body): mode ('mo' = organisation signature,
    'my' = the caller's own doctor signature), department, status
    ('ok-full' = fully signed, 'ok-role' = signed for the role but not
    fully, anything else = not fully signed), direction number and date.
    """
    data = json.loads(request.body)
    page = max(int(data["page"]), 1)
    filters = data['filters']
    mode = filters['mode']
    department = filters['department']
    status = filters['status']
    number = filters['number']
    rows = []
    # Keep only directions where every issledovaniye is confirmed.
    d_qs = Napravleniya.objects.filter(issledovaniya__time_confirmation__isnull=False).exclude(issledovaniya__time_confirmation__isnull=True)
    if number:
        # Exact pk lookup; a non-numeric "number" matches nothing (pk=-1).
        d_qs = d_qs.filter(pk=number if number.isdigit() else -1)
    else:
        date = filters['date']
        day1 = try_strptime(
            date,
            formats=(
                '%Y-%m-%d',
                '%d.%m.%Y',
            ),
        )
        day2 = day1 + timedelta(days=1)
        d_qs = d_qs.filter(issledovaniya__time_confirmation__range=(day1, day2))
    if mode == 'mo':
        d_qs = d_qs.filter(eds_required_signature_types__contains=['ะะตะดะธัะธะฝัะบะฐั ะพัะณะฐะฝะธะทะฐัะธั'])
        if department == -1:
            d_qs = d_qs.filter(issledovaniya__doc_confirmation__hospital=request.user.doctorprofile.get_hospital())
        else:
            d_qs = d_qs.filter(issledovaniya__doc_confirmation__podrazdeleniye_id=department)
    elif mode == 'my':
        d_qs = d_qs.filter(eds_required_signature_types__contains=['ะัะฐั'], issledovaniya__doc_confirmation=request.user.doctorprofile)
    if status == 'ok-full':
        d_qs = d_qs.filter(eds_total_signed=True)
    elif status == 'ok-role':
        d_qs = d_qs.filter(eds_total_signed=False)
        if mode == 'mo':
            d_qs = d_qs.filter(directiondocument__documentsign__sign_type='ะะตะดะธัะธะฝัะบะฐั ะพัะณะฐะฝะธะทะฐัะธั', directiondocument__is_archive=False)
        elif mode == 'my':
            d_qs = d_qs.filter(directiondocument__documentsign__sign_type='ะัะฐั', directiondocument__is_archive=False)
        else:
            # BUGFIX: the first TODO below was split over two physical lines and
            # the continuation lacked a leading '#', which made this function a
            # syntax error.  Restored as proper comment lines.
            # TODO: ััั ะฝัะถะตะฝ ัะธะปััั, ััะพ ะฟะพะปััะตะฝั ะฒัะต ะฝะตะพะฑัะพะดะธะผัะต ะฟะพะดะฟะธัะธ, ะบัะพะผะต ะะตะดะธัะธะฝัะบะฐั ะพัะณะฐะฝะธะทะฐัะธั, ะตัะปะธ mode == 'mo'
            # TODO: ััั ะฝัะถะตะฝ ัะธะปััั, ััะพ ะฝะต ะฟะพะปััะตะฝะฐ ะฟะพะดะฟะธัั ะัะฐั, ะตัะปะธ mode == 'my'
            d_qs = d_qs.filter(eds_total_signed=False)
    d: Napravleniya
    p = Paginator(d_qs.order_by('pk', 'issledovaniya__time_confirmation').distinct('pk'), SettingManager.get("eds-to-sign_page-size", default='40', default_type='i'))
    for d in p.page(page).object_list:
        documents = []
        ltc = d.last_time_confirm()
        ldc = d.last_doc_confirm()
        signs_required = d.eds_required_signature_types
        for r in d.eds_required_documents:
            dd: DirectionDocument = DirectionDocument.objects.filter(direction=d, is_archive=False, last_confirmed_at=ltc, file_type=r.lower()).first()
            has_signatures = []
            empty_signatures = signs_required
            if dd:
                for s in DocumentSign.objects.filter(document=dd):
                    has_signatures.append(s.sign_type)
                    empty_signatures = [x for x in empty_signatures if x != s.sign_type]
            status = len(empty_signatures) == 0
            documents.append(
                {
                    'pk': dd.pk if dd else None,
                    'type': r,
                    'status': status,
                    'has': has_signatures,
                    'empty': empty_signatures,
                }
            )
        rows.append(
            {
                'pk': d.pk,
                'totallySigned': d.eds_total_signed,
                'confirmedAt': strfdatetime(ltc),
                'docConfirmation': ldc,
                'documents': documents,
                'services': [x.research.get_title() for x in d.issledovaniya_set.all()],
                'n3number': d.n3_odli_id or d.n3_iemk_ok,
            }
        )
    return JsonResponse({"rows": rows, "page": page, "pages": p.num_pages, "total": p.count})
@login_required
def expertise_status(request):
    """Return the expertise status (with availability check) for a direction."""
    request_data = json.loads(request.body)
    direction_pk = request_data.get('pk', -1)
    return JsonResponse(get_expertise(direction_pk, with_check_available=True))
@login_required
def expertise_create(request):
    """Create an expertise direction linked to the given direction's first study.

    Only applies when the study's research has expertise_params configured;
    returns the new direction pk, or None when no expertise was created.
    """
    data = json.loads(request.body)
    pk = data.get('pk', -1)
    n = Napravleniya.objects.get(pk=pk)
    iss: Issledovaniya = n.issledovaniya_set.all().first()
    created_pk = None
    if iss and iss.research and iss.research.expertise_params:
        # Positional arguments follow gen_napravleniya_by_issledovaniya's signature;
        # the new direction is tied to the source study via parent_iss.
        result = Napravleniya.gen_napravleniya_by_issledovaniya(
            n.client_id,
            "",
            None,
            "",
            None,
            request.user.doctorprofile,
            {-1: [iss.research.expertise_params_id]},
            {},
            False,
            {},
            vich_code="",
            count=1,
            discount=0,
            parent_iss=iss.pk,
            rmis_slot=None,
        )
        created_pk = result["list_id"][0]
    return JsonResponse({"pk": created_pk})
@login_required
def send_to_l2vi(request):
    """Send a direction document's CDA XML to the external L2VI service."""
    request_data = json.loads(request.body)
    document: DirectionDocument = DirectionDocument.objects.get(pk=request_data.get('pk', -1))
    send_result = None
    if document.file:
        send_result = send_cda_xml(document.direction_id, document.file.read().decode('utf-8'))
    return JsonResponse({"ok": True, "data": send_result})
@login_required
@group_required("ะัะฐั ะฟะฐัะฐะบะปะธะฝะธะบะธ", "ะัะฐั ะบะพะฝััะปััะฐัะธะน", "ะะฐะฟะพะปะฝะตะฝะธะต ะผะพะฝะธัะพัะธะฝะณะพะฒ", "ะกะฒะธะดะตัะตะปัััะฒะพ ะพ ัะผะตััะธ-ะดะพัััะฟ")
def add_file(request):
    """Attach an uploaded file to an issledovaniye.

    Rejects the upload when 5 files are already attached or the file
    exceeds 5 MB (5242880 bytes).
    """
    uploaded_file = request.FILES.get('file')
    request_data = json.loads(request.FILES['form'].read())
    pk = request_data["pk"]
    existing_files = IssledovaniyaFiles.objects.filter(issledovaniye_id=pk)
    if uploaded_file and existing_files.count() >= 5:
        return JsonResponse(
            {
                "ok": False,
                "message": "ะั ะดะพะฑะฐะฒะธะปะธ ัะปะธัะบะพะผ ะผะฝะพะณะพ ัะฐะนะปะพะฒ ะฒ ะพะดะฝั ะทะฐัะฒะบั",
            }
        )
    if uploaded_file and uploaded_file.size > 5242880:
        return JsonResponse(
            {
                "ok": False,
                "message": "ะคะฐะนะป ัะปะธัะบะพะผ ะฑะพะปััะพะน",
            }
        )
    new_record = IssledovaniyaFiles(issledovaniye_id=pk, uploaded_file=uploaded_file, who_add_files=request.user.doctorprofile)
    new_record.save()
    return JsonResponse(
        {
            "ok": True,
        }
    )
@login_required
def file_log(request):
    """List the files attached to an issledovaniye, newest first."""
    pk = json.loads(request.body)["pk"]
    file_rows = [
        {
            'pk': record.pk,
            'author': record.who_add_files.get_fio(),
            'createdAt': strfdatetime(record.created_at, "%d.%m.%Y %X"),
            'file': record.uploaded_file.url if record.uploaded_file else None,
            'fileName': os.path.basename(record.uploaded_file.name) if record.uploaded_file else None,
        }
        for record in IssledovaniyaFiles.objects.filter(issledovaniye_id=pk).order_by('-created_at')
    ]
    return JsonResponse(
        {
            "rows": file_rows,
        }
    )
| moodpulse/l2 | api/directions/views.py | Python | mit | 163,517 |
# -*- coding: utf-8 -*-
#
# Copyright ยฉ 2013 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
__version__ = '0.3.0.dev0'
# =============================================================================
# The following statements are required to register this 3rd party plugin:
# =============================================================================
from .memoryprofiler import MemoryProfiler
PLUGIN_CLASS = MemoryProfiler
| spyder-ide/spyder.memory_profiler | spyder_memory_profiler/__init__.py | Python | mit | 490 |
# Copyright 2009 Jean-Francois Houzard, Olivier Roger
#
# This file is part of pypassport.
#
# pypassport is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# pypassport is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pyPassport.
# If not, see <http://www.gnu.org/licenses/>.
from pypassport import doc9303
from pypassport.tlvparser import TLVParser, TLVParserException
from pypassport.asn1 import *
from pypassport.hexfunctions import *
from pypassport.logger import Logger
from pypassport.iso19794 import ISO19794_5
from pypassport.iso7816 import Iso7816
from pypassport.doc9303 import converter, mrz, bac
from pypassport.openssl import OpenSSL, OpenSSLException
from pypassport.derobjectidentifier import *
from pypassport.singleton import Singleton
from hashlib import *
import os, sys
#import Image
class DataGroupException(Exception):
    """Raised when a data group cannot be read or parsed."""

    def __init__(self, *params):
        super(DataGroupException, self).__init__(*params)
class DataGroupFile(object):
    """Raw data group file: a header (tag + length) and a body, plus a stop flag.

    The tag is derived from the first header byte via converter.toTAG when a
    non-empty header is assigned.
    """

    def __init__(self):
        self.__tag = ""
        self.__header = ""
        self.__body = ""
        self.__stop = False

    def _setHeader(self, value):
        self.__header = value
        if value != "":
            # The first header byte identifies the data group tag.
            self.__tag = converter.toTAG(binToHexRep(value[0]))

    def _setBody(self, value):
        self.__body = value

    def _getHeader(self):
        return self.__header

    def _getBody(self):
        return self.__body

    def _getFile(self):
        # Complete raw file content: header followed by body.
        return self.header + self.body

    def _setTag(self, tag):
        self.__tag = tag

    def _getTag(self):
        return self.__tag

    def _setStop(self, value):
        # BUGFIX: previously assigned the undefined name 'stop' (NameError at
        # call time) instead of the 'value' parameter.
        self.__stop = value

    def _getStop(self):
        return self.__stop

    header = property(_getHeader, _setHeader, None, None)
    body = property(_getBody, _setBody, None, None)
    file = property(_getFile)
    tag = property(_getTag, _setTag)
class DataGroup(TLVParser, DataGroupFile):
    """Base TLV parser for LDS data groups.

    Combines the raw file container (DataGroupFile) with the generic TLV
    machinery (TLVParser) and adds ePassport tag reading, where a tag is one
    or two bytes long.
    """
    def __init__(self, dgf=None):
        DataGroupFile.__init__(self)
        if dgf:
            self.header = dgf.header
            self.body = dgf.body
        TLVParser.__init__(self, self.body)
    def _getTag(self):
        # A first byte whose low nibble is 0xF announces a two-byte tag.
        if (binToHex(self._data[self._byteNb]) & 0x0F == 0xF):
            tag = binToHexRep(self._data[self._byteNb:self._byteNb+2]).upper()
            self._byteNb += 2
        else:
            tag = binToHexRep(self._data[self._byteNb]).upper()
            self._byteNb += 1
        return tag
    def parse(self):
        """Parse the body as TLV; expand the '5C' data element presence map.

        Raises DataGroupException when the underlying TLV parsing fails.
        """
        try:
            TLVParser.parse(self)
            if self.__contains__("5C"):
                self["5C"] = self._parseDataElementPresenceMap(self["5C"])
        except TLVParserException, msg:
            raise DataGroupException(msg)
        return self
    def _parseDataElementPresenceMap(self, depm):
        """
        Convert concatenated bin tags into a list of string tag.
        >>> from pypassport.doc9303.datagroup import DataGroup, DataGroupFile
        >>> from pypassport.hexfunctions import *
        >>> header = None
        >>> body = hexRepToBin("5C0A5F0E5F115F425F125F13")
        >>> dgf = DataGroupFile()
        >>> dg = DataGroup(dgf)
        >>> res = dg._parseDataElementPresenceMap(body[0x02:])
        >>> res
        ['5F0E', '5F11', '5F42', '5F12', '5F13']
        @param depm: The data element presence map
        @type depm: A binary string
        @return: A list with the tags found in the data element presence map.
        """
        # Temporarily repoint the parser at the presence map, then restore state.
        byteNb = self._byteNb
        data = self._data
        self._byteNb = 0
        self._data = depm
        tags = []
        while self._byteNb < len(depm):
            tag = self._getTag()
            tags.append(tag)
        self._byteNb = byteNb
        self._data = data
        return tags
class DataGroup1(DataGroup):
    """
    Implement the DataGroup1 parsing
    """

    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)

    def parse(self):
        """Split the MRZ (tag 5F1F) into elementary fields by document format.

        The layout (TD1, TD2 or other) is selected from the MRZ length.
        """
        super(DataGroup1, self).parse()
        data = self["5F1F"]
        docType = self._getMRZType(len(data))
        # BUGFIX: _getMRZType() returns "TD1", "TD2" or "OTHER"; the previous
        # check compared against "ID1", so the TD1 branch was unreachable and
        # TD1 MRZs fell through to _parseOther().
        if docType == "TD1":
            self._parseTd1(data)
        elif docType == "TD2":
            self._parseTd2(data)
        else:
            self._parseOther(data)
        return self

    def _parseTd1(self, data):
        # Fixed field offsets for the 90-character (3-line) MRZ.
        self["5F03"] = data[0:2]
        self["5F28"] = data[2:5]
        self["5A"] = data[5:14]
        self["5F04"] = data[14:15]
        self["53"] = [data[15:30]]
        self["5F57"] = data[30:36]
        self["5F05"] = data[36:37]
        self["5F35"] = data[37:38]
        self["59"] = data[38:44]
        self["5F06"] = data[44:45]
        self["5F2C"] = data[45:48]
        self["53"].append( data[48:59] )
        self["5F07"] = data[59:60]
        self["5B"] = data[60:]

    def _parseTd2(self, data):
        # Fixed field offsets for the 72-character (2-line) MRZ.
        self["5F03"] = data[0:2]
        self["5F28"] = data[2:5]
        self["5B"] = data[5:36]
        self["5A"] = data[36:45]
        self["5F04"] = data[45:46]
        self["5F2C"] = data[46:49]
        self["5F57"] = data[49:55]
        self["5F05"] = data[55:56]
        self["5F35"] = data[56:57]
        self["59"] = data[57:63]
        self["5F06"] = data[63:64]
        self["53"] = data[64:71]
        self["5F07"] = data[71:72]

    def _parseOther(self, data):
        # Passport-style MRZ; note some fields are single characters here.
        self["5F03"] = data[0:2]
        self["5F28"] = data[2:5]
        self["5F5B"] = data[5:44]
        self["5A"] = data[44:53]
        self["5F04"] = data[53]
        self["5F2C"] = data[54:57]
        self["5F57"] = data[57:63]
        self["5F05"] = data[63]
        self["5F35"] = data[64]
        self["59"] = data[65:71]
        self["5F06"] = data[71]
        self["53"] = data[72:86]
        self["5F02"] = data[86]
        self["5F07"] = data[87]

    def _getMRZType(self, length):
        # 0x5A (90) characters -> TD1; 0x48 (72) -> TD2; anything else -> OTHER.
        if length == 0x5A:
            return "TD1"
        if length == 0x48:
            return "TD2"
        return "OTHER"
class DataGroup2(DataGroup):
    """DG2 parsing: a counted list of biometric information templates."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
    def parse(self):
        """Parse the template group; one dict entry per template ID.

        Raw image bytes are stored under their 5F2E/7F2E tag with the
        ISO 19794-5 header stripped; the header metadata goes under 'meta'.
        """
        self._byteNb = 0
        #7f61
        tag = self._getTag()
        length = self._getLength()
        #02
        tag = self._getTag()
        self[tag] = self._getValue()
        nbInstance = binToHex(self[tag])
        for x in range(nbInstance):
            #7F60
            tag = self._getTag()
            self._getLength()
            #A1
            templateID = self._getTag()
            #Read A
            v = self._getValue()
            # Parse the template's own TLV content with a nested DataGroup.
            dgf = DataGroupFile()
            dgf.body = v
            dg = DataGroup(dgf)
            dg.parse()
            data = dg
            #Transform the binary data into usable data
            for x in data:
                data[x] = binToHexRep(data[x])
            #5F2E or 7F2E
            tag = self._getTag()
            value = self._getValue()
            # Split the ISO 19794-5 header (metadata) from the image payload.
            headerSize, data['meta'] = ISO19794_5.analyse(binToHexRep(value))
            data[tag] = value[headerSize:]
            self[templateID] = {}
            self[templateID] = data
        return self
class DataGroup3(DataGroup2):
    """DG3: parsed with the same biometric-template layout as DataGroup2."""
    def __init__(self, dgFile):
        DataGroup2.__init__(self, dgFile)
class DataGroup4(DataGroup2):
    """DG4: parsed with the same biometric-template layout as DataGroup2."""
    def __init__(self, dgFile):
        DataGroup2.__init__(self, dgFile)
class DataGroup5(DataGroup):
    """DG5: a counted list of values stored under a single repeated tag."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
    def parse(self):
        """
        The returned value is a dictionary with two keys:
        1. '02': The number of instances
        2. '5F40' or '5F43' : A list of displayed portrait or A list of displayed signature"
        The value is a list of list
        ex:
        - {'02': [2], '5F40' : [[0x..,0x..,0x..], [0x..,0x..,0x..]]}
        - {'02': [1], '5F43' : [[0x..,0x..,0x..]]}
        Each values of the dictionnary are in a list of hexadecimal/decimal values.
        """
        self._byteNb = 0
        # First TLV: the number of stored instances.
        tag = self._getTag()
        self[tag] = self._getValue()
        nbInstance = binToHex(self[tag])
        data = []
        for x in range(nbInstance):
            # Each instance is one TLV; all instances share the same tag.
            tag = self._getTag()
            data.append(self._getValue())
        self[tag] = data
        return self
class DataGroup6(DataGroup5):
    """DG6: parsed with DataGroup5's counted-value layout."""
    def __init__(self, dgFile):
        DataGroup5.__init__(self, dgFile)
class DataGroup7(DataGroup5):
    """DG7: parsed with DataGroup5's counted-value layout."""
    def __init__(self, dgFile):
        DataGroup5.__init__(self, dgFile)
class DataGroup8(DataGroup5):
    """DG8: parsed with DataGroup5's counted-value layout."""
    def __init__(self, dgFile):
        DataGroup5.__init__(self, dgFile)
class DataGroup9(DataGroup5):
    """DG9: parsed with DataGroup5's counted-value layout."""
    def __init__(self, dgFile):
        DataGroup5.__init__(self, dgFile)
class DataGroup10(DataGroup5):
    """DG10: parsed with DataGroup5's counted-value layout."""
    def __init__(self, dgFile):
        DataGroup5.__init__(self, dgFile)
class DataGroup11(DataGroup):
    """DG11 parsing; exposes an 8-digit '5F2B' value as a hex string."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
    def parse(self):
        super(DataGroup11, self).parse()
        if self.has_key("5F2B"):
            # Only convert when the value has the expected 8-hex-digit length.
            if len(binToHexRep(self["5F2B"])) == 8:
                self["5F2B"] = binToHexRep(self["5F2B"])
        return self
class DataGroup12(DataGroup):
    """DG12 parsing; exposes the '5F26' and '5F55' values as hex strings."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
    def parse(self):
        """Parse DG12; convert '5F26' (8 digits) and '5F55' (14 digits) to hex strings."""
        super(DataGroup12, self).parse()
        if self.has_key("5F26"):
            if len(binToHexRep(self["5F26"])) == 8:
                self["5F26"] = binToHexRep(self["5F26"])
        if self.has_key("5F55"):
            if len(binToHexRep(self["5F55"])) == 14:
                # BUGFIX: the converted value was written back to '5F26',
                # clobbering that field instead of updating '5F55'.
                self["5F55"] = binToHexRep(self["5F55"])
        return self
class DataGroup13(DataGroup):
    """DG13: parsed with the generic DataGroup TLV parsing only."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
class DataGroup14(DataGroup):
    """DG14: content is deliberately left unparsed; the raw file is kept as-is."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)#Reserved for future use (RFU)
    def parse(self):
        # Intentional no-op: the raw content is not decoded here.
        return self
class DataGroup15(DataGroup):
    """DG15: content is deliberately left unparsed; the raw file is kept as-is."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
    def parse(self):
        # Intentional no-op: the raw content is not decoded here.
        return self
class DataGroup16(DataGroup):
    """DG16: parses a counted list of templates into self[0..n-1]."""
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
    def parse(self):
        #Read the number of templates
        # NOTE(review): other parse() methods in this file reset self._byteNb
        # here; this one sets self._tagOffset, which is not used anywhere else
        # in this file — confirm against TLVParser whether that is intentional.
        self._tagOffset = 0
        tag = self._getTag()
        nbInstance = binToHex(self._getValue())
        for i in range(nbInstance):
            #Read each Template Element
            tag = self._getTag()
            # _parseTemplate is presumably inherited from TLVParser — verify.
            self[i] = self._parseTemplate(self._getValue())
        return self
class Com(DataGroup):
    """
    Implement the parsing of the com file
    """
    # Uses the generic DataGroup TLV parsing unchanged.
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
class SOD(DataGroup):
    """
    Implement the sod parsing
    """
    def __init__(self, dgFile):
        DataGroup.__init__(self, dgFile)
    def parse(self):
        # Intentional no-op: the Document Security Object is kept raw.
        return self
class DataGroupFactory(Singleton, Logger):
    """Instantiate and parse the DataGroup subclass matching a file's tag."""
    def __init__(self):
        Logger.__init__(self, "DataGroup")
    def create(self, dgFile):
        """Build the subclass named by converter.toClass(tag) and parse it.

        Parsing failures are logged, not raised; the (possibly unparsed)
        object is returned either way.
        """
        # eval() resolves the class name produced by converter.toClass within
        # this module's namespace.
        dg = eval(converter.toClass(dgFile.tag))(dgFile)
        try:
            dg.parse()
        except Exception, msg:
            self.log("Parsing failed: " + str(msg), converter.toDG(dg.tag))
        return dg
class Events(object):
    """Minimal observer: registered callables receive every logged message."""

    def __init__(self):
        self._listeners = []

    def register(self, fct):
        """the listener gives the method he want as callback"""
        self._listeners.append(fct)

    def unregister(self, listener):
        self._listeners.remove(listener)

    def log(self, msg):
        for callback in self._listeners:
            callback(msg)
class DataGroupReader(Logger):
    """
    Read a specific dataGroup from the passport.
    This is the superclass defining the interface for the classes implementing the reading.
    """
    def __init__(self, iso7816, maxSize = 0xDF):
        """
        @param iso7816: The layer sending iso7816 apdu to the reader.
        @type iso7816: A iso7816 object
        @param maxSize: The maximum buffer size accepted by the reader.
        @type maxSize: An integer (hexa)
        """
        Logger.__init__(self, "DataGroupReader")
        self._iso7816 = iso7816
        self._file = DataGroupFile()
        self._bodySize = 0
        self._bodyOffset = 0 #The beginning of the body data
        self._offset = 0
        self._maxSize = maxSize
        # Progress observer: listeners receive a 0-100 percentage via log().
        self.processed = Events()
    def readDG(self, dg):
        """
        Read the specified dataGroup and return the file in two parts:
        A dataGroup::
            6C 40
                5C 06 5F195F265F1A
                5F19 18 UNITED STATES OF AMERICA
                5F26 08 20020531
                5F1A 0F SMITH<<BRENDA<P
        1. The header::
            6C 40
        2. The body ::
            5C 06 5F195F265F1A
            5F19 18 UNITED STATES OF AMERICA
            5F26 08 20020531
            5F1A 0F SMITH<<BRENDA<P
        """
        self.stop = False
        self.offset = 0
        # NOTE(review): subclasses implement _selectFile(self, tag); the base
        # declaration below takes no argument — confirm it is abstract-only.
        self._selectFile(dg)
        self._file = DataGroupFile()
        self._file.header = self._readHeader(dg)
        self._file.body = self._readBody()
        return self._file
    def _selectFile(self):
        raise DataGroupException("Should be implemented")
    def _readHeader(self, dg):
        # First bytes: the tag byte followed by the ASN.1-encoded body length.
        header = self._iso7816.readBinary(self.offset, 4)
        (self._bodySize, self.offset) = asn1Length(header[1:])
        self.offset += 1
        if(converter.toTAG(dg) != binToHexRep(header[0])):
            # NOTE(review): 'self.file' is not defined on this class (only
            # 'self._file'), so this error path itself would raise
            # AttributeError — confirm and fix upstream.
            raise Exception, "Wrong AID: " + binToHexRep(header[0]) + " instead of " + str(self.file.tag)
        return header[:self.offset]
    def _readBody(self):
        # Read the body in maxSize chunks; abort cooperatively when stop is set.
        body = ""
        toRead = self._bodySize
        while not self.stop and toRead > self._maxSize:
            tmp = self._iso7816.readBinary(self.offset, self._maxSize)
            body += tmp
            toRead -= self._maxSize
            self.offset += self._maxSize
        if self.stop:
            self.log('reading aborded')
            self.stop = False
            raise Exception("reading aborded")
        tmp = self._iso7816.readBinary(self.offset, toRead)
        self.offset += len(tmp)
        body += tmp
        if self._bodySize != len(body):
            raise Exception, "The file is not entirely read: expected: " + str(self._bodySize) + " read: " + str(len(body))
        return body
    def _getOffset(self):
        return self._offset
    def _setOffset(self, value):
        self._offset = value
        # Notify listeners with the read progress as an integer percentage.
        if len(self._file.header) + value != 0:
            v = int((float(value) / float((len(self._file.header) + self._bodySize)))*100)
            self.processed.log(v)
    offset = property(_getOffset, _setOffset)
class FSDataGroupReader(DataGroupReader):
    """
    Data group reader using plain file-system selection by File Identifier (FID).
    """

    def __init__(self, iso7816, maxSize=0xDF):
        DataGroupReader.__init__(self, iso7816, maxSize)

    def _selectFile(self, tag):
        fid = converter.toFID(tag)
        self._iso7816.selectFile("02", "0C", fid)
class SFIDataGroupReader(DataGroupReader):
    """
    Data group reader using the Short File Identifier (SFI) access mode.
    """

    def __init__(self, iso7816, maxSize=0xDF):
        DataGroupReader.__init__(self, iso7816, maxSize)

    def _selectFile(self, tag):
        # No explicit SELECT is sent: reading simply starts at an offset
        # derived from the SFI (high bit set, shifted into the high byte).
        sfi = hexRepToHex(converter.toSEF(tag)) ^ 0x80
        self._offset = sfi * 256
class DataGroupReaderFactory(Singleton):
    """Create the DataGroupReader matching the requested access mode."""

    reader = {
        "FS": FSDataGroupReader,
        "SFI": SFIDataGroupReader,
    }

    def create(self, iso7816, reader="FS"):
        reader_cls = self.reader[reader]
        return reader_cls(iso7816)
class DataGroupDump(object):
    """
    Save the passport, a specific dataGroup or some data to the disk.
    """
    def __init__(self, path, ext=""):
        """
        @param path: The path where the dump will be stored.
        @param ext: File extension
        @type path: A string
        @raise Exception: If the specified directory in invalid.
        """
        if os.path.isdir(path):
            self._path = path
            # Ensure the stored path ends with the platform separator.
            self._path += os.path.sep
            self._ext = ext
        else:
            raise Exception, path + " is not a valid directory"
    def dump(self, ep, format=converter.types.FID):
        """
        Save the dataGroup binaries on the HDD.
        The name format is specified by the format parameter.
        @param ep: The EPassport object.
        @type ep: A dictionary
        @param format: Specify the file name format. (FID, TAG, SEF,...)
        @type format: An element out of the converter.types enumeration.
        """
        # One file per data group in the passport dictionary.
        for tag in ep:
            self.dumpDG(ep[tag], format)
    def dumpDG(self, dg, format=converter.types.FID):
        """
        Save the specified dataGroup on the HDD.
        @param dg: A filled dataGroup object
        @type dg: A dataGroup object
        @param format: Specify the file name format. (FID, TAG, SEF,...)
        @type format: An element out of the converter.types enumeration.
        """
        f = open(self._path + converter.to(format, dg.tag) + self._ext, "wb")
        f.write(dg.file)
        f.close()
    def dumpData(self, data, name):
        """
        Save some data on the HDD. The data can be the binary of a picture for example.
        It will be saved under the name passed as parameter.
        @param data: The binary to save on the HDD
        @type data: A binary string
        @param name: The file name
        @type name: A string
        """
        # Silently skip empty payloads.
        if data == None:
            return
        f = open(self._path + name, "wb")
        f.write(data)
        f.close()
| tonikelope/python-passport-trace-attack | pypassport/doc9303/datagroup.py | Python | gpl-2.0 | 19,795 |
# -*- coding: utf-8 -*-
"""This file contains a plugin for parsing Google Analytics cookies."""
import urllib
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.parsers.cookie_plugins import interface
from plaso.parsers.cookie_plugins import manager
class GoogleAnalyticsEvent(time_events.PosixTimeEvent):
  """A simple placeholder for a Google Analytics event."""

  DATA_TYPE = u'cookie:google:analytics'

  def __init__(
      self, timestamp, timestamp_desc, url, cookie_identifier, **kwargs):
    """Initialize a Google Analytics event.

    Args:
      timestamp: The timestamp in a POSIX format.
      timestamp_desc: A string describing the timestamp.
      url: The full URL where the cookie got set.
      cookie_identifier: String to uniquely identify the cookie.
    """
    # Specialize the data type with the cookie identifier, e.g.
    # cookie:google:analytics:utma.
    specialized_data_type = u'{0:s}:{1:s}'.format(
        self.DATA_TYPE, cookie_identifier)
    super(GoogleAnalyticsEvent, self).__init__(
        timestamp, timestamp_desc, specialized_data_type)
    self.cookie_name = u'__{0:s}'.format(cookie_identifier)
    self.url = url
    # TODO: refactor, this approach makes it very hard to tell
    # which values are actually set.
    for attribute_name, attribute_value in kwargs.items():
      setattr(self, attribute_name, attribute_value)
class GoogleAnalyticsUtmaPlugin(interface.BaseCookiePlugin):
  """A browser cookie plugin for __utma Google Analytics cookies.

  The structure of the cookie data:
  <domain hash>.<visitor ID>.<first visit>.<previous>.<last>.<# of sessions>

  For example:
  137167072.1215918423.1383170166.1383170166.1383170166.1
  """

  NAME = u'google_analytics_utma'
  DESCRIPTION = u'Google Analytics utma cookie parser'

  COOKIE_NAME = u'__utma'

  # Point to few sources for URL information.
  URLS = [
      (u'http://www.dfinews.com/articles/2012/02/'
       u'google-analytics-cookies-and-forensic-implications')]

  def _ParseIntegerField(self, value):
    """Parses a base-10 integer field value.

    Args:
      value: the field value, as a string.

    Returns:
      The integer value, or 0 when the field cannot be parsed.
    """
    try:
      return int(value, 10)
    except ValueError:
      return 0

  def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **unused_kwargs):
    """Extracts event objects from the cookie.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      cookie_data: The cookie data, as a byte string.
      url: The full URL or path where the cookie got set.

    Raises:
      errors.WrongPlugin: when the cookie does not have 6 fields.
    """
    fields = cookie_data.split(u'.')

    # Check for a valid record.
    if len(fields) != 6:
      raise errors.WrongPlugin(u'Wrong number of fields. [{0:d} vs. 6]'.format(
          len(fields)))

    domain_hash, visitor_id, first_visit, previous, last, sessions = fields

    # Malformed numeric fields default to 0 rather than failing the record.
    # TODO: Double check this time is stored in UTC and not local time.
    first_epoch = self._ParseIntegerField(first_visit)
    sessions = self._ParseIntegerField(sessions)
    previous = self._ParseIntegerField(previous)
    last = self._ParseIntegerField(last)

    event_object = GoogleAnalyticsEvent(
        first_epoch, u'Analytics Creation Time', url, u'utma',
        domain_hash=domain_hash, visitor_id=visitor_id, sessions=sessions)
    parser_mediator.ProduceEvent(event_object)

    event_object = GoogleAnalyticsEvent(
        previous, u'Analytics Previous Time', url, u'utma',
        domain_hash=domain_hash, visitor_id=visitor_id, sessions=sessions)
    parser_mediator.ProduceEvent(event_object)

    event_object = GoogleAnalyticsEvent(
        last, eventdata.EventTimestamp.LAST_VISITED_TIME, url, u'utma',
        domain_hash=domain_hash, visitor_id=visitor_id,
        sessions=sessions)
    parser_mediator.ProduceEvent(event_object)
class GoogleAnalyticsUtmbPlugin(interface.BaseCookiePlugin):
  """A browser cookie plugin for __utmb Google Analytics cookies.

  The structure of the cookie data:
  <domain hash>.<pages viewed>.10.<last time>

  For example:
  137167072.1.10.1383170166
  """

  NAME = u'google_analytics_utmb'
  DESCRIPTION = u'Google Analytics utmb cookie parser'

  COOKIE_NAME = u'__utmb'

  # Point to few sources for URL information.
  URLS = [
      (u'http://www.dfinews.com/articles/2012/02/'
       u'google-analytics-cookies-and-forensic-implications')]

  def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **unused_kwargs):
    """Extracts event objects from the cookie.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      cookie_data: The cookie data, as a byte string.
      url: The full URL or path where the cookie got set.

    Raises:
      errors.WrongPlugin: when the cookie does not have 4 fields.
    """
    fields = cookie_data.split(u'.')

    # Check for a valid record.
    if len(fields) != 4:
      raise errors.WrongPlugin(u'Wrong number of fields. [{0:d} vs. 4]'.format(
          len(fields)))

    domain_hash, pages_viewed, _, last = fields

    # Malformed numeric fields default to 0 rather than failing the record.
    try:
      last = int(last, 10)
    except ValueError:
      last = 0

    try:
      pages_viewed = int(pages_viewed, 10)
    except ValueError:
      pages_viewed = 0

    event_object = GoogleAnalyticsEvent(
        last, eventdata.EventTimestamp.LAST_VISITED_TIME, url, u'utmb',
        domain_hash=domain_hash, pages_viewed=pages_viewed)
    parser_mediator.ProduceEvent(event_object)
class GoogleAnalyticsUtmzPlugin(interface.BaseCookiePlugin):
  """A browser cookie plugin for __utmz Google Analytics cookies.

  The structure of the cookie data:
  <domain hash>.<last time>.<sessions>.<sources>.<variables>

  For example:
  207318870.1383170190.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|
  utmctr=(not%20provided)
  """

  NAME = u'google_analytics_utmz'
  DESCRIPTION = u'Google Analytics utmz cookie parser'

  COOKIE_NAME = u'__utmz'

  # Point to few sources for URL information.
  URLS = [
      (u'http://www.dfinews.com/articles/2012/02/'
       u'google-analytics-cookies-and-forensic-implications')]

  def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **unused_kwargs):
    """Extracts event objects from the cookie.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      cookie_data: The cookie data, as a byte string.
      url: The full URL or path where the cookie got set.

    Raises:
      errors.WrongPlugin: if the cookie data cannot be split into 5 fields.
    """
    fields = cookie_data.split(u'.')

    # The trailing variables field may itself contain dots; re-join any
    # surplus fields back into a single variables field.
    if len(fields) > 5:
      variables = u'.'.join(fields[4:])
      fields = fields[0:4]
      fields.append(variables)

    if len(fields) != 5:
      raise errors.WrongPlugin(u'Wrong number of fields. [{0:d} vs. 5]'.format(
          len(fields)))

    domain_hash, last, sessions, sources, variables = fields
    extra_variables = variables.split(u'|')

    kwargs = {}
    for variable in extra_variables:
      key, _, value = variable.partition(u'=')

      # Cookies can have a variety of different encodings, usually ASCII or
      # UTF-8, and values may additionally be URL encoded. urllib only correctly
      # url-decodes ASCII strings, so we'll convert our string to ASCII first.
      try:
        ascii_value = value.encode(u'ascii')
      except UnicodeEncodeError:
        ascii_value = value.encode(u'ascii', errors=u'ignore')
        parser_mediator.ProduceParseError(
            u'Cookie contains non 7-bit ASCII characters. The characters have '
            u'been removed')

      utf_stream = urllib.unquote(ascii_value)

      try:
        value_line = utf_stream.decode(u'utf-8')
      except UnicodeDecodeError:
        value_line = utf_stream.decode(u'utf-8', errors=u'replace')
        parser_mediator.ProduceParseError(
            u'Cookie value did not decode to value unicode string. Non UTF-8 '
            u'characters have been replaced.')

      kwargs[key] = value_line

    # Malformed numeric fields default to 0 instead of failing the parse.
    try:
      last = int(last, 10)
    except ValueError:
      last = 0

    try:
      sessions = int(sessions, 10)
    except ValueError:
      sessions = 0

    try:
      sources = int(sources, 10)
    except ValueError:
      sources = 0

    event_object = GoogleAnalyticsEvent(
        last, eventdata.EventTimestamp.LAST_VISITED_TIME, url, u'utmz',
        domain_hash=domain_hash, sessions=sessions,
        sources=sources, **kwargs)
    parser_mediator.ProduceEvent(event_object)
# Register all Google Analytics cookie plugins with the plugins manager.
manager.CookiePluginsManager.RegisterPlugins([
    GoogleAnalyticsUtmaPlugin, GoogleAnalyticsUtmbPlugin,
    GoogleAnalyticsUtmzPlugin])
| jorik041/plaso | plaso/parsers/cookie_plugins/ganalytics.py | Python | apache-2.0 | 8,233 |
class LSA(object):
    """Latent Strain Analysis runner configuration.

    Holds the input/output paths used by the LSA pipeline.
    """

    def __init__(self, input_path, output_path):
        """Store the pipeline paths.

        Args:
            input_path: directory/file the pipeline reads from.
            output_path: directory/file the pipeline writes to.
        """
        super(LSA, self).__init__()
        self.input_path = input_path
        self.output_path = output_path
        # Prefix used when parsing "k, bins: [..." hash-function header lines.
        # BUG FIX: the original line had dataset-extraction residue
        # ("| repo | path | ..." metadata) fused onto the end of the string
        # assignment, which made the statement a syntax error; the literal
        # is restored here.
        self.hpfx = 'k, bins: ['
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
"Preference"
import gettext
import gtk
import copy
from tryton.gui.window.view_form.screen import Screen
from tryton.config import TRYTON_ICON
import tryton.common as common
from tryton.common import RPCExecute, RPCException
from tryton.gui.window.nomodal import NoModal
import tryton.rpc as rpc
_ = gettext.gettext
class Preference(NoModal):
    "Preference window"

    def __init__(self, user, callback):
        # NOTE(review): `user` is accepted but not referenced in this view;
        # the edited preferences are those of the logged-in user
        # (rpc._USER) -- confirm against callers.
        NoModal.__init__(self)
        self.callback = callback
        # Build the non-modal GTK dialog with OK/Cancel buttons; Ctrl+Return
        # activates OK.
        self.win = gtk.Dialog(_('Preferences'), self.parent,
            gtk.DIALOG_DESTROY_WITH_PARENT)
        self.win.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
        self.win.set_has_separator(False)
        self.win.set_icon(TRYTON_ICON)
        self.accel_group = gtk.AccelGroup()
        self.win.add_accel_group(self.accel_group)
        self.but_cancel = self.win.add_button(gtk.STOCK_CANCEL,
            gtk.RESPONSE_CANCEL)
        self.but_ok = self.win.add_button(gtk.STOCK_OK, gtk.RESPONSE_OK)
        self.but_ok.add_accelerator('clicked', self.accel_group,
            gtk.keysyms.Return, gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
        self.win.set_default_response(gtk.RESPONSE_OK)
        self.win.connect('response', self.response)

        # Fetch the preferences form view from the server; abort the dialog
        # on RPC failure.
        try:
            view = RPCExecute('model', 'res.user',
                'get_preferences_fields_view')
        except RPCException:
            self.win.destroy()
            self.win = None
            return

        title = gtk.Label(_('Edit User Preferences'))
        title.show()
        self.win.vbox.pack_start(title, expand=False, fill=True)
        self.screen = Screen('res.user', mode=[])
        # Reset readonly set automaticly by MODELACCESS
        self.screen.readonly = False
        self.screen.group.readonly = False
        self.screen.group.skip_model_access = True
        self.screen.add_view(view)
        self.screen.switch_view()
        self.screen.new(default=False)

        # Load the current preference values into the fresh record.
        try:
            preferences = RPCExecute('model', 'res.user', 'get_preferences',
                False)
        except RPCException:
            self.win.destroy()
            self.win = None
            return
        self.screen.current_record.set(preferences)
        self.screen.current_record.id = rpc._USER
        self.screen.current_record.validate(softvalidation=True)
        self.screen.display(set_cursor=True)

        self.screen.widget.show()
        self.win.vbox.pack_start(self.screen.widget)
        self.win.set_title(_('Preference'))

        # Size the dialog relative to the parent window.
        width, height = self.parent.get_size()
        self.win.set_default_size(int(width * 0.9), int(height * 0.9))

        self.register()
        self.win.show()

    def response(self, win, response_id):
        """Handle the dialog response: save preferences on OK, then close."""
        if response_id == gtk.RESPONSE_OK:
            if self.screen.current_record.validate():
                vals = copy.copy(self.screen.get())
                # Changing the password requires re-entering the current one.
                if 'password' in vals:
                    password = common.ask(_('Current Password:'),
                        visibility=False)
                    if not password:
                        return
                else:
                    password = False
                try:
                    RPCExecute('model', 'res.user', 'set_preferences',
                        vals, password)
                except RPCException:
                    return
        # Runs for both OK and Cancel: re-focus the parent and tear down.
        self.parent.present()
        self.destroy()
        self.callback()

    def destroy(self):
        """Destroy the screen and window and unregister the dialog."""
        self.screen.destroy()
        self.win.destroy()
        NoModal.destroy(self)
| kret0s/gnuhealth-live | tryton/client/tryton/gui/window/preference.py | Python | gpl-3.0 | 3,674 |
#!/usr/bin/env python
#
# $Id$
#
"""Routines common to all posix systems."""
import os
import errno
import subprocess
import psutil
import socket
import re
import sys
import warnings
import time
from psutil.error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._compat import namedtuple
def pid_exists(pid):
    """Check whether pid exists in the current process table."""
    if pid < 0:
        # Negative PIDs address process groups, not single processes.
        return False
    try:
        # Signal 0 performs error checking only; it does not touch the
        # target process.
        os.kill(pid, 0)
    except OSError, e:
        # EPERM means the process exists but we lack permission to signal
        # it; any other errno (typically ESRCH) means it does not exist.
        return e.errno == errno.EPERM
    else:
        return True
def wait_pid(pid, timeout=None):
    """Wait for process with pid 'pid' to terminate and return its
    exit status code as an integer.

    If pid is not a children of os.getpid() (current process) just
    waits until the process disappears and return None.

    If pid does not exist at all return None immediately.

    Raise TimeoutExpired on timeout expired.
    """
    def check_timeout():
        # Raise once the deadline has passed, otherwise sleep briefly so
        # the caller's polling loop does not spin at 100% CPU.
        if timeout:
            if time.time() >= stop_at:
                raise TimeoutExpired
        time.sleep(0.001)

    if timeout:
        # Use a non-blocking waitpid so the deadline can be enforced here.
        waitcall = lambda: os.waitpid(pid, os.WNOHANG)
        stop_at = time.time() + timeout
    else:
        waitcall = lambda: os.waitpid(pid, 0)

    while 1:
        try:
            retpid, status = waitcall()
        except OSError, err:
            if err.errno == errno.EINTR:
                # Interrupted by a signal: retry.
                check_timeout()
                continue
            elif err.errno == errno.ECHILD:
                # not a child of os.getpid(): poll until pid has
                # disappeared and return None instead
                while 1:
                    if pid_exists(pid):
                        check_timeout()
                    else:
                        return
            else:
                raise
        else:
            if retpid == 0:
                # WNOHANG and the child is still running: poll again.
                check_timeout()
                continue
            # process exited due to a signal; return the integer of
            # that signal
            if os.WIFSIGNALED(status):
                return os.WTERMSIG(status)
            # process exited using exit(2) system call; return the
            # integer exit(2) system call has been called with
            elif os.WIFEXITED(status):
                return os.WEXITSTATUS(status)
            else:
                # should never happen
                raise RuntimeError("unknown process exit status")
class LsofParser:
    """A wrapper for lsof command line utility.
    Executes lsof in subprocess and parses its output.
    """
    # Maps lsof protocol/family tokens to socket module constants.
    socket_table = {'TCP' : socket.SOCK_STREAM,
                    'UDP' : socket.SOCK_DGRAM,
                    'IPv4' : socket.AF_INET,
                    'IPv6' : socket.AF_INET6}
    _openfile_ntuple = namedtuple('openfile', 'path fd')
    _connection_ntuple = namedtuple('connection', 'fd family type local_address '
                                                  'remote_address status')

    def __init__(self, pid, name):
        # pid/name of the process to inspect; name is used in exceptions.
        self.pid = pid
        self.process_name = name

    def get_process_open_files(self):
        """Return files opened by process by parsing lsof output."""
        # Options:
        # -a == ANDing of all options
        # -p == process with given PID only
        # -n == do not resolve IP addresses
        # -P == do not resolve port numbers
        # -w == suppresses warnings
        # -F0nPt == (0) separate lines with "\x00"
        #           (n) file name
        #           (t) file type
        #           (f) file descriptor
        cmd = "lsof -a -p %s -n -P -F0ftn" % self.pid
        stdout = self.runcmd(cmd)
        if not stdout:
            return []
        files = []
        lines = stdout.split("\n")
        del lines[0]  # first line contains the PID
        for line in lines:
            if not line:
                continue
            line = line.strip("\x00")
            # Each "\x00"-separated field starts with a one-letter field key.
            fields = {}
            for field in line.split("\x00"):
                key, value = field[0], field[1:]
                fields[key] = value
            if not 't' in fields:
                continue
            _type = fields['t']
            fd = fields['f']
            name = fields['n']
            # Only regular files with a numeric descriptor are reported.
            if 'REG' in _type and fd.isdigit():
                # Skip entries whose target no longer exists on disk.
                if not os.path.isfile(os.path.realpath(name)):
                    continue
                ntuple = self._openfile_ntuple(name, int(fd))
                files.append(ntuple)
        return files

    def get_process_connections(self):
        """Return connections opened by a process by parsing lsof output."""
        # Options:
        # -i == network files only
        # -a == ANDing of all options
        # -p == process with given PID only
        # -n == do not resolve IP addresses
        # -P == do not resolve port numbers
        # -w == suppresses warnings
        # -F0nPt == (0) separate lines with "\x00"
        #           (n) and show internet addresses only
        #           (P) protocol type (TCP, UPD, Unix)
        #           (t) socket family (IPv4, IPv6)
        #           (T) connection status
        #           (f) file descriptors
        cmd = "lsof -p %s -i -a -F0nPtTf -n -P" % self.pid
        stdout = self.runcmd(cmd)
        if not stdout:
            return []
        connections = []
        lines = stdout.split()
        del lines[0]  # first line contains the PID
        for line in lines:
            line = line.strip("\x00")
            fields = {}
            for field in line.split("\x00"):
                # "T" fields arrive as "Tkey=value" pairs, e.g. TST=LISTEN.
                if field.startswith('T'):
                    key, value = field.split('=')
                else:
                    key, value = field[0], field[1:]
                fields[key] = value

            # XXX - might throw exception; needs "continue on unsupported
            # family or type" (e.g. unix sockets)
            # we consider TCP and UDP sockets only
            stype = fields['P']
            if stype not in self.socket_table:
                continue
            else:
                _type = self.socket_table[fields['P']]
            family = self.socket_table[fields['t']]
            peers = fields['n']
            fd = int(fields['f'])
            if _type == socket.SOCK_STREAM:
                status = fields['TST']
                # OS X shows "CLOSED" instead of "CLOSE" so translate them
                if status == "CLOSED":
                    status = "CLOSE"
            else:
                status = ""

            if not '->' in peers:
                # No remote peer: a listening / unconnected socket.
                local_addr = self._normaddress(peers, family)
                remote_addr = ()
                # OS X processes e.g. SystemUIServer can return *:* for local
                # address, so we return 0 and move on
                if local_addr == 0:
                    continue
            else:
                local_addr, remote_addr = peers.split("->")
                local_addr = self._normaddress(local_addr, family)
                remote_addr = self._normaddress(remote_addr, family)

            conn = self._connection_ntuple(fd, family, _type, local_addr,
                                           remote_addr, status)
            connections.append(conn)

        return connections

    def runcmd(self, cmd):
        """Expects an lsof-related command line, execute it in a
        subprocess and return its output.
        If something goes bad stderr is parsed and proper exceptions
        raised as necessary.
        """
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if sys.version_info >= (3,):
            stdout, stderr = map(lambda x: x.decode(sys.stdout.encoding),
                                 (stdout, stderr))
        if stderr:
            utility = cmd.split(' ')[0]
            if self._which(utility) is None:
                msg = "this functionnality requires %s command line utility " \
                      "to be installed on the system" % utility
                raise NotImplementedError(msg)
            elif "permission denied" in stderr.lower():
                # "permission denied" can be found also in case of zombie
                # processes;
                p = psutil.Process(self.pid)
                if not p.is_running():
                    raise NoSuchProcess(self.pid, self.process_name)
                raise AccessDenied(self.pid, self.process_name)
            elif "lsof: warning:" in stderr.lower():
                # usually appears when lsof is run for the first time and
                # complains about missing cache file in user home
                warnings.warn(stderr, RuntimeWarning)
            else:
                # this must be considered an application bug
                raise RuntimeError(stderr)
        if not stdout:
            # Empty output: double-check whether the process disappeared.
            p = psutil.Process(self.pid)
            if not p.is_running():
                raise NoSuchProcess(self.pid, self.process_name)
            return ""
        return stdout

    @staticmethod
    def _which(program):
        """Same as UNIX which command. Return None on command not found."""
        def is_exe(fpath):
            # True for an existing, executable regular file.
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        fpath, fname = os.path.split(program)
        if fpath:
            # An explicit path was given: check it directly.
            if is_exe(program):
                return program
        else:
            # Bare command name: search every PATH entry.
            for path in os.environ["PATH"].split(os.pathsep):
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file
        return None

    @staticmethod
    def _normaddress(addr, family):
        """Normalize an IP address.

        Returns an (ip, port) tuple, or 0 for a "*:*" wildcard port.
        """
        assert family in (socket.AF_INET, socket.AF_INET6), "unsupported family"
        if family == socket.AF_INET:
            ip, port = addr.split(':')
        else:
            # IPv6 addresses may be bracketed: "[::1]:8080".
            if "]" in addr:
                ip, port = re.findall('\[([^]]+)\]:([0-9]+)', addr)[0]
            else:
                ip, port = addr.split(':')
        if ip == '*':
            # Wildcard bind address: expand to the family's any-address form.
            if family == socket.AF_INET:
                ip = "0.0.0.0"
            elif family == socket.AF_INET6:
                ip = "::"
            # OS X can have some procs e.g. SystemUIServer listening on *:*
            else:
                raise ValueError("invalid IP %s" %addr)
        if port == "*":
            return 0
        return (ip, int(port))
| elventear/psutil | psutil/_psposix.py | Python | bsd-3-clause | 10,468 |
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2017 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# PX4 module documentation processor (main executable file)
#
# This tool scans the PX4 source code for declarations of module documentations
# in the form PRINT_MODULE_* and converts them into Mardown output
#
from __future__ import print_function
import sys
import os
import argparse
from px4moduledoc import srcscanner, srcparser, markdownout
import re
import json
import codecs
def main():
    """Command line entry point: scan sources and emit Markdown docs."""
    # Parse command line arguments.
    arg_parser = argparse.ArgumentParser(
        description="Process module documentation.")
    arg_parser.add_argument(
        "-s", "--src-path",
        default=["../src"],
        metavar="PATH",
        nargs='*',
        help="one or more paths to source files to scan for parameters")
    arg_parser.add_argument(
        "-m", "--markdown",
        nargs='?',
        const=".",
        metavar="DIRECTORY",
        help="Markdown output directory"
        " (default DIRECTORY: .)")
    arg_parser.add_argument(
        '--no-validation', action='store_true',
        help="do not fail if consistency checks fail")
    arg_parser.add_argument(
        '-v', '--verbose', action='store_true', help="verbose output")
    args = arg_parser.parse_args()

    # At least one output method must be selected.
    if not args.markdown:
        print("Error: You need to specify at least one output method!")
        arg_parser.print_usage()
        sys.exit(1)

    # Initialize source scanner and parser.
    scanner = srcscanner.SourceScanner()
    source_parser = srcparser.SourceParser()

    # Scan directories, and parse the files.
    if args.verbose:
        print("Scanning source path " + str(args.src_path))
    if not scanner.ScanDir(args.src_path, source_parser):
        sys.exit(1)
    if not args.no_validation and source_parser.HasValidationFailure():
        print("Error: validation failed")
        sys.exit(1)

    module_groups = source_parser.GetModuleGroups()

    # Output to Markdown/HTML tables.
    if args.markdown:
        if args.verbose:
            print("Creating markdown output to directory " + str(args.markdown))
        if not os.path.exists(args.markdown):
            os.makedirs(args.markdown)
        out = markdownout.MarkdownOutput(module_groups)
        out.Save(args.markdown)


if __name__ == "__main__":
    main()
| PX4/Firmware | Tools/px_process_module_doc.py | Python | bsd-3-clause | 4,029 |
# Copyright (C) 2011 Alexey Agapitov
# This file is part of Ktope.
#
# Ktope is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def eulerCycle(g):
    """Return an Euler cycle of the multigraph given as an adjacency
    matrix, as a list of 1-based vertex numbers.

    Uses Hierholzer's iterative algorithm starting from vertex 0.
    NOTE: the matrix ``g`` is consumed (edge counts are decremented).
    """
    size = len(g)
    stack = [0]
    cycle = []
    while stack:
        vertex = stack[-1]
        # Lowest-numbered neighbour that still has an unused edge,
        # or `size` when none remains.
        neighbour = next(
            (j for j in range(size) if g[vertex][j] != 0), size)
        if neighbour == size:
            # Dead end: emit the vertex (1-based) and backtrack.
            cycle.append(vertex + 1)
            stack.pop()
        else:
            # Consume the edge in both directions and walk along it.
            g[vertex][neighbour] -= 1
            g[neighbour][vertex] -= 1
            stack.append(neighbour)
    return cycle
import sys

# Dispatch on the first command line argument: web servers, CLI tools or
# the Euler-cycle computation.
conf = None
if len(sys.argv) == 1:
    print('Please specify command to execute')
    exit()
if sys.argv[1].startswith('web'):
    import cherrypy
    import os.path
    conf = os.path.join(os.path.dirname(__file__), 'cherrypy.conf')
    from ktope.web import hw2, hw3, hw5, main
if sys.argv[1] == 'web':
    cherrypy.tree.mount(hw2.Hw2Page(), '/hw2', config=conf)
    cherrypy.tree.mount(hw3.Hw3Page(), '/hw3', config=conf)
    cherrypy.tree.mount(hw5.Hw5Page(), '/hw5', config=conf)
    cherrypy.quickstart(main.KtoPage(), config=conf)
elif sys.argv[1] == 'web.hw2':
    cherrypy.quickstart(hw2.Hw2Page(), '/', config=conf)
elif sys.argv[1] == 'web.hw3':
    cherrypy.quickstart(hw3.Hw3Page(), '/', config=conf)
elif sys.argv[1] == 'web.hw5':
    cherrypy.quickstart(hw5.Hw5Page(), '/', config=conf)
elif sys.argv[1] == 'cli.hw5':
    # BUG FIX: was "len(sys.argv) < 2", which can never be true here
    # (sys.argv[1] already exists), so a missing file argument crashed
    # with IndexError below instead of printing the message.
    if len(sys.argv) < 3:
        print('Please specify a file with circuits')
        exit()
    from ktope.cli import hw5
    hw5.main(sys.argv[2])
elif sys.argv[1] == 'euler':
    # BUG FIX: same ineffective argument-count check as above.
    if len(sys.argv) < 3:
        print('Please specify a file with circuits')
        exit()
    from ktope import hw2
    import fileinput
    lines = []
    # only for python >=3.2
    '''with fileinput.input(files=(sys.argv[2])) as f:
        for line in f:
            lines.append(line)
    '''
    finp = fileinput.input(files=(sys.argv[2]))
    for line in finp:
        lines.append(line)
    finp.close()
    circuits = hw2.buildCircuits(lines)
    elements = hw2.getElements(circuits)
    connMatrix = hw2.buildConnMatrix(circuits, elements)
    print(eulerCycle(connMatrix))
| marwinxxii/ktope | app.py | Python | agpl-3.0 | 2,718 |
import re
from wsgiref.simple_server import make_server
from wurfl_cloud import Cloud
from wurfl_cloud import utils
class WurflCheckMiddleware(object):
    # Example WSGI Middleware library to detect visitor device
    # and load its capablities in the local WSGI environment.
    # The wrapped app can read them from environ['myapp.device_capabilities'].

    def __init__(self, wrap_app):
        self.wrap_app = wrap_app
        # Create a Wurfl Cloud Config
        config = utils.load_config('filecache_config.conf')
        # Create a WURFL Cache Loader
        cache = utils.get_cache(config)
        # Create a WURFL Cloud Client
        self.Client = Cloud(config, cache)

    def __call__(self, environ, start_response):
        # Detect the visitor's device; on success expose the requested
        # capabilities to the wrapped application via the WSGI environ.
        try:
            device = self.Client(environ.get('HTTP_USER_AGENT'), \
                capabilities=["ux_full_desktop", "model_name", "brand_name"])
            if device["errors"]:
                # Error
                print "Error: ", device["errors"]
            else:
                environ['myapp.device_capabilities'] = device["capabilities"]
        except LookupError as e:
            # Detection failed; the request still proceeds without
            # capabilities (handlers treat that as an error marker).
            print "Error: ", e
        return self.wrap_app(environ, start_response)
def index(environ, start_response):
    """Landing page handler mapped to "/": links to the demo pages."""
    start_response('200 OK', [('Content-Type', 'text/html')])
    body = '''<h2>Python's WURFLCloud Examples:</h2>
<ul>
<li><a href="detect">Desktop or Mobile Device?</a></li>
<li><a href="show_capabilities">Show all capabilities</a></li>
</ul>
'''
    return [body]
def detect(environ, start_response):
    """Report whether the visiting device is a desktop browser or mobile.

    Reads the capabilities injected by WurflCheckMiddleware; answers 404
    when detection failed.
    """
    ua = environ['HTTP_USER_AGENT']
    # Missing/empty capabilities mean the middleware could not resolve
    # the device.
    capabilities = environ.get('myapp.device_capabilities') or {"error": True}
    if "error" in capabilities:
        return not_found(environ, start_response)

    if capabilities["ux_full_desktop"]:
        result = '<h1>This is a desktop browser.</h1>'
    else:
        result = '<h1>This is a mobile device.</h1>'
        result += '<p><b>Device:</b> %(brand)s %(model)s</p>' % \
            {'brand': capabilities["brand_name"],
             'model': capabilities["model_name"]}

    start_response('200 OK', [('Content-Type', 'text/html')])
    return ['''%(result)s<p><b>User-Agent:</b> %(ua)s</p>''' %
            {'result': str(result), 'ua': str(ua)}]
def show_capabilities(environ, start_response):
    """List every capability returned by the WURFL Cloud service,
    followed by a dump of the WSGI environment."""
    # Fall back to an error marker when the middleware provided nothing.
    capabilities = environ.get('myapp.device_capabilities') or {"error": 'empty'}

    items = ['<li><strong>%(key)s</strong>: %(value)s</li>' %
             {"key": capability, "value": capabilities[capability]}
             for capability in capabilities]
    result = '<ul>' + ''.join(items) + '</ul>'

    start_response('200 OK', [('Content-Type', 'text/html')])
    return ['''
<h2>Device Capabilities:</h2><p>%(data)s</p>
<h2>WSGI environment for a request:</h2><p>%(env)s</p>
''' % {'data': str(result), 'env': str(environ)}]
def not_found(environ, start_response):
    """Fallback WSGI handler: respond with a plain-text 404."""
    start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
    return ['Not Found']
# map urls to functions
# Each entry pairs a regular expression (searched against PATH_INFO with
# the leading slash stripped) with the WSGI handler to invoke.
urls = [
    (r'^$', index),
    (r'detect/?$', detect),
    (r'show_capabilities/?$', show_capabilities),
]
def application(environ, start_response):
    """Main WSGI application: dispatch the request against `urls`.

    Regular-expression captures are stored in the WSGI environment as
    `myapp.url_args` so handlers can access URL placeholders. Falls back
    to `not_found` when nothing matches.
    """
    path = environ.get('PATH_INFO', '').lstrip('/')
    for pattern, handler in urls:
        match = re.search(pattern, path)
        if match is None:
            continue
        environ['myapp.url_args'] = match.groups()
        return handler(environ, start_response)
    return not_found(environ, start_response)
# Wrap the demo application in the device-detection middleware and serve
# it on port 8000 until interrupted.
wurfl_check_wrapper = WurflCheckMiddleware(application)
httpd = make_server('', 8000, wurfl_check_wrapper)
print "Serving WURFL demo on port 8000..."
httpd.serve_forever()
| WURFL/wurfl-cloud-client-python | examples/example_web.py | Python | gpl-2.0 | 4,451 |
"""Unit test suite for HXL proxy."""
import os
import re
import hxl
import unittest.mock
#
# Mock URL access for local testing
#
def mock_open_url(url, allow_local=False, timeout=None, verify_ssl=True, http_headers=None):
    """
    Open local files instead of URLs.

    Local file paths are opened directly; anything that looks like an
    http(s) URL is redirected to a fixture under ./files/. URLs containing
    /private/ simulate an endpoint requiring an Authorization header.

    This is meant as a side effect for unittest.mock.Mock.
    Arguments are the same as hxl.io.open_url_or_file(), which this replaces.
    """
    if not re.match(r'https?:', url):
        # Plain local file path: open it as-is.
        return (open(url, 'rb'), None, None, None)

    if re.match('.*/private/.*', url):
        # fake a URL that needs authorisation (if it has /private/ in it)
        # if there's no 'Authorization' header, raise an exception
        if http_headers is None or not "Authorization" in http_headers:
            raise hxl.io.HXLAuthorizationException('Need Authorization header', url)

    # Map the URL's basename onto the local fixture directory.
    filename = re.sub(r'^.*/([^/]+)$', '\\1', url)
    return (open(resolve_path('files/' + filename), 'rb'), None, None, None)
def resolve_path(filename):
    """Resolve a relative path against this module's directory."""
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, filename)
# Target function to replace for mocking URL access.
URL_MOCK_TARGET = 'hxl.io.open_url_or_file'

# Mock object to replace hxl.io.open_url_or_file; its side_effect routes
# every call through mock_open_url above.
URL_MOCK_OBJECT = unittest.mock.Mock()
URL_MOCK_OBJECT.side_effect = mock_open_url

# end
| HXLStandard/hxl-proxy | tests/__init__.py | Python | unlicense | 1,532 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.gcp.hooks.mlengine`."""
import warnings
# pylint: disable=unused-import
from airflow.gcp.hooks.mlengine import MLEngineHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.gcp.hooks.mlengine`.",
DeprecationWarning, stacklevel=2
)
| Fokko/incubator-airflow | airflow/contrib/hooks/gcp_mlengine_hook.py | Python | apache-2.0 | 1,125 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This module contains utilities for doing coverage analysis on the RPC
interface.
It provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper(object):
    """
    Wraps an AuthServiceProxy instance and records every RPC method called.
    """
    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the instance
                being wrapped.
            coverage_logfile (str): if specified, write each service_name
                out to a file when called.
        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile

    def __getattr__(self, *args, **kwargs):
        # Delegate attribute lookup and wrap the result, so that chained
        # accesses remain instrumented.
        attr = self.auth_service_proxy_instance.__getattr__(*args, **kwargs)
        return AuthServiceProxyWrapper(attr, self.coverage_logfile)

    def __call__(self, *args, **kwargs):
        """
        Invoke the wrapped proxy, then append the RPC method name to the
        coverage log file (when one is configured).
        """
        result = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        method = self.auth_service_proxy_instance._service_name
        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+') as log:
                log.write("%s\n" % method)
        return result

    @property
    def url(self):
        # Expose the wrapped proxy's URL unchanged.
        return self.auth_service_proxy_instance.url
def get_filename(dirname, n_node):
    """
    Get a filename unique to the test process ID and node.

    This file will contain a list of RPC commands covered.
    """
    basename = "coverage.pid{}.node{}.txt".format(os.getpid(), n_node)
    return os.path.join(dirname, basename)
def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC functions available in `electrum-cli` for
    coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool. if the RPC interface file was written.
    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)
    if os.path.isfile(filename):
        # Another test already produced the reference list.
        return False

    commands = set()
    for line in node.help().split('\n'):
        stripped = line.strip()
        # Ignore blanks and section headers (lines starting with '=');
        # the command name is the first whitespace-separated token.
        if stripped and not stripped.startswith('='):
            commands.add("%s\n" % stripped.split()[0])

    with open(filename, 'w') as f:
        f.writelines(list(commands))

    return True
| sinraf96/electrum | qa/rpc-tests/test_framework/coverage.py | Python | mit | 2,932 |
#!/usr/bin/env python
# Copyright 2014-2015 @gitagon. For alternative licenses contact the author.
#
# This file is part of streamsearch-py.
# streamsearch-py is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# streamsearch-py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with streamsearch-py. If not, see <http://www.gnu.org/licenses/>.
class StreamBuffer(object):
    """Abstract interface of a buffered stream reader.

    Subclasses must implement all three methods below.
    """

    def __init__(self):
        super(StreamBuffer, self).__init__()

    def get_buffer(self):
        """Return the buffer array (possibly just a shallow reference)."""
        raise NotImplementedError()

    def size(self):
        """Return the size of the buffer contents (not the buffer array
        length)."""
        raise NotImplementedError()

    def read(self):
        """Read from the input stream into the buffer array.

        Return the number of bytes read (at least one) or a negative
        number to indicate the stream is at end of file or this object is
        in an inconsistent buffer state.
        """
        raise NotImplementedError()
| gitagon/streamsearch-py | streamsearch/buffer.py | Python | agpl-3.0 | 1,503 |
"""Viessmann ViCare sensor device."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass
import logging
from PyViCare.PyViCareUtils import (
PyViCareInvalidDataError,
PyViCareNotSupportedFeatureError,
PyViCareRateLimitError,
)
import requests
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_POWER,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.const import CONF_NAME
from . import ViCareRequiredKeysMixin
from .const import DOMAIN, VICARE_API, VICARE_CIRCUITS, VICARE_DEVICE_CONFIG
_LOGGER = logging.getLogger(__name__)
# Unique keys identifying the individual ViCare binary sensors.
SENSOR_CIRCULATION_PUMP_ACTIVE = "circulationpump_active"
SENSOR_BURNER_ACTIVE = "burner_active"
SENSOR_COMPRESSOR_ACTIVE = "compressor_active"
@dataclass
class ViCareBinarySensorEntityDescription(
    BinarySensorEntityDescription, ViCareRequiredKeysMixin
):
    """Describes ViCare binary sensor entity.

    Combines Home Assistant's binary sensor entity description with the
    required ``value_getter`` callable from ViCareRequiredKeysMixin.
    """
# Sensors attached to each heating circuit.
CIRCUIT_SENSORS: tuple[ViCareBinarySensorEntityDescription, ...] = (
    ViCareBinarySensorEntityDescription(
        key=SENSOR_CIRCULATION_PUMP_ACTIVE,
        name="Circulation pump active",
        device_class=DEVICE_CLASS_POWER,
        value_getter=lambda api: api.getCirculationPumpActive(),
    ),
)
# Sensors attached to each burner.
BURNER_SENSORS: tuple[ViCareBinarySensorEntityDescription, ...] = (
    ViCareBinarySensorEntityDescription(
        key=SENSOR_BURNER_ACTIVE,
        name="Burner active",
        device_class=DEVICE_CLASS_POWER,
        value_getter=lambda api: api.getActive(),
    ),
)
# Sensors attached to each compressor (heat pumps).
COMPRESSOR_SENSORS: tuple[ViCareBinarySensorEntityDescription, ...] = (
    ViCareBinarySensorEntityDescription(
        key=SENSOR_COMPRESSOR_ACTIVE,
        name="Compressor active",
        device_class=DEVICE_CLASS_POWER,
        value_getter=lambda api: api.getActive(),
    ),
)
def _build_entity(name, vicare_api, device_config, sensor):
    """Return a ViCareBinarySensor for *sensor*, or None if unsupported."""
    try:
        # Probe the getter once; an exception means the device lacks this
        # feature (or the api object has no such attribute).
        sensor.value_getter(vicare_api)
    except PyViCareNotSupportedFeatureError:
        _LOGGER.info("Feature not supported %s", name)
        return None
    except AttributeError:
        _LOGGER.debug("Attribute Error %s", name)
        return None
    _LOGGER.debug("Found entity %s", name)
    return ViCareBinarySensor(name, vicare_api, device_config, sensor)
async def _entities_from_descriptions(
    hass, name, all_devices, sensor_descriptions, iterables, config_entry
):
    """Create entities from descriptions and list of burners/circuits."""
    device_config = hass.data[DOMAIN][config_entry.entry_id][VICARE_DEVICE_CONFIG]
    # Only disambiguate names with the component id when there are several.
    use_suffix = len(iterables) > 1
    for description in sensor_descriptions:
        for current in iterables:
            suffix = f" {current.id}" if use_suffix else ""
            # _build_entity performs blocking API I/O, hence the executor job.
            entity = await hass.async_add_executor_job(
                _build_entity,
                f"{name} {description.name}{suffix}",
                current,
                device_config,
                description,
            )
            if entity is not None:
                all_devices.append(entity)
async def async_setup_entry(hass, config_entry, async_add_devices):
    """Create the ViCare binary sensor devices."""
    name = config_entry.data[CONF_NAME]
    api = hass.data[DOMAIN][config_entry.entry_id][VICARE_API]
    all_devices = []
    # Circuit sensors: one entity per description per circuit; the circuit id
    # is only appended to the name when there is more than one circuit.
    # NOTE(review): this loop duplicates _entities_from_descriptions() and
    # could probably delegate to it -- verify exception behaviour first.
    for description in CIRCUIT_SENSORS:
        for circuit in hass.data[DOMAIN][config_entry.entry_id][VICARE_CIRCUITS]:
            suffix = ""
            if len(hass.data[DOMAIN][config_entry.entry_id][VICARE_CIRCUITS]) > 1:
                suffix = f" {circuit.id}"
            # _build_entity performs blocking API I/O, hence the executor job.
            entity = await hass.async_add_executor_job(
                _build_entity,
                f"{name} {description.name}{suffix}",
                circuit,
                hass.data[DOMAIN][config_entry.entry_id][VICARE_DEVICE_CONFIG],
                description,
            )
            if entity is not None:
                all_devices.append(entity)
    # Burners/compressors may be entirely absent on some installations.
    try:
        await _entities_from_descriptions(
            hass, name, all_devices, BURNER_SENSORS, api.burners, config_entry
        )
    except PyViCareNotSupportedFeatureError:
        _LOGGER.info("No burners found")
    try:
        await _entities_from_descriptions(
            hass, name, all_devices, COMPRESSOR_SENSORS, api.compressors, config_entry
        )
    except PyViCareNotSupportedFeatureError:
        _LOGGER.info("No compressors found")
    async_add_devices(all_devices)
class ViCareBinarySensor(BinarySensorEntity):
    """Representation of a ViCare sensor."""

    entity_description: ViCareBinarySensorEntityDescription

    def __init__(
        self, name, api, device_config, description: ViCareBinarySensorEntityDescription
    ):
        """Initialize the sensor.

        :param name: display name for the entity
        :param api: PyViCare device/circuit/burner/compressor api object
        :param device_config: PyViCare device configuration (serial, model)
        :param description: entity description holding the value getter
        """
        # Fix: the description was previously assigned twice; once suffices.
        self.entity_description = description
        self._attr_name = name
        self._api = api
        self._device_config = device_config
        self._state = None

    @property
    def device_info(self):
        """Return device info for this device."""
        return {
            "identifiers": {(DOMAIN, self._device_config.getConfig().serial)},
            "name": self._device_config.getModel(),
            # NOTE(review): "model" is a (DOMAIN, model) tuple while "name" is a
            # plain string; looks inconsistent but kept to preserve the device
            # registry entries already created with this value.
            "manufacturer": "Viessmann",
            "model": (DOMAIN, self._device_config.getModel()),
        }

    @property
    def available(self):
        """Return True if entity is available."""
        # The state is only populated after a successful update().
        return self._state is not None

    @property
    def unique_id(self):
        """Return unique ID for this device."""
        tmp_id = (
            f"{self._device_config.getConfig().serial}-{self.entity_description.key}"
        )
        if hasattr(self._api, "id"):
            # Circuits/burners/compressors carry an id; append it so several
            # components of the same device get distinct unique_ids.
            return f"{tmp_id}-{self._api.id}"
        return tmp_id

    @property
    def is_on(self):
        """Return the state of the sensor."""
        return self._state

    def update(self):
        """Update state of sensor from the ViCare API (blocking)."""
        try:
            # An unsupported feature simply leaves the previous state intact.
            with suppress(PyViCareNotSupportedFeatureError):
                self._state = self.entity_description.value_getter(self._api)
        except requests.exceptions.ConnectionError:
            _LOGGER.error("Unable to retrieve data from ViCare server")
        except ValueError:
            _LOGGER.error("Unable to decode data from ViCare server")
        except PyViCareRateLimitError as limit_exception:
            _LOGGER.error("Vicare API rate limit exceeded: %s", limit_exception)
        except PyViCareInvalidDataError as invalid_data_exception:
            _LOGGER.error("Invalid data from Vicare server: %s", invalid_data_exception)
| jawilson/home-assistant | homeassistant/components/vicare/binary_sensor.py | Python | apache-2.0 | 6,640 |
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige.ospace.Const import *
from ige.IDataHolder import makeIDataHolder
## General
# One in-game day is this many turns; durations below are derived from it.
turnsPerDay = 24
rotationMod = 384.0
# Inactivity timeouts, in seconds.
playerTimeout = 60 * 60 * 24 * 21 # 21 days
novicePlayerTimeout = 60 * 60 * 24 * 7 # 7 days
messageTimeout = 60 * 60 * 24 * 14 # 14 days
## New player
# Resources granted to a freshly created player.
startingPopulation = 9000
startingBio = 1000
startingMin = 1000
startingEn = 1000
startingScannerPwr = 100
## Production
maxProdQueueLen = 10
# Cost multipliers depending on where the produced item is targeted.
buildOnSamePlanetMod = 1
buildOnAnotherPlanetMod = 2
unusedProdMod = 0.75
## Environment
envInterval = 1000
envAutoMod = 10.0
envMax = 200
envSelfUpgradeChance = {"H": 5, "C": 1, "B": 500} # in ten thousandths (10 000)
# Per-planet-class environment limits and upgrade/downgrade paths.
# Keys are single-character planet class codes; upgradeEnReqs, where present,
# is a (min, max) pair of values required for the upgrade (exact semantics
# live in the environment-processing code, not shown here -- TODO confirm).
planetSpec = {}
planetSpec[u'A'] = makeIDataHolder(
    minBio = 0,
    maxBio = 0,
    upgradeTo = None,
    downgradeTo = None,
)
planetSpec[u'G'] = makeIDataHolder(
    minBio = 0,
    maxBio = 0,
    upgradeTo = None,
    downgradeTo = None,
)
planetSpec[u'C'] = makeIDataHolder(
    minBio = 0,
    maxBio = 6,
    upgradeTo = u'D',
    upgradeEnReqs = (5, 180),
    downgradeTo = None,
)
planetSpec[u'R'] = makeIDataHolder(
    minBio = 0,
    maxBio = 6,
    upgradeTo = u'D',
    upgradeEnReqs = (5, 180),
    downgradeTo = None,
)
planetSpec[u'D'] = makeIDataHolder(
    minBio = 6,
    maxBio = 12,
    upgradeTo = u'H',
    upgradeEnReqs = (25, 150),
    downgradeTo = u'R',
)
planetSpec[u'H'] = makeIDataHolder(
    minBio = 12,
    maxBio = 25,
    upgradeTo = u'M',
    upgradeEnReqs = (50, 125),
    downgradeTo = u'D',
)
planetSpec[u'M'] = makeIDataHolder(
    minBio = 25,
    maxBio = 75,
    upgradeTo = u'E',
    upgradeEnReqs = (50, 100),
    downgradeTo = u'H',
)
planetSpec[u'E'] = makeIDataHolder(
    minBio = 75,
    maxBio = 125,
    upgradeTo = u"I",
    upgradeEnReqs = (50, 100),
    downgradeTo = u'M',
)
planetSpec[u"I"] = makeIDataHolder( # gaia
    minBio = 125,
    maxBio = 200,
    upgradeTo = None,
    downgradeTo = u"E",
)
## New colony settings
colonyMinBio = 600
colonyMinMin = 600
colonyMinEn = 600
## Storage
# Per-slot storage contributions are currently disabled (all zero); the
# base storages below carry the whole capacity.
popPerSlot = 0
bioPerSlot = 0
minPerSlot = 0
enPerSlot = 0
popBaseStor = 4800
bioBaseStor = 4800
minBaseStor = 4800
enBaseStor = 4800
autoMinStorTurns = 6
autoReqStorTurns = 1
maxPopReserve = 1.125
tlPopReserve = 250
## Resources
stratResRate = turnsPerDay * 6
## Population
popGrowthRate = 0.02
popDieRate = 0.1
popMinDieRate = 100
popKillMod = 0.25
popSlotKillMod = 5 # how many people per 1 DMG get killed when slot is hit
popSlotHP = 100 # HP of habitable structures on slot (where people live)
## Research
maxRsrchQueueLen = 10
techBaseImprovement = 1
techMaxImprovement = 5
techImprCostMod = {1:480, 2:480, 3:720, 4:960, 5:1200, 6: 1440, 7: 1680} #per level
sciPtsPerCitizen = {1: 0, 2: 0.00075, 3: 0.00150, 4: 0.00175, 5: 0.00200, 6: 0.002125, 7: 0.00225, 99: 0} #per level
techImprEff = {1:0.750, 2:0.875, 3:1.000, 4:1.125, 5:1.250} #per sublevel
#maxSciPtsTL = {1:100, 2:200, 3:300, 4:400, 5:500, 6:600, 7:700}
#sciPtsStepFraction = 0.25
## Scanner
maxSignature = 100
scannerMinPwr = 1
# Scan power thresholds unlocking progressively detailed intelligence.
level1InfoScanPwr = 1000
level2InfoScanPwr = 1200
level3InfoScanPwr = 1400
level4InfoScanPwr = 1600
maxScanPwr = 200000
mapForgetScanPwr = 0.94
partnerScanPwr = 300000
## Fleets
maxCmdQueueLen = 10
signatureBase = 1.10
operProdRatio = 0.001
combatRetreatWait = 3
starGateDamage = 0.2 # damage for 100% speed boost (double for 200%, etc...)
shipDecayRatio = 0.04
maxDamageAbsorb = 5 # max absorbed damage for tech "damageAbsorb" property.
## Buildings
repairRatio = 0.02
repairRunningRatio = 0.01
decayRatio = 0.02
storCapacityOfOfflineStruct = 1.0
plShieldRegen = 0.05 #regen rate of planetary shield
## Diplomacy
baseRelationChange = -5
relLostWhenAttacked = -1000000
defaultRelation = REL_NEUTRAL
contactTimeout = 6 * turnsPerDay
voteForImpPeriod = 6 * turnsPerDay
ratioNeededForImp = 0.6666
# Diplomacy pact parameters.  For each pact type:
#   targetRel        -- relation level the pact pushes the parties toward
#   relChng          -- per-step relation change weight (units defined by the
#                       diplomacy code -- TODO confirm)
#   effectivity      -- cooperation effectivity bonus, where applicable
#   validityInterval -- (min, max) relation range in which the pact is valid
pactDescrs = {}
pactDescrs[PACT_ALLOW_CIVILIAN_SHIPS] = makeIDataHolder(
    targetRel = 500,
    relChng = 10,
    validityInterval = (0, 10000),
)
pactDescrs[PACT_ALLOW_MILITARY_SHIPS] = makeIDataHolder(
    targetRel = 750,
    relChng = 8,
    validityInterval = (0, 10000),
)
pactDescrs[PACT_ALLOW_TANKING] = makeIDataHolder(
    targetRel = 750,
    relChng = 7,
    validityInterval = (0, 10000),
)
pactDescrs[PACT_MINOR_CP_COOP] = makeIDataHolder(
    targetRel = 1000,
    relChng = 6,
    effectivity = 0.05,
    validityInterval = (625, 10000),
)
pactDescrs[PACT_MAJOR_CP_COOP] = makeIDataHolder(
    targetRel = 1000,
    relChng = 1,
    effectivity = 0.05,
    validityInterval = (875, 10000),
)
pactDescrs[PACT_SHARE_SCANNER] = makeIDataHolder(
    targetRel = 1000,
    relChng = 1,
    validityInterval = (625, 10000),
)
pactDescrs[PACT_MINOR_SCI_COOP] = makeIDataHolder(
    targetRel = 750,
    relChng = 1,
    effectivity = 0.05,
    validityInterval = (625, 10000),
)
pactDescrs[PACT_MAJOR_SCI_COOP] = makeIDataHolder(
    targetRel = 1000,
    relChng = 1,
    effectivity = 0.05,
    validityInterval = (875, 10000),
)
## Morale
baseGovPwr = 50000
maxMorale = 100.0
minMoraleTrgt = 30.0
revoltThr = 25.0
moraleChngPerc = 0.03
moraleHighPopPenalty = 2.0
moraleBasePop = 10000
moraleLowPop = 5000
moraleLowPopBonus = 40.0
moraleLostWhenSurrender = 0.0
moraleLostNoFood = 1.0
moraleModPlHit = 96.0 # how many morale point per 1 per cent of damage
moralePerPointChance = 5.0 # for every point below revoltThr % chance for revolt
moraleProdStep = 10
# Production modifier per moraleProdStep bucket of morale (low -> high).
moraleProdBonus = [-0.875, -0.75, -0.625, -0.50, -0.375, -0.25, -0.125, 0.0, 0.0, 0.125, 0.25]
## Revolt
# Fractions of stored resources destroyed by a revolt.
revoltDestrBio = 0.05
revoltDestrMin = 0.05
revoltDestrEn = 0.05
revoltPenalty = 0.75
## Messages
messageMaxAge = turnsPerDay * 3
## Asteroid
asteroidPerHPBioMod = - 0.01
asteroidPerHPMinMod = + 0.001
asteroidGenerPerc = 0.001
asteroidMinPlMinAbund = 10
asteroidModPwr = 2.0
asteroidTargetInSystem = 0.2
asteroidMinHP = 100
asteroidMaxHP = 1000
asteroidMinSpeed = 2.0
asteroidMaxSpeed = 4.0
asteroidMisDef = 1
asteroidDef = 4
asteroidAttack = 4
asteroidImpactDelay = 6
## Projects
projECOINIT3PlBio = 1
## Ships
shipImprovementMod = 1.05
shipMaxImprovements = 5
shipMaxDesigns = 40
# Experience -> level lookup.  NOTE(review): key 14 is absent (13 jumps to
# 15); whether lookups use exact keys or thresholds decides if that matters.
shipExpToLevel = {0:1, 1:2, 2:2, 3:3, 4:3, 5:3, 6:3, 7:4, 8:4, 9:4, 10:4, 11:4,
    12:4, 13:4, 15:5}
shipDefLevel = 5
shipLevelEff = {1:0.50, 2:0.75, 3:1.00, 4:1.25, 5:1.50}
shipBaseExpMod = 20
shipBaseExp = {0:10, 1:20, 2:40, 3:80, 4:160}
shipTargetPerc = [25, 50, 90, 100]
shipMinUpgrade = 120
shipUpgradeMod = 1.375
shipUpgradePts = [1, 3, 10]
weaponDmgDegrade = [1.0, 0.5, 0.25, 0.125]
## EMR
emrMinDuration = 36
emrMaxDuration = 60
emrPeriod = 576
# Four seasons partition the emrPeriod (in turns); each scales the EMR level
# into its [emrLevelMin, emrLevelMax] range.
emrSeasons = [None, None, None, None]
emrSeasons[0] = makeIDataHolder(
    name = "spring",
    startTime = 0,
    endTime = 143,
    emrLevelMin = 0.75,
    emrLevelMax = 1.25,
)
emrSeasons[1] = makeIDataHolder(
    name = "summer",
    startTime = 144,
    endTime = 287,
    emrLevelMin = 0.50,
    emrLevelMax = 1.00,
)
# NOTE(review): "fall" starts at 287, overlapping "summer"'s endTime 287;
# every other boundary is contiguous (143/144, 431/432).  Probably meant
# startTime = 288 -- confirm against the season-lookup code before changing.
emrSeasons[2] = makeIDataHolder(
    name = "fall",
    startTime = 287,
    endTime = 431,
    emrLevelMin = 0.50,
    emrLevelMax = 1.50,
)
emrSeasons[3] = makeIDataHolder(
    name = "winter",
    startTime = 432,
    endTime = 575,
    emrLevelMin = 1.00,
    emrLevelMax = 1.50,
)
## Pirates
pirateInfluenceRange = 7.5 # in parsecs
# Fame gain/loss probabilities as a function of distance d (parsecs).
# NOTE(review): "Propability" is a typo, but the name is public API -- renaming
# it would break external callers, so it is kept as-is.
pirateGainFamePropability = lambda d: 2 - d * 0.2
pirateLoseFameProbability = lambda d: 1 - (15 - d) * 0.2
pirateCaptureInRangeFame = 1
pirateSurvivalFame = 1
pirateCaptureOutOfRangeFame = -1
pirateColonyCostMod = 1.5
pirateTL3StratResColonyCostMod = 0.25
pirateGovPwr = int(500000 * 1.25)
pirateCanStealImprovements = 3
# Grant windows in seconds (days * 24 * 3600).
pirateGrantHSE = 60*24*3600 #60 days
pirateGrantASSEM = 105*24*3600 #105 days
pirateGrantCOND = 105*24*3600 #105 days
## Bonuses
galLeaderBonus = 0.05
galImperatorBonus = 0.10
## Combat
combatStructureHitMod = 0.75
combatShipHitMod = 0.75
combatHitXferMod = 3.00
combatStructDefense = 1
| OuterDeepSpace/OuterDeepSpace | shared/res/rules/alter/rules.py | Python | gpl-2.0 | 8,361 |
"""
This module provides some useful tools for ``vcs`` like annotate/diff html
output. It also includes some internal helpers.
"""
import time
import datetime
def makedate():
    """Return (local timestamp, timezone offset) for the current moment.

    The offset is seconds west of UTC, honouring DST when it is in effect.
    """
    now = time.localtime()
    # tm_isdst (index 8) == 1 means DST is active for this local time.
    offset = time.altzone if (now.tm_isdst == 1 and time.daylight) else time.timezone
    return time.mktime(now), offset
def aslist(obj, sep=None, strip=True):
    """
    Returns given string separated by sep as list

    :param obj:
    :param sep:
    :param strip:
    """
    if obj is None:
        return []
    if isinstance(obj, (basestring)):
        parts = obj.split(sep)
        if strip:
            parts = [chunk.strip() for chunk in parts]
        return parts
    if isinstance(obj, (list, tuple)):
        return obj
    # anything else becomes a single-element list
    return [obj]
def date_fromtimestamp(unixts, tzoffset=0):
    """
    Makes a local datetime object out of unix timestamp

    :param unixts: unix timestamp (int/float/str convertible to float)
    :param tzoffset: accepted for API compatibility but currently IGNORED --
        the result is always in local time (TODO confirm this is intended)
    """
    return datetime.datetime.fromtimestamp(float(unixts))
def safe_int(val, default=None):
    """
    Returns int() of val if val is not convertible to int use default
    instead

    :param val:
    :param default:
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        # non-numeric strings raise ValueError, None etc. raise TypeError
        return default
def safe_unicode(str_, from_encoding=None):
    """
    safe unicode function. Does few trick to turn str_ into unicode

    In case of UnicodeDecode error we try to return it with encoding detected
    by chardet library if it fails fallback to unicode with errors replaced

    :param str_: string to decode
    :param from_encoding: encoding name (or list of names) tried first;
        defaults to settings.DEFAULT_ENCODINGS
    :rtype: unicode
    :returns: unicode object
    """
    if isinstance(str_, unicode):
        return str_
    if not from_encoding:
        # imported lazily to avoid a hard dependency at module import time
        from kallithea.lib.vcs.conf import settings
        from_encoding = settings.DEFAULT_ENCODINGS
    if not isinstance(from_encoding, (list, tuple)):
        from_encoding = [from_encoding]
    # 1) try the default (ascii) codec
    try:
        return unicode(str_)
    except UnicodeDecodeError:
        pass
    # 2) try each configured encoding in order
    for enc in from_encoding:
        try:
            return unicode(str_, enc)
        except UnicodeDecodeError:
            pass
    # 3) last resort: guess with chardet, else replace undecodable bytes
    try:
        import chardet
        encoding = chardet.detect(str_)['encoding']
        if encoding is None:
            raise Exception()
        return str_.decode(encoding)
    except (ImportError, UnicodeDecodeError, Exception):
        return unicode(str_, from_encoding[0], 'replace')
def safe_str(unicode_, to_encoding=None):
    """
    safe str function. Does few trick to turn unicode_ into string

    In case of UnicodeEncodeError we try to return it with encoding detected
    by chardet library if it fails fallback to string with errors replaced

    :param unicode_: unicode to encode
    :param to_encoding: encoding name (or list of names) tried first;
        defaults to settings.DEFAULT_ENCODINGS
    :rtype: str
    :returns: str object
    """
    # if it's not basestr cast to str
    if not isinstance(unicode_, basestring):
        return str(unicode_)
    if isinstance(unicode_, str):
        return unicode_
    if not to_encoding:
        # imported lazily to avoid a hard dependency at module import time
        from kallithea.lib.vcs.conf import settings
        to_encoding = settings.DEFAULT_ENCODINGS
    if not isinstance(to_encoding, (list, tuple)):
        to_encoding = [to_encoding]
    # try each configured encoding in order
    for enc in to_encoding:
        try:
            return unicode_.encode(enc)
        except UnicodeEncodeError:
            pass
    # last resort: guess with chardet, else replace unencodable characters
    try:
        import chardet
        encoding = chardet.detect(unicode_)['encoding']
        if encoding is None:
            # NOTE(review): constructing UnicodeEncodeError() with no args
            # raises TypeError, which the except below does NOT catch -- a
            # chardet miss here would crash.  Confirm and fix upstream.
            raise UnicodeEncodeError()
        return unicode_.encode(encoding)
    except (ImportError, UnicodeEncodeError):
        return unicode_.encode(to_encoding[0], 'replace')
def author_email(author):
    """
    returns email address of given author.
    If any of <,> sign are found, it fallbacks to regex findall()
    and returns first found result or empty string

    Regex taken from http://www.regular-expressions.info/email.html
    """
    if not author:
        return ''
    import re
    lpos = author.find('<')
    rpos = author.find('>')
    if lpos == -1 or rpos == -1:
        # No <...> pair: scan the whole string for something email-shaped.
        email_re = re.compile(r"""[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!"""
                              r"""#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z"""
                              r"""0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]"""
                              r"""*[a-z0-9])?""", re.IGNORECASE)
        matches = email_re.findall(author)
        return matches[0] if matches else ''
    return author[lpos + 1:rpos].strip()


def author_name(author):
    """
    get name of author, or else username.
    It'll try to find an email in the author string and just cut it off
    to get the username
    """
    if not author:
        return ''
    if '@' not in author:
        return author
    without_email = author.replace(author_email(author), '')
    return without_email.replace('<', '').replace('>', '').strip()
| msabramo/kallithea | kallithea/lib/vcs/utils/__init__.py | Python | gpl-3.0 | 4,852 |
from __future__ import print_function
from nose.tools import assert_equal
from matplotlib.testing.decorators import knownfailureif
import sys
def test_simple():
    """Trivial arithmetic sanity check (exercises the test harness)."""
    expected = 2
    assert_equal(1 + 1, expected)
@knownfailureif(True)
def test_simple_knownfail():
    """Deliberately failing assertion, flagged as a known failure."""
    assert_equal(1+1,3)
from pylab import *
def test_override_builtins():
    """Verify `from pylab import *` clobbered no builtins beyond a whitelist."""
    ok_to_override = set([
        '__name__',
        '__doc__',
        '__package__',
        'any',
        'all',
        'sum'
    ])
    if sys.version_info[0] >= 3:
        builtins = sys.modules['builtins']
    else:
        builtins = sys.modules['__builtin__']
    builtin_names = dir(builtins)
    overridden = False
    for key, value in globals().items():
        if key not in builtin_names:
            continue
        if value != getattr(builtins, key) and key not in ok_to_override:
            print("'%s' was overridden in globals()." % key)
            overridden = True
    assert not overridden
| lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/tests/test_basic.py | Python | mit | 907 |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 17:29:58 2013
@author: cbarbosa
Program to verify results from MCMC runs.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy.optimize import fmin, fminbound
from scipy.integrate import quad
import matplotlib.cm as cm
from matplotlib.mlab import normpdf
from matplotlib.colors import Normalize
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.mixture import GMM
from config import *
from run_ppxf import speclist
class Dist():
    """ Simple class to handle the distribution data of MCMC. """
    def __init__(self, data, lims, bw=0.005):
        self.data = data
        self.lims = lims
        self.bw = bw
        # Fit candidate parametric models to the MCMC samples.
        self.genextreme = genextreme(self.data)
        self.norm = statdist(self.data, stats.norm, "norm")
        # self.truncnorm = statdist(self.data, stats.truncnorm, "truncnorm")
        # self.gmm = gmm(self.data)
        # Keep the model with the lowest Anderson-Darling statistic.
        dists = [self.genextreme, self.norm]
        idx = np.argmin([x.ad for x in dists])
        self.best = dists[idx]
        self.MAPP = self.best.MAPP
        self.calc_err()
        # # Calculate percentiles
        # self.percentileatmapp = stats.percentileofscore(self.data, self.MAPP)
        # self.percentilemax = np.minimum(self.percentileatmapp + 34., 100.)
        # self.percentilemin = np.maximum(self.percentileatmapp - 34., 0.)
        # self.MAPPmin = stats.scoreatpercentile(self.data, self.percentilemin)
        # self.MAPPmax = stats.scoreatpercentile(self.data, self.percentilemax)
        # Asymmetric errors: bin width added in quadrature to the distance
        # from the MAP point to each end of the 68% interval.
        self.lerr = np.sqrt(self.bw**2 + np.abs(self.MAPP - self.MAPPmin)**2)
        self.uerr = np.sqrt(self.bw**2 + np.abs(self.MAPP - self.MAPPmax)**2)
        return
    def calc_err(self):
        """ Calculate error for the best distribution. """
        # NOTE(review): r is never used below -- candidate for removal.
        r = np.abs(self.lims[0] - self.lims[1])
        def integral(y, return_x=False):
            # x0/x1: abscissae where the best-fit pdf crosses level y, on
            # either side of the MAP point (the pdf mode).
            x0 = float(fminbound(lambda x: np.abs(self.best.pdf(x) - y),
                                 self.lims[0], self.best.MAPP, full_output=1)[0])
            x1 = float(fminbound(lambda x: np.abs(self.best.pdf(x) - y),
                                 self.best.MAPP, self.lims[1], full_output=1)[0])
            if not return_x:
                return quad(self.best.pdf, x0, x1)[0]
            else:
                return x0, x1
        # Find the pdf level enclosing 68% probability (1-sigma interval).
        y = fmin(lambda x: np.abs(integral(x) - 0.68),
                 0.6 * self.best.pdf(self.best.MAPP), disp=0)
        self.MAPPmin, self.MAPPmax = integral(y, return_x=True)
        return
class genextreme():
    """Generalized-extreme-value fit of an MCMC sample.

    MAPP is the pdf mode, found by maximizing the frozen pdf starting from
    the distribution mean; `ad` is the Anderson-Darling k-sample statistic
    between the data and a same-sized synthetic sample from the fit.
    """
    def __init__(self, data):
        self.dist = stats.genextreme
        self.distname = "genextreme"
        self.data = data
        self.p = self.dist.fit(self.data)
        self.frozen = self.dist(self.p[0], loc=self.p[1], scale=self.p[2])
        self.pdf = lambda x : self.frozen.pdf(x)
        self.sample = self.frozen.rvs(len(self.data))
        self.sample2 = self.frozen.rvs(100000)
        self.moments = self.frozen.stats(moments="mvsk")
        self.MAPP = fmin(lambda x: -self.pdf(x),
                         self.moments[0], disp=0)[0]
        try:
            self.ad = stats.anderson_ksamp([self.sample, self.data])[0]
        except:
            # anderson_ksamp can fail on degenerate samples; an infinite
            # statistic makes this fit lose the model comparison in Dist.
            self.ad = np.infty
class statdist():
    """Fit of an arbitrary scipy.stats distribution to an MCMC sample."""
    def __init__(self, data, dist, distname):
        self.dist = dist
        self.distname = distname
        self.data = data
        self.p = self.dist.fit(self.data)
        # frozen-pdf wrapper: shape params first, then loc/scale (last two)
        self.pdf = lambda x : self.dist.pdf(x, *self.p[:-2], loc=self.p[-2],
                                            scale=self.p[-1])
        # NOTE(review): the comparison sample is always drawn from a NORMAL
        # with these parameters, even when `dist` is not stats.norm -- the
        # AD statistic is then only meaningful for the normal case.
        self.sample = stats.norm.rvs(self.p[0], size=len(self.data),
                                     scale=self.p[-1])
        self.moments = self.dist.stats(*self.p, moments="mvsk")
        # MAP point: pdf mode, searched from the distribution mean.
        self.MAPP = fmin(lambda x: -self.pdf(x),
                         self.moments[0], disp=0)[0]
        try:
            self.ad = stats.anderson_ksamp([self.sample, self.data])[0]
        except:
            # degenerate samples: disqualify this fit with an infinite AD
            self.ad = np.infty
class gmm():
    """Gaussian-mixture fits (1..10 components); best model chosen by AIC/BIC."""
    def __init__(self, data):
        self.distname = "gmm"
        self.data = data
        self.n_components = np.arange(1,11)
        self.models = []
        # sklearn expects a column vector of samples
        self.X = np.reshape(self.data, (len(self.data),1))
        for i in self.n_components:
            self.models.append(GMM(i, covariance_type='full').fit(self.X))
        self.AIC = np.array([m.aic(self.X) for m in self.models])
        self.BIC = np.array([m.bic(self.X) for m in self.models])
        # Small-sample corrected AIC: AICc = AIC + 2k(k+1)/(n-k-1), with
        # k = 2 parameters per component here.
        self.k = 2 * np.arange(1,11)
        self.n = len(self.data)
        self.AICc = self.AIC + 2*self.k * (self.k + 1) / (self.n - self.k - 1)
        # take the smaller of the AIC/BIC argmins as the winning model index
        self.imin = np.minimum(np.argmin(self.AIC), np.argmin(self.BIC))
        self.best = self.models[self.imin]
def hist2D(dist1, dist2, ax):
    """ Plot distribution and confidence contours. """
    # Gaussian KDE of the joint sample, evaluated on a 20x20 grid spanning
    # the plot limits of both marginals.
    X, Y = np.mgrid[dist1.lims[0] : dist1.lims[1] : 20j,
                    dist2.lims[0] : dist2.lims[1] : 20j]
    extent = [dist1.lims[0], dist1.lims[1], dist2.lims[0], dist2.lims[1]]
    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([dist1.data, dist2.data])
    kernel = stats.gaussian_kde(values)
    Z = np.reshape(kernel(positions).T, X.shape)
    # rot90 aligns the mgrid orientation with imshow's row/column convention
    ax.imshow(np.rot90(Z), cmap="gray_r", extent=extent, aspect="auto",
              interpolation="spline16")
    # plt.hist2d(dist1.data, dist2.data, bins=40, cmap="gray_r")
    # Cross-hairs at the MAP point of each marginal.
    plt.axvline(dist1.MAPP, c="r", ls="--", lw=1.5)
    plt.axhline(dist2.MAPP, c="r", ls="--", lw=1.5)
    plt.tick_params(labelsize=10)
    ax.minorticks_on()
    plt.locator_params(axis='x',nbins=10)
    return
def summary_table(specs, modelname, db):
    """Concatenate per-spectrum summary files into populations_<model>.txt."""
    collected = []
    for spec in specs:
        folder = spec.replace(".fits", "_db{0}".format(db))
        logfile = os.path.join(working_dir, folder,
                               "summary.txt")
        if not os.path.exists(logfile):
            continue
        with open(logfile, "r") as f:
            # header is rewritten on every file; the last one read is used
            header = f.readline()
            collected.append(f.readline())
    table = os.path.join(working_dir, "populations_{0}.txt".format(modelname))
    with open(table, "w") as f:
        f.write(header)
        f.write("\n".join(collected))
if __name__ == "__main__":
    # Batch-plot MCMC posteriors (age, metallicity, alpha) for every spectrum
    # and collect per-spectrum summaries.  Python 2 script (uses `print x`).
    working_dir = os.path.join(home, "single2")
    os.chdir(working_dir)
    plt.ioff()
    specs = speclist()
    # specs = ["fin1_n3311cen1_s23.fits", "fin1_n3311cen1_s30.fits"]
    db = ""
    modelname = "miles" if db == "2" else "thomas"
    dirs = [x.replace(".fits", "_db{0}".format(db)) for x in specs]
    # Axis limits for [log age, [Z/H], [alpha/Fe]].
    lims = [[9 + np.log10(1.), 9 + np.log10(15.)], [-2.25, 0.90], [-0.3, 0.5]]
    plims = [[np.log10(1.), 1.2], [-2.3, 0.7], [-0.4, 0.6]]
    fignums = [4, 7, 8]
    pairs = [[0,1], [0,2], [1,2]]
    plt.ioff()
    pp = PdfPages(os.path.join(working_dir,
                  "mcmc_results_{0}.pdf".format(modelname)))
    plt.figure(1, figsize=(9,6.5))
    plt.minorticks_on()
    table_summary, table_results = [], []
    # map: spectrum filename -> S/N (columns 0 and 10 of ppxf_results.dat)
    sndata = dict(np.loadtxt("ppxf_results.dat", usecols=(0,10), dtype=str))
    for spec in specs:
        print spec
        # continue
        folder = spec.replace(".fits", "_db{0}".format(db))
        if not os.path.exists(os.path.join(working_dir, folder)):
            continue
        os.chdir(os.path.join(working_dir, folder))
        name = spec.replace(".fits", '').replace("n3311", "").split("_")
        name = name[1] + name[2]
        name = r"{0}".format(name)
        sn = float(sndata[spec])
        # Load the three MCMC chains; ages converted to log10(yr).
        ages_data = np.loadtxt("Chain_0/age_dist.txt")
        ages_data = 9. + np.log10(ages_data)
        ages = Dist(ages_data, [9 + np.log10(1), 9 + np.log10(15)])
        metal_data = np.loadtxt("Chain_0/metal_dist.txt")
        metal = Dist(metal_data, [-2.25, 0.90])
        alpha_data = np.loadtxt("Chain_0/alpha_dist.txt")
        alpha = Dist(alpha_data, [-0.3, 0.5])
        dists = [ages, metal, alpha]
        log, summary = [r"{0:28s}".format(spec)], []
        # Diagonal panels (1, 5, 9 of the 3x3 grid): 1D histograms + best fit.
        for i, d in enumerate(dists):
            weights = np.ones_like(d.data)/len(d.data)
            ax = plt.subplot(3,3,(4*i)+1)
            # plt.tick_params(labelsize=10)
            N, bins, patches = plt.hist(d.data, color="b",ec="k", bins=30,
                      range=tuple(lims[i]), normed=True, edgecolor="k",
                      histtype='bar',linewidth=1.)
            # shade each bar by its relative height
            fracs = N.astype(float)/N.max()
            norm = Normalize(-.2* fracs.max(), 1.5 * fracs.max())
            for thisfrac, thispatch in zip(fracs, patches):
                color = cm.gray_r(norm(thisfrac))
                thispatch.set_facecolor(color)
                thispatch.set_edgecolor("w")
            x = np.linspace(d.data.min(), d.data.max(), 100)
            tot = np.zeros_like(x)
            # for m,w,c in zip(d.gmm.best.means_, d.gmm.best.weights_,
            #                  d.gmm.best.covars_):
            #     y = w * normpdf(x, m, np.sqrt(c))[0]
            #     ax.plot(x, y, "--b")
            #     tot += y
            # ax.plot(x,tot, "-b", lw=2)
            # pdf = np.exp(logprob)
            # pdf_individual = responsibilities * pdf[:, np.newaxis]
            # print pdf_individual
            ylim = ax.get_ylim()
            plt.plot(x, d.best.pdf(x), "-r", label="AD = {0:.1f}".format(
                     d.best.ad), lw=1.5, alpha=0.7)
            ax.set_ylim(ylim)
            # plt.legend(loc=2, prop={'size':8})
            plt.axvline(d.best.MAPP, c="r", ls="--", lw=1.5)
            plt.tick_params(labelright=True, labelleft=False, labelsize=10)
            plt.xlim(d.lims)
            plt.locator_params(axis='x',nbins=10)
            if i < 2:
                plt.setp(ax.get_xticklabels(), visible=False)
            else:
                plt.xlabel(r"[$\mathregular{\alpha}$ / Fe]")
            plt.minorticks_on()
            summary.append([d.best.MAPP, d.uerr, d.lerr])
            for ss in [d.MAPP, d.MAPPmin, d.MAPPmax, d.best.ad]:
                log.append(r"{0:10s}".format(r"{0:.5f}".format(ss)))
        # NOTE(review): .format(modelname) on "summary.txt" is a no-op -- the
        # literal has no placeholder; the model name is never in the filename.
        logfile = os.path.join(working_dir, folder,
                               "summary.txt".format(modelname))
        with open(logfile, "w") as f:
            f.write("{0:28s}{1:10s}{2:10s}{3:10s}{6:10s}{4:10s}{2:10s}{3:10s}{6:10s}{5:10s}"
                    "{2:10s}{3:10s}{6:10s}\n".format("#Spectra", "Log AGE", "LOWER",
                    "UPPER", "[Z/H]", "[E/Fe]", "AD test"))
            f.write("".join(log))
        # Off-diagonal panels: joint 2D KDE maps of each parameter pair.
        ax = plt.subplot(3,3,4)
        hist2D(ages, metal, ax)
        plt.setp(ax.get_xticklabels(), visible=False)
        plt.ylabel("[Z/H]")
        ax = plt.subplot(3,3,7)
        hist2D(ages, alpha, ax)
        plt.ylabel(r"[$\mathregular{\alpha}$ / Fe]")
        plt.xlabel("log Age (yr)")
        ax = plt.subplot(3,3,8)
        plt.xlabel("[Z/H]")
        hist2D(metal, alpha, ax)
        # Annotations
        plt.annotate(r"Spectrum: {0} S/N={1:.1f}".format(name.upper(), sn),
                     xy=(.7,.91),
                     xycoords="figure fraction", ha="center", size=20)
        xys = [(.7,.84), (.7,.77), (.7,.70)]
        line = r"{0:28s}".format(spec)
        for j, par in enumerate([r"Log Age", r"[Z/H]", r"[$\alpha$/Fe]"]):
            text = r"{0}={1[0]:.2f}$^{{+{1[1]:.2f}}}_"" \
                   ""{{-{1[2]:.2f}}}$ dex".format(par, summary[j])
            plt.annotate(text, xy=xys[j], xycoords="figure fraction",
                         ha="center", size=20)
            # NOTE(review): `line` accumulates an unformatted literal and is
            # never used afterwards -- looks like leftover/dead code.
            line += "{0[1]:.5f}"
        plt.tight_layout(pad=0.2)
        # plt.pause(0.001)
        # plt.show(block=True)
        pp.savefig()
        plt.savefig(os.path.join(working_dir,
                    "logs/mcmc_{0}_{1}.png".format(name, modelname)), dpi=300)
        plt.clf()
    pp.close()
    summary_table(speclist(), modelname, db)
| kadubarbosa/hydra1 | mcmc_analysis.py | Python | gpl-2.0 | 11,798 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# Resolved URLs exercised by the tests below; the details URL uses a dummy
# template id ("id").
INDEX_URL = reverse('horizon:project:data_processing.cluster_templates:index')
DETAILS_URL = reverse(
    'horizon:project:data_processing.cluster_templates:details', args=['id'])
class DataProcessingClusterTemplateTests(test.TestCase):
    """Panel tests for Sahara cluster templates, using mox API stubs."""
    @test.create_stubs({api.sahara: ('cluster_template_list',)})
    def test_index(self):
        # Stub the template listing, then render the index page.
        api.sahara.cluster_template_list(IsA(http.HttpRequest)) \
            .AndReturn(self.cluster_templates.list())
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res,
                                'project/data_processing.cluster_templates/'
                                'cluster_templates.html')
        self.assertContains(res, 'Cluster Templates')
        self.assertContains(res, 'Name')
    @test.create_stubs({api.sahara: ('cluster_template_get',),
                        api.nova: ('flavor_get',)})
    def test_details(self):
        flavor = self.flavors.first()
        ct = self.cluster_templates.first()
        # The details view resolves the flavor and template repeatedly,
        # hence MultipleTimes() on both stubs.
        api.nova.flavor_get(IsA(http.HttpRequest), flavor.id) \
            .MultipleTimes().AndReturn(flavor)
        api.sahara.cluster_template_get(IsA(http.HttpRequest),
                                        IsA(unicode)) \
            .MultipleTimes().AndReturn(ct)
        self.mox.ReplayAll()
        res = self.client.get(DETAILS_URL)
        self.assertTemplateUsed(res,
                                'project/data_processing.cluster_templates/'
                                'details.html')
| JioCloud/horizon | openstack_dashboard/dashboards/project/data_processing/cluster_templates/tests.py | Python | apache-2.0 | 2,216 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mnn.model import MNnModel
# Single disc: one Miyamoto-Nagai component flattened along z.
model = MNnModel()
model.add_disc('z', 1.0, 10.0, 100.0)

# Evaluating density and potential :
print(model.evaluate_density(1.0, 2.0, -0.5))
print(model.evaluate_potential(1.0, 2.0, -0.5))
print(model.evaluate_force(1.0, 2.0, -0.5))

# Using vectors to evaluate density along an axis :
# Fix: np.linspace requires an integer sample count -- the original passed
# 100.0, which raises TypeError on NumPy >= 1.18.
x = np.linspace(0.0, 30.0, 100)
density = model.evaluate_density(x, 0.0, 0.0)
fig = plt.plot(x, density)
plt.show()

# Plotting density meshgrid (x-z slice, y fixed at 0)
x, y, z, v = model.generate_dataset_meshgrid((0.0, 0.0, -10.0), (30.0, 0.0, 10.0), (300, 1, 200))
fig = plt.imshow(v[:, 0].T)
plt.show()

# Contour plot over the same slice
x = np.linspace(0.0, 30.0, 300)
z = np.linspace(-10.0, 10.0, 200)
plt.contour(x, z, v[:, 0].T)
plt.show()

# Plotting force meshgrid (x-y plane, z = 0)
plt.close('all')
x, y, z, f = model.generate_dataset_meshgrid((-30.0, -30.0, 0.0), (30.0, 30.0, 0.0), (30, 30, 1), 'force')
x = x[:, :, 0].reshape(-1)
y = y[:, :, 0].reshape(-1)
fx = f[0, :, :, 0].reshape(-1)
fy = f[1, :, :, 0].reshape(-1)
extent = [x.min(), x.max(), y.min(), y.max()]
plt.figure(figsize=(10, 10))
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[1, 0])
pl1 = ax1.imshow(f[1, :, :, 0].T, extent=extent, aspect='auto')
ax2 = plt.subplot(gs[0, 1])
pl2 = ax2.imshow(f[0, :, :, 0].T, extent=extent, aspect='auto')
ax3 = plt.subplot(gs[1, 1])
pl3 = ax3.quiver(x.T, y.T, fx.T, fy.T, units='width', scale=0.045)
plt.show()
| mdelorme/MNn | mnn/examples/simple_model.py | Python | mit | 1,497 |
#!/usr/bin/env python
# -*- coding: ISO-8859-15 -*-
#
# Copyright (C) 2005-2007 David Guerizec <david@guerizec.net>
#
# Last modified: 2007 Dec 08, 20:11:32 by david
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os, os.path
from sshproxy.config import ConfigSection, path, get_config
from sshproxy.acl import ACLDB
from sshproxy import log
class FileACLConfigSection(ConfigSection):
    """Configuration section describing where the flat-file ACL database lives."""
    # Key under which this section appears in the sshproxy configuration file.
    section_id = 'acl_db.ini'
    # '@acl.db' is resolved by the `path` type (presumably relative to the
    # configuration directory -- confirm against sshproxy.config.path).
    section_defaults = {
        'file': '@acl.db',
        }
    # Coerce the 'file' option into a filesystem path.
    types = {
        'file': path,
        }

# Make the section known to the global configuration registry.
FileACLConfigSection.register()
class FileACLDB(ACLDB):
    """ACL database backed by a flat text file.

    File format: a rule starts on an unindented ``acl: rule`` line; lines
    beginning with a space or tab continue the previous rule.  Blank lines
    and lines whose first non-blank character is ``#`` are ignored.
    Assumes the ACLDB base class maintains ``self.rules`` as a mapping of
    acl name -> list of rule objects (used by save_rules).
    """

    def _parse_and_add(self, text):
        """Parse one ``acl: rule`` entry and register it via add_rule().

        Unparseable entries (no colon, or an empty rule part) are dropped
        with a warning instead of aborting the whole load.
        """
        try:
            acl, rule = text.split(':', 1)
            if not rule.strip():
                raise ValueError
        except ValueError:
            # Log the offending text itself.  The original code logged the
            # `acl` variable, which was unbound (or stale from a previous
            # entry) when the split itself failed to produce two parts.
            log.warning('Dropped unparseable rule %s' % text)
            return
        self.add_rule(acl=acl, rule=rule.lstrip())

    def load_rules(self):
        """Read the rule file and feed every entry to add_rule()."""
        rulefile = get_config('acl_db.ini')['file']
        if not os.path.exists(rulefile):
            # Create an empty, owner-only database on first use.
            open(rulefile, 'w').close()
            # 0o600: modern octal syntax, valid on Python 2.6+ and Python 3
            # (the original literal 0600 is a SyntaxError on Python 3).
            os.chmod(rulefile, 0o600)
            # no need to parse an empty file
            return None
        fd = open(rulefile)
        nline = []   # parts of the entry that starts on the current line
        line = []    # parts of the entry currently being accumulated
        for linepart in fd.readlines():
            if not linepart.strip() or linepart.strip()[0] == '#':
                continue
            if linepart[0] not in (' ', '\t'):
                # Unindented line: a new entry begins here.
                nline = [linepart.strip()]
                if not line:
                    line = nline
                    continue
            else:
                # Indented line: continuation of the current entry.
                line.append(linepart.strip())
                continue
            # A new entry started while one was pending: flush the pending one.
            self._parse_and_add(' '.join(line))
            line = nline
        if line:
            # Flush the last pending entry.
            self._parse_and_add(' '.join(line))
        fd.close()

    def save_rules(self):
        """Atomically rewrite the rule file from the in-memory rules."""
        rulefile = get_config('acl_db.ini')['file']
        if not os.path.exists(rulefile):
            open(rulefile, 'w').close()
        fd = open(rulefile + '.new', 'w')
        for acl in self.rules.keys():
            for rule in self.rules[acl]:
                # Continuation lines are indented so load_rules() can
                # reassemble multi-line rules.
                fd.write('%s:\n %s\n\n'
                         % (acl, rule.rule.replace('\n', '\n ')))
        fd.close()
        # os.rename makes the replacement atomic on POSIX filesystems.
        os.rename(rulefile + '.new', rulefile)
os.rename(rulefile+'.new', rulefile)
| OutOfOrder/sshproxy | lib/ini_db/acl.py | Python | gpl-2.0 | 3,328 |
import unittest
from QGL import *
from QGL.tools.euler_angles import *
from QGL.tools.matrix_tools import *
from QGL.tools.clifford_tools import C1
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
    """Round-trip checks for Euler-angle decompositions of 2x2 unitaries."""

    # Number of random unitaries to sample per decomposition test.
    N_test = 1000

    def setUp(self):
        pass
        #setup_test_lib()
        #self.q1 = QubitFactory('q1')

    def test_zyz_decomp(self):
        # A Haar-random unitary must be reproduced from its ZYZ angles.
        for j in range(self.N_test):
            Uh = haar_unitary(2)
            Ux = zyz_unitary(*zyz_angles(Uh))
            assert is_close(Uh, Ux)

    def test_xyx_decomp(self):
        # Same round trip for the XYX convention.
        for j in range(self.N_test):
            Uh = haar_unitary(2)
            Ux = xyx_unitary(*xyx_angles(Uh))
            assert is_close(Uh, Ux)

    def test_diatomic_decomp(self):
        # Same round trip for the diatomic convention.
        for j in range(self.N_test):
            Uh = haar_unitary(2)
            Ux = diatomic_unitary(*diatomic_angles(Uh))
            assert is_close(Uh, Ux)

    def test_xyx_cliffords(self):
        # XYX decomposition must reproduce each of the 24 single-qubit
        # Cliffords exactly (failure message reports the Clifford index).
        for j in range(24):
            Uxyx = xyx_unitary(*xyx_angles(C1[j]))
            assert is_close(Uxyx, C1[j]), f"{j}"

    def test_diatomic_cliffords(self):
        # Diatomic decomposition must also reproduce each Clifford.
        for j in range(24):
            Ud = diatomic_unitary(*diatomic_angles(C1[j]))
            assert is_close(Ud, C1[j]), f"{j}"
if __name__ == "__main__":
unittest.main()
| BBN-Q/QGL | tests/test_Euler.py | Python | apache-2.0 | 1,183 |
# @package ITDCHelper
# @author Avtandil Kikabidze aka LONGMAN
# @copyright Copyright (c) 2008-2015, Avtandil Kikabidze (akalongman@gmail.com)
# @link http://long.ge
# @license http://opensource.org/licenses/mit-license.php The MIT License (MIT)
import os
import sys
import sublime
import sublime_plugin
import threading
import subprocess
import functools
import os.path
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, "itdchelper")
if libs_path not in sys.path:
sys.path.append(libs_path)
from asanalib.asana import asana
from pprint import pprint
AsanaProjects = sublime.load_settings('ITDCHelper.sublime-settings')
class GetAsanaTasksCommand(sublime_plugin.TextCommand):
    """List the Asana tasks of the project mapped to the current window.

    Selecting a task opens a small action menu (back / done /
    done & commit / rename / cancel).
    """
    window = None

    def run(self,edit,archive = False):
        self.window = self.view.window()
        try:
            self.path = self.window.folders()[0]
        except:
            # No folder open: the command only works in project mode.
            sublime.status_message('Not project mode')
            return
        self.archive = archive
        if AsanaProjects.has(self.path):
            project_id = AsanaProjects.get(self.path).get('id')
            project_name = AsanaProjects.get(self.path).get('name')
            # First panel entry acts as a header that reopens the project menu.
            self.task_ids = [project_id]
            self.task_names = ['### '+project_name+' ###']
            thread = AsanaApiCall('get_project_tasks', int(project_id), self.show_quick_panel_task)
            thread.start()
        else:
            # Folder not mapped to an Asana project yet: ask the user first.
            self.window.run_command('set_asana_project')

    def show_quick_panel_task(self,tasks):
        """Show the task quick panel; 'cache' reuses the previously fetched lists."""
        if tasks != 'cache':
            for task in tasks:
                # Skip section headers (names ending with ':') and, unless
                # showing the archive, completed tasks.
                if (self.archive or task[u'completed'] == False) and task[u'name'][-1:] != ':':
                    self.task_ids.append(task[u'id'])
                    self.task_names.append(task[u'name'])
        if len(self.task_ids) > 0 :
            pprint (self.task_names)
            self.window.show_quick_panel(self.task_names, self.show_quick_panel_select)
        else:
            sublime.status_message('Not exist tasks')

    def show_quick_panel_select(self,index):
        """Handle the task picked in the quick panel."""
        if index == -1:
            # Panel dismissed without a selection.
            return
        elif index == 0:
            # Header entry: go back to the project menu.
            self.window.run_command('get_current_project')
            return
        self.current_task_id = self.task_ids[index]
        self.current_task_name = self.task_names[index]
        command = ['0: Back','1: Done','2: Done & Commit','3: Update','4: Cancel']
        self.window.show_quick_panel(command, self.command_task)

    def command_task(self,index):
        """Dispatch the action chosen for the selected task."""
        if index == 0 :
            self.show_quick_panel_task('cache')
        elif index == 1 or index == 2:
            # NOTE(review): this deletes the entry at the *menu* index (1 or
            # 2), not at the selected task's panel index -- looks suspicious;
            # verify against the intended behavior.
            del self.task_ids[index]
            del self.task_names[index]
            thread = AsanaApiCall('done_task', int(self.current_task_id), self.on_done)
            thread.start()
            if index == 2:
                self.window.show_input_panel('Commit -am: ', self.current_task_name+' #'+str(self.current_task_id), self.git_commit, None, None)
        elif index == 3 :
            self.window.show_input_panel('Change name: ', self.current_task_name, self.update_task, None, None)
        elif index == 4 :
            self.on_done()

    def update_task(self,name):
        # Rename the selected task asynchronously.
        thread = AsanaApiCall('update_task', [int(self.current_task_id),name], self.on_done)
        thread.start()

    def git_commit(self,message):
        # Commit all tracked changes with the entered message.
        thread = CommandThread(['git', 'commit', '-am',message], self.repo_name)
        thread.start()

    def repo_name(self,message):
        # Remember the commit output, then look up the remote URL.
        self.story = message
        thread = CommandThread(['git', 'remote', '-v'], self.git_log)
        thread.start()
        return

    def git_log(self,repo):
        # Build a GitHub commit link from the remote URL and the last commit.
        repo = repo.split(':')
        repo = repo[1].split('.git')
        format = '%an, %ad \n https://github.com/'+repo[0]+'/commit/%H'
        thread = CommandThread(['git', 'log', '--pretty=format:'+format,'-1'], self.add_story)
        thread.start()

    def add_story(self,message):
        # Attach commit info to the task as an Asana story (comment).
        self.story = '.\n'+ self.story + '\n'+ message
        thread = AsanaApiCall('add_story', [int(self.current_task_id),self.story], self.on_done)
        thread.start()

    def on_done(self,name=False):
        if name :
            sublime.status_message('Done '+ name)
        # Refresh the task list from the cached entries.
        self.show_quick_panel_task('cache')
class getCurrentProjectCommand(sublime_plugin.TextCommand):
    """Top-level project menu: create/list tasks, show archive, remap project."""
    window = None

    def run(self,edit):
        self.window = self.view.window()
        command = ['0: Create New Task','1: Show Tasks','2: Show Completed Tasks','3: Change Project','4: Cancel']
        # command = ['0: Create New Task','1: Show Tasks','2: Show Archive Tasks','3: Change Project','4: Update Project','5: Cancel']
        self.window.show_quick_panel(command, self.command_task)

    def command_task(self,index):
        """Dispatch the chosen menu entry to the matching command."""
        if index == 0 :
            self.window.run_command('add_asana_task')
        elif index == 1:
            self.window.run_command('get_asana_tasks')
        elif index == 2:
            # Completed tasks are shown by running the task list in archive mode.
            self.window.run_command('get_asana_tasks',{"archive": True})
        elif index == 3 :
            self.window.run_command('set_asana_project')
        # elif index == 4 :
        #     sublime.message_dialog('Now under development')
        elif index == 4 :
            self.on_done()

    def on_done(self,name=False):
        if name :
            sublime.status_message('Done '+ name)
class SetAsanaProjectCommand(sublime_plugin.TextCommand):
    """Map the current window's first folder to one of the user's Asana projects."""
    window = None

    def run(self,edit):
        self.window = self.view.window()
        thread = AsanaApiCall('get_project_id', None, self.show_quick_panel)
        thread.start()

    def show_quick_panel(self,projects):
        """Offer every non-archived project for selection."""
        self.project_ids = []
        self.project_names = []
        self.project_workspaces = []
        for project in projects:
            if project[u'archived'] == False:
                self.project_names.append(project[u'name'])
                self.project_ids.append(project[u'id'])
                self.project_workspaces.append(project[u'workspace'][u'id'])
        self.window.show_quick_panel(self.project_names, self.save_project_id)

    def save_project_id(self,index):
        """Persist the chosen project for this folder, then open its task list."""
        if index == -1 :
            # Panel dismissed without a selection.
            return
        self.path = self.window.folders()[0]
        AsanaProjects.set(self.path, {
            'id':str(self.project_ids[index]),
            'name':str(self.project_names[index]),
            'workspace':str(self.project_workspaces[index]),
            })
        # NOTE(review): settings were loaded from 'ITDCHelper.sublime-settings'
        # at module level but are saved as 'AsanaProjects.sublime-settings' --
        # this mismatch may prevent the mapping from persisting; verify.
        sublime.save_settings('AsanaProjects.sublime-settings')
        self.window.run_command('get_asana_tasks')
class AddAsanaTaskCommand(sublime_plugin.TextCommand):
    """Prompt for a task name and create it in the mapped Asana project."""
    window = None

    def run(self,edit):
        self.window = self.view.window()
        self.path = self.view.window().folders()[0]
        self.window.show_input_panel('New Task: ', '', self.create_task, None, None)

    def create_task(self,name):
        """Create the task in the project's workspace, then attach it to the project."""
        project_id = AsanaProjects.get(self.path).get('id')
        project_workspace = AsanaProjects.get(self.path).get('workspace')
        thread = AsanaApiCall('create_task', [name,project_id,project_workspace], self.show_quick_panel)
        thread.start()

    def show_quick_panel(self,name):
        # Confirm creation and refresh the task list.
        sublime.status_message('Created: '+ name)
        self.window.run_command('get_asana_tasks')
def main_thread(callback, *args, **kwargs):
    """Schedule *callback(*args, **kwargs)* on Sublime's main thread."""
    # sublime.set_timeout gets used to send things onto the main thread
    # most sublime.[something] calls need to be on the main thread
    sublime.set_timeout(functools.partial(callback, *args, **kwargs), 0)
def _make_text_safeish(text, fallback_encoding, method='decode'):
    """Decode (or encode) *text* as UTF-8, retrying with *fallback_encoding*.

    Sublime inserts text as unicode and chokes on unknown characters, while
    git output carries no declared encoding -- so try UTF-8 first and fall
    back to the configured encoding when that fails.
    """
    convert = getattr(text, method)
    try:
        return convert('utf-8')
    except (UnicodeEncodeError, UnicodeDecodeError):
        return convert(fallback_encoding)
class AsanaApiCall(threading.Thread):
    """Run one Asana API call on a worker thread.

    *command* names the operation, *args* its parameter(s) (a scalar or a
    list, depending on the command); *callback* is invoked on Sublime's
    main thread with the result.
    """

    def __init__(self,command,args,callback):
        asana_api_key = sublime.load_settings('ITDCHelper.sublime-settings').get('asana_api_key')
        if not asana_api_key :
            # Without a key no call can be made; run() becomes a no-op.
            sublime.error_message('You have to set asana_api_key in the Preferences.sublime-settings. folllowing { "asana_api_key" : "YOUR_API KEY" }')
            self.AsanaApi = None
        else :
            self.AsanaApi = asana.AsanaAPI( asana_api_key , debug=True)
        self.command = command
        self.args = args
        self.callback = callback
        threading.Thread.__init__(self)

    def run(self):
        """Dispatch self.command to the Asana client and hand the result to the callback."""
        if not self.AsanaApi:
            return
        try:
            if self.command == 'get_project_tasks':
                tasks = self.AsanaApi.get_project_tasks(self.args)
                main_thread(self.callback, tasks)
            elif self.command == 'get_task':
                projects = self.AsanaApi.get_task(self.args)
                main_thread(self.callback,projects)
            elif self.command == 'get_project_id':
                projects = self.AsanaApi.list_projects()
                main_thread(self.callback,projects)
            elif self.command == 'create_task':
                # args = [name, project_id, workspace_id]
                task = self.AsanaApi.create_task(self.args[0], self.args[2])
                self.AsanaApi.add_project_task(task[u'id'], self.args[1])
                main_thread(self.callback,self.args[0])
            elif self.command == 'update_task':
                # args = [task_id, new_name]
                task = self.AsanaApi.update_task(self.args[0], self.args[1])
                main_thread(self.callback,task[u'name'])
            elif self.command == 'add_story':
                # args = [task_id, story_text]
                task = self.AsanaApi.add_story(self.args[0], self.args[1])
                main_thread(self.callback,self.args[1])
            elif self.command == 'done_task':
                # Mark the task completed (final positional argument).
                task = self.AsanaApi.update_task(self.args, None, None, None, True)
                main_thread(self.callback,task[u'name'])
            return
        except Exception as e:
            # The original bare `except:` swallowed every error -- including
            # KeyboardInterrupt/SystemExit -- and reported the useless string
            # 'error'.  Narrow the catch and surface what actually failed.
            sublime.error_message('Asana API call %s failed: %s' % (self.command, e))
            self.result = False
# self.view.window().run_command('exec', {'cmd': ['sh', 'script.sh'], 'quiet': False})
class CommandThread(threading.Thread):
    """Run an external command (typically git) on a worker thread.

    *on_done* is invoked on the main thread with the decoded stdout on
    success, or with the return code on CalledProcessError.
    """

    def __init__(self, command, on_done, working_dir="", fallback_encoding="", **kwargs):
        threading.Thread.__init__(self)
        self.command = command
        self.on_done = on_done
        self.working_dir = working_dir
        # Optional data to write to the child's stdin.
        if "stdin" in kwargs:
            self.stdin = kwargs["stdin"]
        else:
            self.stdin = None
        # Allow redirecting stdout; default to capturing it via a pipe.
        if "stdout" in kwargs:
            self.stdout = kwargs["stdout"]
        else:
            self.stdout = subprocess.PIPE
        self.fallback_encoding = fallback_encoding
        # Remaining kwargs are forwarded to on_done.
        self.kwargs = kwargs

    def run(self):
        try:
            # Per http://bugs.python.org/issue8557 shell=True is required to
            # get $PATH on Windows. Yay portable code.
            shell = os.name == 'nt'
            if self.working_dir != "":
                # NOTE(review): os.chdir changes the whole process's cwd,
                # not just this thread's -- racy with concurrent threads.
                os.chdir(self.working_dir)
            proc = subprocess.Popen(self.command,
                stdout=self.stdout, stderr=subprocess.STDOUT,
                stdin=subprocess.PIPE,
                shell=shell, universal_newlines=True)
            output = proc.communicate(self.stdin)[0]
            if not output:
                output = ''
            # if sublime's python gets bumped to 2.7 we can just do:
            # output = subprocess.check_output(self.command)
            main_thread(self.on_done,
                _make_text_safeish(output, self.fallback_encoding), **self.kwargs)
        except subprocess.CalledProcessError as e:
            main_thread(self.on_done, e.returncode)
        except OSError as e:
            # errno 2 == ENOENT: the executable was not found on PATH.
            if e.errno == 2:
                main_thread(sublime.error_message, "Git binary could not be found in PATH\n\nConsider using the git_command setting for the Git plugin\n\nPATH is: %s" % os.environ['PATH'])
            else:
                raise e
| itdc/sublimetext-itdchelper | ITDCHelperAsana.py | Python | mit | 12,157 |
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 589 $
# $Date: 2009-02-04 14:33:05 -0700 (Wed, 04 Feb 2009) $
# $Author: gabalz $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/tests/test_1_environment.py $
import random
import sys
from rlglue.environment.Environment import Environment
from rlglue.environment import EnvironmentLoader as EnvironmentLoader
from rlglue.types import Observation
from rlglue.types import Action
from rlglue.types import Reward_observation_terminal
class test_1_environment(Environment):
    """Deterministic RL-Glue test environment.

    Emits fixed observations (including extreme values) so the matching
    test agent/experiment can verify that values of every supported type
    cross the codec intact.
    """
    # Steps taken in the current episode.
    stepCount=0
    # Single Observation instance reused (and mutated) for every step.
    o=Observation()

    def env_init(self):
        # The task spec string is not parsed by the test harness.
        return "sample task spec"

    def env_start(self):
        self.stepCount=0
        self.o.intArray=[1]
        self.o.doubleArray=[0.0/2.0, 1.0/2.0]
        self.o.charArray=['a','b','c']
        return self.o;

    def env_step(self,action):
        ro=Reward_observation_terminal()
        terminal=False
        if self.stepCount < 5:
            # First five steps: observation carries only the step counter,
            # reward 1.0; the episode terminates on the fifth step.
            self.o.doubleArray=[]
            self.o.charArray=[]
            self.o.intArray=[self.stepCount]
            self.stepCount=self.stepCount+1
            if self.stepCount==5:
                terminal=True
            ro.r=1.0
        else:
            # Past the episode: extreme/boundary values (denormal-ish
            # doubles, special chars, INT_MIN/INT_MAX) to stress the codec.
            self.o.doubleArray=[0.0078125,-0.0078125,0.0,0.0078125e150,-0.0078125e150]
            self.o.charArray=['g','F','?',' ','&']
            self.o.intArray=[173,-173,2147483647,0,-2147483648]
            ro.r=-2.0
        ro.o=self.o
        ro.terminal=terminal
        return ro

    def env_cleanup(self):
        pass

    def env_message(self,inMessage):
        # Echo the message back, inserting "<stepCount>." (stepCount % 3)
        # times between two copies of the input, separated by '|'.
        timesToPrint=self.stepCount%3
        outMessage=inMessage+"|"
        for i in range(0, timesToPrint):
            outMessage=outMessage+"%d" % (self.stepCount)
            outMessage=outMessage+"."
        outMessage=outMessage+"|"+inMessage
        return outMessage
if __name__=="__main__":
EnvironmentLoader.loadEnvironment(test_1_environment())
| okkhoy/mo-rlglue-python-codec | tests/test_1_environment.py | Python | mit | 2,322 |
import os
import numpy as np
from dipy.viz import actor, window
import numpy.testing as npt
from nibabel.tmpdirs import TemporaryDirectory
from dipy.tracking.streamline import center_streamlines, transform_streamlines
from dipy.align.tests.test_streamlinear import fornix_streamlines
from dipy.testing.decorators import xvfb_it
# TEST_WITH_XVFB may be unset, truthy, or the literal string 'skip'
# (which disables all rendering tests in this module).
use_xvfb = os.environ.get('TEST_WITH_XVFB', False)
if use_xvfb == 'skip':
    skip_it = True
else:
    skip_it = False

# Only run when VTK, the VTK color module and image reading are available
# and the tests are not explicitly skipped.
run_test = (actor.have_vtk and
            actor.have_vtk_colors and
            window.have_imread and
            not skip_it)

if actor.have_vtk:
    # Presumably VTK 5 under Xvfb is unreliable for the slicer snapshot
    # test, so it is skipped in that combination -- confirm with CI history.
    if actor.major_version == 5 and use_xvfb:
        skip_slicer = True
    else:
        skip_slicer = False
else:
    skip_slicer = False
@npt.dec.skipif(skip_slicer)
@npt.dec.skipif(not run_test)
@xvfb_it
def test_slicer():
    """Smoke-test actor.slicer: display, extents, RGB data, LUTs, affines, reslicing."""
    renderer = window.renderer()
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.eye(4)
    slicer = actor.slicer(data, affine)
    # Show the axial slice at z=25.
    slicer.display(None, None, 25)
    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # copy pixels in numpy array directly
    arr = window.snapshot(renderer, 'test_slicer.png', offscreen=False)
    import scipy
    # NOTE(review): the prints below look like leftover debugging output.
    print(scipy.__version__)
    print(scipy.__file__)
    print(arr.sum())
    print(np.sum(arr == 0))
    print(np.sum(arr > 0))
    print(arr.shape)
    print(arr.dtype)
    report = window.analyze_snapshot(arr, find_objects=True)
    print(report)
    npt.assert_equal(report.objects, 1)
    # print(arr[..., 0])

    # The slicer can cut directly a smaller part of the image
    slicer.display_extent(10, 30, 10, 30, 35, 35)
    renderer.ResetCamera()
    renderer.add(slicer)

    # save pixels in png file not a numpy array
    with TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'slice.png')
        # window.show(renderer)
        arr = window.snapshot(renderer, fname, offscreen=False)
        report = window.analyze_snapshot(fname, find_objects=True)
        npt.assert_equal(report.objects, 1)

    # A 1-D array is not a volume: must raise.
    npt.assert_raises(ValueError, actor.slicer, np.ones(10))

    renderer.clear()

    # 4-D input with a trailing 3 is treated as RGB: expect a red slice.
    rgb = np.zeros((30, 30, 30, 3))
    rgb[..., 0] = 1.
    rgb_actor = actor.slicer(rgb)
    renderer.add(rgb_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    arr = window.snapshot(renderer, offscreen=False)
    report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report.colors_found, [True])

    # Custom lookup table; display along each axis in turn, then copy.
    lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                      hue_range=(0.4, 1.),
                                      saturation_range=(1, 1.),
                                      value_range=(0., 1.))
    renderer.clear()
    slicer_lut = actor.slicer(data, lookup_colormap=lut)
    slicer_lut.display(10, None, None)
    slicer_lut.display(None, 10, None)
    slicer_lut.display(None, None, 10)
    slicer_lut2 = slicer_lut.copy()
    slicer_lut2.display(None, None, 10)
    renderer.add(slicer_lut2)
    renderer.reset_clipping_range()
    arr = window.snapshot(renderer, offscreen=False)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)

    renderer.clear()

    # Anisotropic affine with nearest-neighbour interpolation: the slicer
    # keeps the data's voxel shape.
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    slicer = actor.slicer(data, affine, interpolation='nearest')
    slicer.display(None, None, 25)
    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    arr = window.snapshot(renderer, offscreen=False)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_equal(data.shape, slicer.shape)

    renderer.clear()

    # After reslicing to isotropic voxels, the slicer shape scales by the
    # original zooms.
    data = (255 * np.random.rand(50, 50, 50))
    affine = np.diag([1, 3, 2, 1])
    from dipy.align.reslice import reslice
    data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
                             new_zooms=(1, 1, 1))
    slicer = actor.slicer(data2, affine2, interpolation='linear')
    slicer.display(None, None, 25)
    renderer.add(slicer)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer, reset_camera=False)
    arr = window.snapshot(renderer, offscreen=False)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 1)
    npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
                           np.array(slicer.shape))
@npt.dec.skipif(not run_test)
@xvfb_it
def test_streamtube_and_line_actors():
    """Render two lines as line and streamtube actors and verify objects/colors."""
    renderer = window.renderer()

    # Two parallel 3-point lines, one red and one blue.
    line1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    line2 = line1 + np.array([0.5, 0., 0.])
    lines = [line1, line2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    c = actor.line(lines, colors, linewidth=3)
    window.add(renderer, c)

    # Same lines with spline subdivision.
    c = actor.line(lines, colors, spline_subdiv=5, linewidth=3)
    window.add(renderer, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = actor.streamtube(lines, colors, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)
    # Two lines + two shifted tubes = four visible objects, both colors found.
    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])

    # as before with splines
    c2 = actor.streamtube(lines, colors, spline_subdiv=5, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)
    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])
@npt.dec.skipif(not run_test)
@xvfb_it
def test_bundle_maps():
    """Color fornix bundle actors by a volume metric, per-point values and RGB."""
    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    # Translate the centered bundle to (100, 100, 100) so it sits inside
    # the 200^3 metric volume below.
    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])
    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)
    # NOTE(review): two actors were added but only 1 is asserted -- verify
    # whether analyze_renderer counts the scalar bar.
    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    # One random scalar per streamline point.
    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0
    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    # One RGB triple per streamline point.
    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0
    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    renderer.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
if __name__ == "__main__":
npt.run_module_suite()
| villalonreina/dipy | dipy/viz/tests/test_fvtk_actors.py | Python | bsd-3-clause | 8,239 |
# MNIST and Dropout
# Five fully-connected layers (784-512-512-512-512-10) with Xavier
# initialization, ReLU activations and dropout regularization.
import tensorflow as tf
import random

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# dropout (keep_prob) rate 0.7 on training, but should be 1 for testing
keep_prob = tf.placeholder(tf.float32)

# hidden layer 1: 784 -> 512
W1 = tf.get_variable("W1", shape=[784, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)

# hidden layer 2: 512 -> 512
W2 = tf.get_variable("W2", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)

# hidden layer 3: 512 -> 512
W3 = tf.get_variable("W3", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)

# hidden layer 4: 512 -> 512
W4 = tf.get_variable("W4", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)

# output layer: 512 -> 10 logits (softmax is applied inside the loss)
W5 = tf.get_variable("W5", shape=[512, 10],
                     initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L4, W5) + b5

# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy (dropout disabled: keep_prob = 1)
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))
| ram1993/neuralnetwork | Tensorflow/deep_nn_mnist.py | Python | mit | 3,041 |
from template import Template
from template.test import TestCase, main
class CaseTest(TestCase):
    """Exercise Template Toolkit keyword case handling (ANYCASE option)."""

    def testCase(self):
        # Default: directives must be upper-case, so lower-case reserved
        # words ('include', 'for', ...) remain usable as variable names.
        # ANYCASE: keywords match regardless of case.
        ttdef = Template({ 'POST_CHOMP': 1 })
        ttanycase = Template({ 'ANYCASE': 1, 'POST_CHOMP': 1 })
        tts = (('default', ttdef), ('anycase', ttanycase))
        self.Expect(DATA, tts, self._callsign())
DATA = r"""
-- test --
[% include = a %]
[% for = b %]
i([% include %])
f([% for %])
-- expect --
i(alpha)
f(bravo)
-- test --
[% IF a AND b %]
good
[% ELSE %]
bad
[% END %]
-- expect --
good
-- test --
# 'and', 'or' and 'not' can ALWAYS be expressed in lower case, regardless
# of CASE sensitivity option.
[% IF a and b %]
good
[% ELSE %]
bad
[% END %]
-- expect --
good
-- test --
[% include = a %]
[% include %]
-- expect --
alpha
-- test --
-- use anycase --
[% include foo bar='baz' %]
[% BLOCK foo %]this is foo, bar = [% bar %][% END %]
-- expect --
this is foo, bar = baz
-- test --
[% 10 div 3 %] [% 10 DIV 3 +%]
[% 10 mod 3 %] [% 10 MOD 3 %]
-- expect --
3 3
1 1
"""
| gsnedders/Template-Python | t/case_test.py | Python | artistic-2.0 | 1,008 |
from .selection import split_selection
| twolfson/sublime-plugin-tests | sublime_plugin_tests/utils/__init__.py | Python | unlicense | 39 |
# -*- coding: utf-8 -*-
import time
import datetime
from nose.tools import * # noqa; PEP8 asserts
from osf_tests.factories import ProjectFactory, NodeFactory, AuthUserFactory
from tests.base import OsfTestCase
from framework.auth.decorators import Auth
from website.profile import utils
class TestContributorUtils(OsfTestCase):
    """Unit tests for website.profile.utils.serialize_user."""

    def setUp(self):
        super(TestContributorUtils, self).setUp()
        self.project = ProjectFactory()

    def test_serialize_user(self):
        # The project creator is visible with admin permission by default.
        serialized = utils.serialize_user(self.project.creator, self.project)
        assert_true(serialized['visible'])
        assert_equal(serialized['permission'], 'admin')

    def test_serialize_user_full_does_not_include_emails_by_default(self):
        serialized = utils.serialize_user(self.project.creator, self.project, full=True)
        assert_not_in('emails', serialized)

    def test_serialize_user_full_includes_email_if_is_profile(self):
        # Emails are only exposed when serializing for the user's own profile.
        serialized = utils.serialize_user(
            self.project.creator,
            self.project,
            full=True,
            is_profile=True
        )
        assert_in('emails', serialized)

    def test_serialize_user_admin(self):
        # NOTE(review): with admin=True the creator serializes as not
        # visible with 'read' permission -- presumably admin-view defaults;
        # confirm against serialize_user's implementation.
        serialized = utils.serialize_user(self.project.creator, self.project, admin=True)
        assert_false(serialized['visible'])
        assert_equal(serialized['permission'], 'read')
class TestContributorViews(OsfTestCase):
    """Integration tests for the contributor listing views."""

    def setUp(self):
        super(TestContributorViews, self).setUp()
        self.user = AuthUserFactory()
        self.auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)

    def _add_contributors(self, node, visible_flags):
        """Add one freshly created contributor per flag; return the users."""
        added = []
        for visible in visible_flags:
            contributor = AuthUserFactory()
            node.add_contributor(contributor, auth=self.auth, visible=visible)
            added.append(contributor)
        return added

    def test_get_contributors_no_limit(self):
        # One visible and one invisible contributor besides the creator.
        self._add_contributors(self.project, [True, False])
        self.project.save()
        url = self.project.api_url_for('get_contributors')
        res = self.app.get(url, auth=self.user.auth)
        # Should be two visible contributors on the project
        assert_equal(
            len(res.json['contributors']),
            2,
        )

    def test_get_contributors_with_limit(self):
        # Four visible contributors plus one invisible one.
        self._add_contributors(self.project, [True, True, True, True, False])
        self.project.save()
        # Set limit to three contributors
        url = self.project.api_url_for('get_contributors', limit=3)
        res = self.app.get(url, auth=self.user.auth)
        # Should be three visible contributors on the project
        assert_equal(
            len(res.json['contributors']),
            3,
        )
        # There should be two 'more' contributors not shown
        assert_equal(
            (res.json['more']),
            2,
        )

    def test_get_contributors_from_parent(self):
        self._add_contributors(self.project, [True, False])
        component = NodeFactory(parent=self.project, creator=self.user)
        user_already_on_component = AuthUserFactory()
        component.add_contributor(
            user_already_on_component,
            auth=self.auth,
            visible=True,
        )
        self.project.add_contributor(
            user_already_on_component,
            auth=self.auth,
            visible=True,
        )
        self.project.save()
        component.save()
        url = component.api_url_for('get_contributors_from_parent')
        res = self.app.get(url, auth=self.user.auth)
        # Should be all contributors, client-side handles marking
        # contributors that are already added to the child.
        ids = [contrib['id'] for contrib in res.json['contributors']]
        assert_not_in(user_already_on_component.id, ids)
        assert_equal(
            len(res.json['contributors']),
            2,
        )
| laurenrevere/osf.io | tests/test_contributors_views.py | Python | apache-2.0 | 4,701 |
""" io on the clipboard """
import warnings
from pandas.compat import StringIO, PY2, PY3
from pandas.core.dtypes.generic import ABCDataFrame
from pandas import compat, get_option, option_context
def read_clipboard(sep=r'\s+', **kwargs):  # pragma: no cover
    r"""
    Read text from clipboard and pass to read_table. See read_table for the
    full argument list

    Parameters
    ----------
    sep : str, default '\s+'.
        A string or regex delimiter. The default of '\s+' denotes
        one or more whitespace characters.

    Returns
    -------
    parsed : DataFrame

    Raises
    ------
    NotImplementedError
        If an encoding other than UTF-8 is requested (the clipboard layer
        only supports UTF-8).
    """
    encoding = kwargs.pop('encoding', 'utf-8')

    # only utf-8 is valid for passed value because that's what clipboard
    # supports
    if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
        raise NotImplementedError(
            'reading from clipboard only supports utf-8 encoding')

    from pandas.io.clipboard import clipboard_get
    from pandas.io.parsers import read_table
    text = clipboard_get()

    # try to decode (if needed on PY3)
    # Strange. linux py33 doesn't complain, win py33 does
    if PY3:
        try:
            text = compat.bytes_to_str(
                text, encoding=(kwargs.get('encoding') or
                                get_option('display.encoding'))
            )
        except (UnicodeDecodeError, AttributeError):
            # Best-effort decode only: clipboard_get may already return str.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and masked unrelated bugs.)
            pass

    # Excel copies into clipboard with \t separation
    # inspect no more then the 10 first lines, if they
    # all contain an equal number (>0) of tabs, infer
    # that this came from excel and set 'sep' accordingly
    lines = text[:10000].split('\n')[:-1][:10]

    # Need to remove leading white space, since read_table
    # accepts:
    #    a  b
    # 0  1  2
    # 1  3  4
    counts = {x.lstrip().count('\t') for x in lines}
    if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
        sep = '\t'

    # Edge case where sep is specified to be None, return to default
    if sep is None and kwargs.get('delim_whitespace') is None:
        sep = r'\s+'

    # Regex separator currently only works with python engine.
    # Default to python if separator is multi-character (regex).
    # Guard sep against None (possible when delim_whitespace is passed):
    # len(None) would raise TypeError.
    if sep is not None and len(sep) > 1 and kwargs.get('engine') is None:
        kwargs['engine'] = 'python'
    elif sep is not None and len(sep) > 1 and kwargs.get('engine') == 'c':
        warnings.warn('read_clipboard with regex separator does not work'
                      ' properly with c engine')

    # In PY2, the c table reader first encodes text with UTF-8 but Python
    # table reader uses the format of the passed string. For consistency,
    # encode strings for python engine so that output from python and c
    # engines produce consistent results
    if kwargs.get('engine') == 'python' and PY2:
        text = text.encode('utf-8')

    return read_table(StringIO(text), sep=sep, **kwargs)
def to_clipboard(obj, excel=True, sep=None, **kwargs):  # pragma: no cover
    """
    Attempt to write text representation of object to the system clipboard
    The clipboard can be then pasted into Excel for example.
    Parameters
    ----------
    obj : the object to write to the clipboard
    excel : boolean, defaults to True
        if True, use the provided separator, writing in a csv
        format for allowing easy pasting into excel.
        if False, write a string representation of the object
        to the clipboard
    sep : optional, defaults to tab
    other keywords are passed to to_csv
    Notes
    -----
    Requirements for your platform
      - Linux: xclip, or xsel (with gtk or PyQt4 modules)
      - Windows:
      - OS X:
    """
    encoding = kwargs.pop('encoding', 'utf-8')
    # testing if an invalid encoding is passed to clipboard
    if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
        raise ValueError('clipboard only supports utf-8 encoding')
    from pandas.io.clipboard import clipboard_set
    if excel is None:
        excel = True
    if excel:
        try:
            if sep is None:
                sep = '\t'
            buf = StringIO()
            # clipboard_set (pyperclip) expects unicode
            obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
            text = buf.getvalue()
            if PY2:
                text = text.decode('utf-8')
            clipboard_set(text)
            return
        except TypeError:
            # NOTE: on failure we deliberately fall through to the plain
            # string-representation path below instead of raising.
            warnings.warn('to_clipboard in excel mode requires a single '
                          'character separator.')
    elif sep is not None:
        warnings.warn('to_clipboard with excel=False ignores the sep argument')
    if isinstance(obj, ABCDataFrame):
        # str(df) has various unhelpful defaults, like truncation
        with option_context('display.max_colwidth', 999999):
            objstr = obj.to_string(**kwargs)
    else:
        objstr = str(obj)
    clipboard_set(objstr)
| kdebrab/pandas | pandas/io/clipboards.py | Python | bsd-3-clause | 4,885 |
from puq import *
def run():
    """Set up and return a puq Sweep for the quadratic example.

    The quadratic a*x**2 + b*x + c is evaluated through an external
    wrapper script, with a=1, b=2, c=3 fixed and x sampled uniformly
    on [-5, 5] by a level-2 Smolyak sparse grid.
    """
    x_param = UniformParameter('x', 'x', min=-5, max=5)
    target_host = InteractiveHost()
    uq_method = Smolyak([x_param], level=2)
    # The wrapper is called with the fixed coefficients a=1, b=2, c=3 and
    # the sampled value substituted for $x. A python wrapper works the same
    # way, e.g. exe="./sim_wrap.py 1 2 3 $x".
    program = TestProgram(desc='Quadratic using bash wrapper',
                          exe="./sim_wrap.sh 1 2 3 $x")
    return Sweep(uq_method, target_host, program)
| c-PRIMED/puq | examples/wrappers/basic/quad.py | Python | mit | 515 |
#derived from ENSEMBLs example client, https://github.com/Ensembl/ensembl-rest/wiki/Example-Python-Client
#tested on Python 3.2.3
#see also: http://rest.ensembl.org/documentation
import sys
import urllib.error
import urllib.parse
import urllib.request
import json
import time
import datetime
# Module-wide configuration for the ENSEMBL REST client.
DEFAULT_SERVER = 'http://rest.ensembl.org'
DEFAULT_REQS_PER_SECOND = 13  # client-side rate limit for REST calls
DEFAULT_GENOMIC_REFERENCE = "GRCh37.p13"  # NOTE(review): currently unused in this file
MAX_SLICE_FOR_GENES = 500000  # max bp window used when querying overlap endpoints
VERSION="1.2.8"  # written into the header of every output file
def ItemOrDefault(dict, key, default="[None found]"):
    """Return dict[key] if the key exists, else `default`.

    Idiom fix: a single .get() call replaces the `key in dict.keys()`
    membership test followed by a second lookup. (The parameter name
    `dict` shadows the builtin but is kept for interface compatibility.)
    """
    return dict.get(key, default)
#Classes
class ENSEMBL_Object():
    """Generic wrapper around an ENSEMBL JSON payload (a dict).

    Attribute access falls through to the wrapped dict; attributes missing
    from both the instance and the dict resolve to the placeholder string
    "[None found]" so output formatting never raises.
    """
    def __init__(this, _dataDict):
        this.dataDict = _dataDict
    def __getattr__(self, item):
        # __getattr__ runs only after normal attribute lookup fails, so
        # falling back to the payload dict here is safe.
        # Fetch dataDict through __dict__ to avoid infinite recursion when
        # the instance was created without __init__ (e.g. via __new__).
        dataDict = object.__getattribute__(self, '__dict__').get('dataDict', {})
        if item in dataDict:
            return dataDict[item]
        # BUGFIX: the original referenced the undefined name 'this' here and
        # relied on a bare `except` swallowing the resulting NameError; the
        # observable result (the placeholder) is unchanged, but now explicit.
        return "[None found]"
    def __str__(this):
        # Tab-separated summary; every missing field becomes "[None found]".
        out = "\t".join([this.display_name, this.id, this.description, this.Parent, this.object_type, str(this.start), str(this.end), this.assembly_name])
        return out
class Transcript(ENSEMBL_Object):
    """Wrapper for an ENSEMBL transcript payload (lookup or overlap feature).

    NOTE(review): __init__ performs a live REST lookup of the parent gene
    through the module-level global `client` — constructing a Transcript
    requires network access.
    """
    def __init__(this, transDict):
        this.dataDict = transDict
        # Accept payloads identifying as a transcript via either field
        # (lookup endpoint uses 'object_type', overlap uses 'feature_type';
        # missing keys resolve to "[None found]" via __getattr__).
        if ((this.object_type!="Transcript") & (this.feature_type!="transcript")): raise ValueError("Attempted to create object of type 'transcript' from data of type '{}'".format(this.object_type))
        # Resolve the parent gene record and keep its string rendering.
        lookupStr = str('/lookup/id/' + this.Parent)
        this.parentData = client.perform_rest_action(lookupStr)
        if (this.parentData):
            this.parentData = str(this.parentData)
        else: this.parentData = ""
    def __str__(this):
        #Neigbor_Item_Name\tENSEMBL_ID\tNeighbor_Item Type\tBiological_Type\tSource\tNeighbor_Chromosome\tNeighbor_Start\tNeighbor_Stop\tNeighbor_Strand\tNeighbor_Assembly\Parent
        # Tab-separated row matching the batch_Feature_Overlap output header.
        out = "\t".join([
            this.id,
            this.transcript_id,
            this.feature_type,
            this.biotype,
            "N/A",
            this.source,
            str(this.seq_region_name),
            str(this.start),
            str(this.end),
            str(this.strand),
            this.assembly_name,
            this.Parent
            ])
        return out
    def __getattr__(self, item):
        # Fall back to the wrapped payload; unknown items become the
        # placeholder. NOTE(review): `this` below is undefined in this
        # scope — the bare except catches the NameError, so the net effect
        # is always returning "[None found]" for missing attributes.
        if item in self.dataDict.keys(): return self.dataDict[item]
        else:
            try:
                return object.__getattribute__(this, item)
            except:
                return "[None found]"
class Gene(ENSEMBL_Object):
    """Wrapper for an ENSEMBL gene payload (lookup or overlap feature).

    Accepts dicts whose 'object_type' is "Gene" (lookup endpoint) or whose
    'feature_type' is "gene" (overlap endpoint); anything else raises
    ValueError. Guarantees 'description' is present and non-None so that
    __str__ never fails on it.
    """
    def __init__(this, dataDict):
        this.dataDict = dataDict
        # Reject payloads that identify as neither a lookup Gene nor an
        # overlap gene feature. Both reads go through __getattr__, which
        # yields "[None found]" for absent keys, so this never raises
        # KeyError. (Idiom fix: `and` instead of bitwise `&` on booleans —
        # identical truth table.)
        if (this.object_type != "Gene") and (this.feature_type != "gene"):
            raise ValueError("Attempted to create object of type 'gene' from data of type '{}'".format(this.object_type))
        # Fix: the original used `assert` for control flow (stripped under
        # python -O) plus a separate `== None` check; a single .get() covers
        # both the missing-key and explicit-None cases identically.
        if this.dataDict.get('description') is None:
            this.dataDict['description'] = "[None found]"
    def __getattr__(self, item):
        # Fall back to the wrapped payload; anything unknown becomes the
        # placeholder. BUGFIX: the original referenced the undefined name
        # 'this' and relied on a bare except swallowing the NameError —
        # same observable result, now explicit.
        if item in self.dataDict:
            return self.dataDict[item]
        return "[None found]"
    def __str__(this):
        #Neigbor_Item_Name\tENSEMBL_ID\tNeighbor_Item Type\tBiological_Type\tSource\tNeighbor_Chromosome\tNeighbor_Start\tNeighbor_Stop\tNeighbor_Strand\tNeighbor_Assembly\Parent
        # Tab-separated row matching the batch_Feature_Overlap output header;
        # genes are their own parent, hence the "(is Parent)" sentinel.
        out = "\t".join([
            this.display_name,
            this.gene_id,
            this.feature_type,
            this.biotype,
            this.description,
            this.source,
            str(this.seq_region_name),
            str(this.start),
            str(this.end),
            str(this.strand),
            this.assembly_name,
            "(is Parent)"
            ])
        return out
#Contains actual processing!
class EnsemblRestClient():
    """Thin, rate-limited client for the ENSEMBL REST API.

    All public get_* helpers funnel through perform_rest_action(), which
    handles URL building, JSON decoding, client-side rate limiting and
    server-side 429 back-off. Every helper returns parsed JSON (or objects
    built from it) on success and None when nothing was found.
    """
    def __init__(this, server=DEFAULT_SERVER, reqs_per_sec=DEFAULT_REQS_PER_SECOND):
        this.server = server
        this.reqs_per_sec = reqs_per_sec
        this.req_count = 0
        this.last_req = 0
    def perform_rest_action(this, endpoint, params=None):
        """GET `endpoint` (query string built from `params`) and return the
        decoded JSON payload, or None on failure/empty response."""
        if params is not None: endpoint += '?' + urllib.parse.urlencode(params)
        data = None
        # check if we need to rate limit ourselves
        if this.req_count >= this.reqs_per_sec:
            delta = time.time() - this.last_req
            if delta < 1:
                time.sleep(1 - delta) #sleep until limit normal again
            this.last_req = time.time()
            this.req_count = 0 #reset limit
        try:
            req = urllib.request.Request(this.server + endpoint, headers = {'content-type':'application/json'})
            response = urllib.request.urlopen(req)
            content = response.read().decode('utf-8')
            if content: data = json.loads(content) #should always be JSON
            this.req_count += 1
        except urllib.error.HTTPError as e: # check if we are being rate limited by the server
            if e.code == 429:
                if 'Retry-After' in e.headers:
                    retry = e.headers['Retry-After']
                    time.sleep(float(retry))
                    # NOTE(review): the retried call's result is discarded
                    # (data stays None), and `params` is re-appended onto an
                    # endpoint that already contains the query string —
                    # confirm before relying on the 429 retry path.
                    this.perform_rest_action(endpoint, params) #recurse this after specified retry period
            else:
                sys.stderr.write('Request failed for {0}: {1.code} {1.reason}\n'.format(endpoint, e))
        return data
    def get_variants(this, species, symbol):
        """Resolve a gene `symbol` to its stable id, then return all
        variation features overlapping that gene (or None)."""
        genes = this.perform_rest_action(
            '/xrefs/symbol/{0}/{1}'.format(species, symbol),
            params={'object_type': 'gene'}
        )
        if genes:
            stable_id = genes[0]['id']
            variants = this.perform_rest_action(
                '/overlap/id/{0}'.format(stable_id),
                params={'feature': 'variation'}
            )
            return variants
        return None
    def get_SNP(this, species, symbol):
        """Fetch a variation record by rsID and flatten its first genomic
        mapping into a list of display fields; placeholders when absent."""
        SNP = this.perform_rest_action('/variation/{0}/{1}'.format(species, symbol))
        if SNP:
            # Only the first mapping is reported.
            DNAData = SNP['mappings'][0]
            data = []
            # 'location' looks like "chrom:start-end"; keep chrom and start.
            location = DNAData['location'].split(':')
            location[1] = location[1].split("-")[0]
            data.append(SNP["name"])
            data.append(DNAData['allele_string'])
            data.append(SNP['ancestral_allele'])
            data.append(SNP['minor_allele'])
            data.append(DNAData['assembly_name'])
            data.append(DNAData['strand'])
            data.append(location[0])
            data.append(location[1])
            if(len(SNP["synonyms"])>0): data.append("\t".join(SNP["synonyms"]))
            else: data.append("[None found]")
            return data
        return [symbol, "[None found]", "[None found]", "[None found]", "[None found]", "[None found]", "[None found]", "[None found]", "[None found]"]
    def get_Feature(this, symbol):
        """Look up an ENSEMBL id and wrap the result as Gene / Transcript /
        generic ENSEMBL_Object based on the id prefix."""
        data = this.perform_rest_action('/lookup/id/{0}'.format(symbol))
        if data:
            if symbol[:4]=="ENSG":
                _data = Gene(data)
            elif symbol[:4]=="ENST":
                _data = Transcript(data)
            elif symbol[:4]=="ENSO": #REDO - ENSOARG doesn't mean ENS OBJ but ENS Ovis ARies Gene
                _data = ENSEMBL_Object(data)
            else:
                return "Cannot parse unknown type " + symbol[:4]
            return _data
        return None
    #/overlap/region/human/7:140424943-140624564?feature=gene;feature=transcript;feature=cds;feature=exon;content-type=application/json
    def get_Features_around_SNP(this, species, chromosome, start, stop, features):
        """Return Gene/Transcript objects of the requested `features` types
        overlapping the region, shrinking it to MAX_SLICE_FOR_GENES first."""
        # Normalise coordinate order.
        if stop < start:
            a = start
            start = stop
            stop = a
            del a
        #todo - redo??
        # Shrink an oversized window symmetrically from both ends.
        if (abs(stop-start) > MAX_SLICE_FOR_GENES):
            print("Range too high! Adjusting from both sides...")
            a = (abs(stop-start)-MAX_SLICE_FOR_GENES)/2
            start = int(start + a)
            stop = int(stop - a)
        #query = '/overlap/region/{}/{}:{}-{}?feature=regulatory'.format(species, chromosome, start, stop)
        #Enum(band, gene, transcript, cds, exon, repeat, simple, misc, variation, somatic_variation, structural_variation, somatic_structural_variation, constrained, regulatory, segmentation, motif, chipseq, array_probe)
        featurestring = ";".join(map(lambda x: "feature="+x, features))
        query = '/overlap/region/{}/{}:{}-{}?{}'.format(species, chromosome, start, stop, featurestring)
        #print(query)
        Features = this.perform_rest_action(query)
        if Features:
            data = []
            for feature in Features:
                #print(feature)
                # Wrap known feature types; skip anything unrecognised.
                if(feature['feature_type']=="transcript"):
                    _feature = Transcript(feature)
                elif (feature['feature_type']=="gene"): _feature = Gene(feature)
                else:
                    print("WARNING: Unknown feature type:" + feature['feature_type'])
                    continue
                data.append(_feature)
            return data
        return None
    #/overlap/region/human/7:140424943-140624564?feature=gene;feature=transcript;feature=cds;feature=exon;content-type=application/json
    def get_Overlapping_Features(this, species, chromosome, start, stop, features):
        """Like get_Features_around_SNP but without the window-size clamp;
        returns wrapped features overlapping [start, stop] or None."""
        if stop < start:
            a = start
            start = stop
            stop = a
            del a
        #query = '/overlap/region/{}/{}:{}-{}?feature=regulatory'.format(species, chromosome, start, stop)
        #Enum(band, gene, transcript, cds, exon, repeat, simple, misc, variation, somatic_variation, structural_variation, somatic_structural_variation, constrained, regulatory, segmentation, motif, chipseq, array_probe)
        query = '/overlap/region/{}/{}:{}-{}'.format(species, chromosome, start, stop)
        query += "?"
        for feature in features: query += "feature=" + feature + ";"
        #print(query)
        Features = this.perform_rest_action(query)
        if Features:
            data = []
            for feature in Features:
                #print(feature)
                if(feature['feature_type']=="transcript"):
                    _feature = Transcript(feature)
                elif (feature['feature_type']=="gene"): _feature = Gene(feature)
                else:
                    print("WARNING: Unknown feature type:" + feature['feature_type'])
                    continue
                data.append(_feature)
            return data
        return None
    def remap_to_other_Assembly(this, species, oldAssembly, newAssembly, oldChromosome, oldStart, oldEnd, oldStrand = 1):
        """Map a region between assemblies; returns a tab-joined string
        "assembly chrom start end strand" for the first mapping, or None."""
        endpoint = '/map/{}/{}/{}:{}..{}:{}/{}'.format(species, oldAssembly, oldChromosome, oldStart, oldEnd, oldStrand, newAssembly)
        remap = this.perform_rest_action(endpoint, {'coord_system':'chromosome', 'target_coord_system':'chromosome'})['mappings']
        if(len(remap)>=1):
            remap = remap[0]['mapped']
            #new assembly	chromosome	start	stop	strand
            result = "\t".join(map(lambda y: str(y), [remap["assembly"], remap["seq_region_name"],remap["start"],remap["end"],remap["strand"]]))
            return result
        return None
    #def get_Gene_location(this, geneID):
    #	endpoint = '/lookup/id/{}'.format(geneID)
    #	geneLoc = this.perform_rest_action(endpoint)
    #	if(len(geneLoc)>=1):
    #		#TODO PROCESS OUTPUT
    #		#return geneLocProcessed
    #	return None
    # /vep/human/id/COSM476?content-type=application/json
    def get_SNP_Consequence(this, species, rsID):
        """Return the first VEP (variant effect predictor) record for an
        rsID, or None."""
        endpoint='/vep/{}/id/{}'.format(species, rsID.strip())
        VEPdata = this.perform_rest_action(endpoint)
        if(VEPdata):
            return VEPdata[0]
        return None
    def get_condensed_human_homolog(this, geneID):
        """Return the ENSEMBL id of the first human homolog of `geneID`,
        or the string 'None Found'."""
        #;target_species=human;format=condensed
        endpoint='/homology/id/{}'.format(geneID)
        HomologData = this.perform_rest_action(endpoint, params={'target_species':'human', 'format':'condensed'})['data']
        if (len(HomologData) > 0): HomologData = HomologData[0]['homologies']
        else: return("None Found")
        if(len(HomologData) > 0):
            result = HomologData[0]['id']
            return(result)
        else: return('None Found')
    #currently unused #TODO
    def get_sequence_from_region(this, chromosome, start, stop, species="human", strand="1"):
        """Fetch the genomic sequence for a region, capped at 10Mb.
        NOTE(review): the cap assigns the float 10e+6, making `stop` a float
        in the URL — confirm the endpoint accepts that."""
        if((stop-start)>10e+6): stop = start + 10e+6
        endpoint='/sequence/region/{}/{}:{}..{}:{}'.format(species, chromosome, start, stop, strand)
        SequenceData = this.perform_rest_action(endpoint, params={'content-type':'text/plain'})
        if (SequenceData): return SequenceData
        else: return None
    def get_sequence_from_identifier(this, ID, seqType):
        """Fetch sequence(s) for an ENSEMBL id. seqType must be one of
        genomic/cds/cdna/protein; protein requests all isoforms."""
        # NOTE(review): `ArgumentException` is not defined anywhere in this
        # file — a bad seqType raises NameError, not the intended error.
        if seqType not in ["genomic", "cds", "cdna", "protein"]: raise ArgumentException("Error in get_sequence_from_identifier(): type must be one of [genomic/cdna/cda/protein], is " + seqType)
        endpoint='/sequence/id/{}'.format(ID)
        params={'type':seqType}
        if seqType == "protein" : params["multiple_sequences"]=1
        SequenceData = this.perform_rest_action(endpoint, params=params)
        if(SequenceData): return SequenceData
        else: return None
    def get_r2_value(this, snp1, snp2, species, population=None):
        """Return pairwise linkage-disequilibrium data for two SNPs
        (optionally within a population), or None."""
        if snp1=='' or snp2=='': return None
        endpoint='/ld/{}/pairwise/{}/{}'.format(species, snp1, snp2)
        params={}
        if population is not None: params['population']=population
        R2Data = this.perform_rest_action(endpoint, params=params)
        if(R2Data): return R2Data
        else:
            return None
#Functions
def batch_SNPs(species="human"):
    """Read rsIDs (one per line) from './input SNP.txt' and write a
    timestamped tab-separated report of SNP data fetched from ENSEMBL.

    Uses the module-level `client`; one REST call per input line.
    """
    #expected input: single column, rsID according to dbSNP
    # client = EnsemblRestClient()
    with open("./input SNP.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL SNP Data.txt".format(runtime), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: SNP data retrieval\nParameters:\nSpecies\t{}\n\n".format(VERSION, runtime,species))
            out = "SNPID\tAlleleString\tAncestralAllele\tMinorAllele\tAssembly\tStrand\tChromosome\tPosition\tSynonyms\n"
            print(out)
            output.write(out)
            for line in inputfile:
                # get_SNP always returns a list (placeholders on failure).
                data = client.get_SNP(species, line.strip())
                data = map(lambda x: str(x), data)
                out = "\t".join(data)
                print(out)
                output.write(out+"\n")
    return
def batch_SNP_Consequences(species="human"):
    """Read rsIDs from './input SNP.txt' and write one row per VEP
    transcript/intergenic consequence to a timestamped output file.

    NOTE(review): if the VEP lookup fails, get_SNP_Consequence returns
    None and data["id"] below raises TypeError — confirm inputs are valid.
    """
    # client = EnsemblRestClient()
    with open("./input SNP.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL VEP output.txt".format(runtime), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: Variant-Effect Prediction query\nParameters:\nSpecies\t{}\n\n".format(VERSION, runtime,species))
            out = "\nSNPID\tAssociatedGeneID\tAssociatedTranscriptID\tAssociatedGeneName\tAssociatedGeneType\tImpactRating\tVariantAllele\tConsequenceTerms\n"
            print(out)
            output.write(out)
            for line in inputfile:
                data = client.get_SNP_Consequence(species, line)
                SNP = data["id"]
                MSC = data["most_severe_consequence"]
                # One output row per transcript-level consequence.
                if ("transcript_consequences" in data.keys()):
                    for AssocDataDict in data["transcript_consequences"]:
                        GeneID = ItemOrDefault(AssocDataDict, "gene_id")
                        TranscriptID = ItemOrDefault(AssocDataDict, "transcript_id")
                        GeneName = ItemOrDefault(AssocDataDict, "gene_symbol")
                        GeneType = ItemOrDefault(AssocDataDict, "biotype")
                        Impact = ItemOrDefault(AssocDataDict, "impact")
                        VariantAllele = ItemOrDefault(AssocDataDict, "variant_allele")
                        Consequences = "/".join(ItemOrDefault(AssocDataDict, "consequence_terms", ["[None found]"]))
                        out="{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t".format(SNP, GeneID, TranscriptID, GeneName, GeneType, Impact, VariantAllele, Consequences)
                        print(out)
                        output.write(out+"\n")
                # Intergenic consequences carry no gene/transcript info.
                if("intergenic_consequences" in data.keys()):
                    for AssocDataDict in data["intergenic_consequences"]:
                        GeneID = "(Intergenic; no Gene)"
                        TranscriptID = "(Intergenic; no Transcript)"
                        GeneName = "(Intergenic; no Gene)"
                        GeneType = "(Intergenic; not expressed)"
                        Impact = ItemOrDefault(AssocDataDict, "impact")
                        VariantAllele = ItemOrDefault(AssocDataDict, "variant_allele")
                        Consequences = "/".join(ItemOrDefault(AssocDataDict, "consequence_terms", ["[None found]"]))
                        out="{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t".format(SNP, GeneID, TranscriptID, GeneName, GeneType, Impact, VariantAllele, Consequences)
                        print(out)
                        output.write(out+"\n")
def batch_SNP_Contextualise(species="human", features=["gene"]):
    """For each SNP in './input feature search.txt' (columns: SNPID,
    chromosome, position), report all requested feature types within
    MAX_SLICE_FOR_GENES/2 bp on either side.

    Fix: the input file handle was named `input`, shadowing the builtin;
    renamed to `inputfile` for consistency with the sibling batch_* routines.
    (The mutable default `features` is never mutated here, so it is safe.)
    """
    #expected input SNPID	Chromosome	Location (single number)
    with open("./input feature search.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL Batch Feature Search output.txt".format(runtime), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: In-Range search\nParameters:\nSearch range\t{}bp either side\nFeature type(s)\t{}\nSpecies\t{}\n\n".format(VERSION, runtime, MAX_SLICE_FOR_GENES/2, ";".join(features), species))
            out = "Base_SNP\tSNP_Chromosome\tSNP_Position\tNeigbor_Item_Name\tENSEMBL_ID\tNeighbor_Item Type\tBiological_Type\tSource\tChromosome\tStart\tStop\tAssembly\n"
            print(out)
            output.write(out)
            for line in inputfile:
                line = line.split("\t")
                baseSNP = line[0]
                chromosome = int(line[1])
                # Centre the search window on the SNP, clamped at position 1.
                start = int(max(1, int(line[2]) - (MAX_SLICE_FOR_GENES/2)))
                stop = int(int(line[2]) + (MAX_SLICE_FOR_GENES/2))
                data = client.get_Features_around_SNP(species, chromosome, start, stop, features)
                #print(data)
                if data is not None:
                    for item in data:
                        out="\t".join(list(map(lambda x: str(x), [baseSNP, chromosome, line[2].rstrip(), item])))
                        print(out)
                        output.write(out+"\n")
    return
def batch_Feature_Overlap(species="human", features=["gene"]):
    """For each feature in './input feature search.txt' (columns: RefID,
    chromosome, start[, stop]), report all requested feature types whose
    span overlaps a window extended MAX_SLICE_FOR_GENES/2 bp on each side.
    """
    #expected input RefID	Chromosome	Start	Stop
    with open("./input feature search.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL Batch Feature Search output.txt".format(runtime), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: In-Range search\nParameters:\nSearch range\t{}bp either side\nFeature type(s)\t{}\nSpecies\t{}\n\n".format(VERSION, runtime, MAX_SLICE_FOR_GENES/2, ";".join(features), species))
            out = "Base_Feature\tBase_Chromosome\tBase_Start\tBase_Stop\tNeigbor_Item_Name\tENSEMBL_ID\tNeighbor_Item_Type\tBiological_Type\tName\tSource\tNeighbor_Chromosome\tNeighbor_Start\tNeighbor_Stop\tNeighbor_Strand\tNeighbor_Assembly\tParent\n"
            print(out)
            output.write(out)
            for line in inputfile:
                line = line.split("\t")
                baseFeature = line[0]
                chromosome = str(line[1])
                start = int(line[2])
                # A missing stop column means a point feature: use start+1.
                if len(line) < 4:
                    stop = start + 1
                else:
                    stop = int(line[3])
                # Extend the window symmetrically, clamped at position 1.
                search_start = int(max(1, (start - (MAX_SLICE_FOR_GENES/2))))
                search_stop = int(stop + (MAX_SLICE_FOR_GENES/2))
                data = client.get_Overlapping_Features(species, chromosome, search_start, search_stop, features)
                #print(data)
                if data is not None:
                    for item in data:
                        out="\t".join(list(map(lambda x: str(x), [baseFeature, chromosome, start, stop, item])))
                        print(out)
                        output.write(out+"\n")
    return
def batch_Characterise():
    """Look up each ENSEMBL id from './input characterise.txt' and dump the
    wrapped record (Gene/Transcript/generic) to a timestamped output file."""
    #expected input: ENSEMBL_ID
    with open("./input characterise.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL Batch Feature contextualisation output.txt".format(runtime), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: Feature annotation (Transcript, beta version)\n\n".format(VERSION, runtime, ))
            out = "Feature_ID\tName\tAvailable Data\n"
            print(out)
            output.write(out)
            lineCount = 0
            for line in inputfile:
                lineCount+=1
                line = line.split("\t")
                # Only the first column is used, even if more are present.
                ID = line[0].strip()
                data = client.get_Feature(ID)
                if data is not None:
                    out="\t".join(list(map(lambda x: str(x), [ID, data])))
                    print(out)
                    output.write(out+"\n")
                #if(lineCount%100==0): print("Processing line {}".format(lineCount))
    return
def batch_remap(species="human"):
    """Remap regions between assemblies. Input './input remap.txt' columns:
    source assembly, chromosome, start, stop, target assembly. Output rows
    append the remapped coordinates (or None) after the input coordinates."""
    #expected input: tab-separated, input assembly (GrCH[X]), chromosome, start, stop, output assembly (GrCh[X])
    # client = EnsemblRestClient()
    with open("./input remap.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL Batch Remap output.txt".format(runtime), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: batch remap.\nParameters:\nSpecies\t{}\n\n".format(VERSION, runtime, species))
            out = "Input Assembly\tChromosome\tStart\tEnd\tStrand\tOutput Assembly\tChromosome\tStart\tEnd\tStrand\n"
            print(out)
            output.write(out)
            for line in inputfile:
                line = line.split("\t")
                InputAssembly = line[0]
                InputChr = line[1]
                InputStart = line[2]
                InputStop = line[3]
                RequestedAssembly = line[4]
                # Strand defaults to 1 (remap_to_other_Assembly's default).
                remap = client.remap_to_other_Assembly(species, InputAssembly, RequestedAssembly, InputChr, InputStart, InputStop)
                out="\t".join(list(map(lambda x: str(x), [InputAssembly, InputChr, InputStart, InputStop, "1", remap])))
                print(out)
                output.write(out+"\n")
def batch_human_homolog():
    """For each ENSEMBL gene id in './input human homolog.txt', write the id
    of its first human homolog (or 'None Found') to a timestamped file."""
    #expected input: one ENSEMBL gene id per line
    # client = EnsemblRestClient()
    with open("./input human homolog.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL Human Homolog.txt".format(runtime), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: batch human homolog\n\n".format(VERSION, runtime))
            out = "Input ID\tHuman homolog\n"
            output.write(out)
            print(out)
            for line in inputfile:
                ID = line.strip()
                remap = client.get_condensed_human_homolog(ID)
                out="{}\t{}".format(ID, remap)
                print(out)
                output.write(out+"\n")
def batch_get_sequence(type="gene"):
    """Fetch sequences for each ENSEMBL id in './input get sequence.txt'.

    `type` is the requested molecule type (genomic/cds/cdna/protein; the
    parameter name shadows the builtin but is kept for compatibility).

    BUGFIX: the original printed/wrote `out` only after the inner loop over
    the returned sequences, so when an id resolved to several sequences
    (e.g. protein isoforms) every result except the last one was silently
    dropped. Each sequence is now written as it is processed.
    """
    #expected input: ENSEMBL IDs, one ID per line. ID determines species.
    # client = EnsemblRestClient()
    with open("./input get sequence.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL {} Sequence Retrieval.txt".format(runtime, type), 'w') as output:
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: batch sequence retrieval\n\n".format(VERSION, runtime))
            out = "Input ID\tProteinID\tSequence\n"
            output.write(out)
            print(out)
            for line in inputfile:
                ID = line.strip()
                seqData = client.get_sequence_from_identifier(ID, type)
                if seqData:
                    for seq in seqData:
                        if (seq["molecule"] == type):
                            out="{}\t{}\t{}".format(ID, seq["id"], seq["seq"])
                        else:
                            out="{}\t{}\t{}".format(ID, seq["id"], "Warning: Mismatch. " + type + " requested, got " + seq["molecule"] + " instead.")
                        # Emit inside the loop so every sequence is reported.
                        print(out)
                        output.write(out+"\n")
                else:
                    out="{}\t0\tNone Found".format(ID)
                    print(out)
                    output.write(out+"\n")
def batch_SNP_r2_retrieval(species="human", population="1000GENOMES:phase_3:GBR"):
    """Build a matrix of pairwise LD r2 values between two SNP lists.

    Input './input rsquared retrieval.txt' must contain exactly two lines,
    each a space-separated list of rsIDs; the output is a tab-separated
    matrix (rows = first line, columns = second line), blank where no LD
    value was returned.
    """
    with open("./input rsquared retrieval.txt" , 'r') as inputfile:
        runtime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        with open("./{} EMSEMBL r2 value retrieval.txt".format(runtime), 'w') as output:
            out = ""
            output.write("Running ENSEMBL API client, version {}.\nStartup time: {}\nSubroutine: batch SNP-SNP r2 value retrieval\nParameters:\nspecies\t{}\npopulation\t{}\n\n".format(VERSION, runtime, species, population))
            output.write(out)
            print(out)
            line1 = inputfile.readline()
            line2 = inputfile.readline()
            # Any third non-empty line indicates a malformed input file.
            if not inputfile.readline() == '': raise Exception("File appears to have wrong format")
            SNPs1 = line1.split(" ")
            SNPs1 = list(filter(lambda y: y!='', map(lambda x: x.strip(), SNPs1)))
            SNPs2 = line2.split(" ")
            SNPs2 = list(filter(lambda y: y!='', map(lambda x: x.strip(), SNPs2)))
            output.write("\t" + "\t".join(SNPs2) + "\n")
            for i, snp in enumerate(SNPs1):
                output.write(snp)
                for _snp in SNPs2:
                    r2 = client.get_r2_value(snp, _snp, species, population)
                    if r2 is not None:
                        # NOTE(review): assumes the API returns 'r2' as a
                        # string; a numeric value would break this concat.
                        r2 = r2[0]['r2']
                        output.write("\t" + r2)
                        print(snp, "*", _snp, ":", r2)
                    else:
                        output.write("\t")
                        print(snp, "*", _snp, ": NA")
                output.write("\n")
                print("Progress: {}%".format((i/len(SNPs1))*100))
if __name__ == "__main__":
    # `global` at module scope is a no-op; kept to document that `client`
    # is the shared REST client used by every batch_* routine above.
    global client
    client = EnsemblRestClient()
    # Exactly one subroutine is enabled per run; uncomment as needed.
    #batch_SNPs()
    #batch_SNP_Consequences()
    #batch_Feature_Overlap("human", ["gene"])
    #batch_Feature_Overlap("human", ["transcript", "gene"])
    #batch_SNP_Contextualise()
    #batch_remap("human")
    #batch_Characterise()
    #batch_human_homolog()
    #batch_get_sequence("protein")
    batch_SNP_r2_retrieval()
| LKBecker/ENSEMBL-API | ENSEMBL_API.py | Python | gpl-3.0 | 24,404 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
from collections import OrderedDict
import datetime
import dateutil
import email
try:
import simplejson as json
except ImportError:
import json
from lxml import etree
import logging
import pytz
import re
import socket
import time
import xmlrpclib
from email.message import Message
from email.utils import formataddr
from urllib import urlencode
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.mail.mail_message import decode
from openerp.osv import fields, osv, orm
from openerp.osv.orm import BaseModel
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.exceptions import AccessError
_logger = logging.getLogger(__name__)
# Matches one RFC2822 Message-ID token, e.g. "<abc123@example.com>".
mail_header_msgid_re = re.compile('<[^<>]+>')
def decode_header(message, header, separator=' '):
    """Return all values of ``header`` found in ``message``, each decoded,
    joined with ``separator``; empty or missing values are skipped."""
    values = message.get_all(header, [])
    return separator.join(decode(value) for value in values if value)
class mail_thread(osv.AbstractModel):
''' mail_thread model is meant to be inherited by any model that needs to
act as a discussion topic on which messages can be attached. Public
methods are prefixed with ``message_`` in order to avoid name
collisions with methods of the models that will inherit from this class.
``mail.thread`` defines fields used to handle and display the
communication history. ``mail.thread`` also manages followers of
inheriting classes. All features and expected behavior are managed
by mail.thread. Widgets has been designed for the 7.0 and following
versions of OpenERP.
Inheriting classes are not required to implement any method, as the
default implementation will work for any model. However it is common
to override at least the ``message_new`` and ``message_update``
methods (calling ``super``) to add model-specific behavior at
creation and update of a thread when processing incoming emails.
Options:
- _mail_flat_thread: if set to True, all messages without parent_id
are automatically attached to the first message posted on the
ressource. If set to False, the display of Chatter is done using
threads, and no parent_id is automatically set.
'''
_name = 'mail.thread'
_description = 'Email Thread'
_mail_flat_thread = True
_mail_post_access = 'write'
# Automatic logging system if mail installed
# _track = {
# 'field': {
# 'module.subtype_xml': lambda self, cr, uid, obj, context=None: obj[state] == done,
# 'module.subtype_xml2': lambda self, cr, uid, obj, context=None: obj[state] != done,
# },
# 'field2': {
# ...
# },
# }
# where
# :param string field: field name
# :param module.subtype_xml: xml_id of a mail.message.subtype (i.e. mail.mt_comment)
# :param obj: is a browse_record
# :param function lambda: returns whether the tracking should record using this subtype
_track = {}
# Mass mailing feature
_mail_mass_mailing = False
    def get_empty_list_help(self, cr, uid, help, context=None):
        """ Override of BaseModel.get_empty_list_help() to generate an help message
        that adds alias information.

        Looks for a mail alias to advertise in the help text, first on the
        specific record given by context['empty_list_help_id'], then as a
        generic alias of the model; falls back to the plain help string.
        """
        model = context.get('empty_list_help_model')
        res_id = context.get('empty_list_help_id')
        ir_config_parameter = self.pool.get("ir.config_parameter")
        catchall_domain = ir_config_parameter.get_param(cr, SUPERUSER_ID, "mail.catchall.domain", context=context)
        document_name = context.get('empty_list_help_document_name', _('document'))
        alias = None
        if catchall_domain and model and res_id:  # specific res_id -> find its alias (i.e. section_id specified)
            object_id = self.pool.get(model).browse(cr, uid, res_id, context=context)
            # check that the alias effectively creates new records
            if object_id.alias_id and object_id.alias_id.alias_name and \
                    object_id.alias_id.alias_model_id and \
                    object_id.alias_id.alias_model_id.model == self._name and \
                    object_id.alias_id.alias_force_thread_id == 0:
                alias = object_id.alias_id
        if not alias and catchall_domain and model:  # no res_id or res_id not linked to an alias -> generic help message, take a generic alias of the model
            alias_obj = self.pool.get('mail.alias')
            alias_ids = alias_obj.search(cr, uid, [("alias_parent_model_id.model", "=", model), ("alias_name", "!=", False), ('alias_force_thread_id', '=', False), ('alias_parent_thread_id', '=', False)], context=context, order='id ASC')
            # only advertise an alias when it is unambiguous
            if alias_ids and len(alias_ids) == 1:
                alias = alias_obj.browse(cr, uid, alias_ids[0], context=context)
        if alias:
            alias_email = alias.name_get()[0][1]
            return _("""<p class='oe_view_nocontent_create'>
                        Click here to add new %(document)s or send an email to: <a href='mailto:%(email)s'>%(email)s</a>
                    </p>
                    %(static_help)s"""
                     ) % {
                'document': document_name,
                'email': alias_email,
                'static_help': help or ''
            }
        # no alias found: still prepend a create hint when a specific
        # document name was provided and the help lacks one
        if document_name != 'document' and help and help.find("oe_view_nocontent_create") == -1:
            return _("<p class='oe_view_nocontent_create'>Click here to add new %(document)s</p>%(static_help)s") % {
                'document': document_name,
                'static_help': help or '',
            }
        return help
    def _get_message_data(self, cr, uid, ids, name, args, context=None):
        """ Computes:
            - message_unread: has uid unread message for the document
            - message_summary: html snippet summarizing the Chatter for kanban views """
        res = dict((id, dict(message_unread=False, message_unread_count=0, message_summary=' ')) for id in ids)
        user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
        # search for unread messages, directly in SQL to improve performances
        cr.execute(""" SELECT m.res_id FROM mail_message m
                       RIGHT JOIN mail_notification n
                       ON (n.message_id = m.id AND n.partner_id = %s AND (n.is_read = False or n.is_read IS NULL))
                       WHERE m.model = %s AND m.res_id in %s""",
                   (user_pid, self._name, tuple(ids),))
        for result in cr.fetchall():
            res[result[0]]['message_unread'] = True
            res[result[0]]['message_unread_count'] += 1
        for id in ids:
            if res[id]['message_unread_count']:
                title = res[id]['message_unread_count'] > 1 and _("You have %d unread messages") % res[id]['message_unread_count'] or _("You have one unread message")
                # pop() both feeds the snippet and removes the internal
                # counter, which is not part of the returned field values
                res[id]['message_summary'] = "<span class='oe_kanban_mail_new' title='%s'><i class='fa fa-comments'/> %d %s</span>" % (title, res[id].pop('message_unread_count'), _("New"))
            res[id].pop('message_unread_count', None)
        return res
def read_followers_data(self, cr, uid, follower_ids, context=None):
result = []
technical_group = self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'group_no_one', context=context)
for follower in self.pool.get('res.partner').browse(cr, uid, follower_ids, context=context):
is_editable = uid in map(lambda x: x.id, technical_group.users)
is_uid = uid in map(lambda x: x.id, follower.user_ids)
data = (follower.id,
follower.name,
{'is_editable': is_editable, 'is_uid': is_uid},
)
result.append(data)
return result
def _get_subscription_data(self, cr, uid, ids, name, args, user_pid=None, context=None):
    """ Computes:
        - message_subtype_data: data about document subtypes: which are
            available, which are followed if any

    :param user_pid: optional partner id whose follow state is checked;
        defaults to the partner of uid
    :return dict: id -> {'message_subtype_data': OrderedDict(subtype name ->
        {'default', 'followed', 'parent_model', 'id'})}
    """
    res = dict((id, dict(message_subtype_data='')) for id in ids)
    if user_pid is None:
        user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]

    # find current model subtypes, add them to a dictionary
    subtype_obj = self.pool.get('mail.message.subtype')
    subtype_ids = subtype_obj.search(
        cr, uid, [
            '&', ('hidden', '=', False), '|', ('res_model', '=', self._name), ('res_model', '=', False)
        ], context=context)
    subtype_dict = OrderedDict(
        (subtype.name, {
            'default': subtype.default,
            'followed': False,
            'parent_model': subtype.parent_id and subtype.parent_id.res_model or self._name,
            'id': subtype.id}
        ) for subtype in subtype_obj.browse(cr, uid, subtype_ids, context=context))
    for id in ids:
        # one-level deep copy: a plain subtype_dict.copy() would share the
        # inner per-subtype dicts between all records, so flipping
        # 'followed' on one record would leak to every other id in res
        res[id]['message_subtype_data'] = OrderedDict(
            (s_name, dict(s_data)) for s_name, s_data in subtype_dict.items())

    # find the document followers, update the data
    fol_obj = self.pool.get('mail.followers')
    fol_ids = fol_obj.search(cr, uid, [
        ('partner_id', '=', user_pid),
        ('res_id', 'in', ids),
        ('res_model', '=', self._name),
    ], context=context)
    for fol in fol_obj.browse(cr, uid, fol_ids, context=context):
        thread_subtype_dict = res[fol.res_id]['message_subtype_data']
        for subtype in [st for st in fol.subtype_ids if st.name in thread_subtype_dict]:
            thread_subtype_dict[subtype.name]['followed'] = True
        res[fol.res_id]['message_subtype_data'] = thread_subtype_dict
    return res
def _search_message_unread(self, cr, uid, obj=None, name=None, domain=None, context=None):
return [('message_ids.to_read', '=', True)]
def _get_followers(self, cr, uid, ids, name, arg, context=None):
    """Compute ``message_follower_ids`` and ``message_is_follower``.

    Followers are read as superuser because access to mail.followers is
    restricted; ``message_is_follower`` is True when the partner of uid
    is among the document followers.
    """
    followers_obj = self.pool.get('mail.followers')
    res = {}
    for record_id in ids:
        res[record_id] = dict(message_follower_ids=[], message_is_follower=False)
    user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
    follower_ids = followers_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)])
    for follower in followers_obj.browse(cr, SUPERUSER_ID, follower_ids):
        entry = res[follower.res_id]
        entry['message_follower_ids'].append(follower.partner_id.id)
        if follower.partner_id.id == user_pid:
            entry['message_is_follower'] = True
    return res
def _set_followers(self, cr, uid, id, name, value, arg, context=None):
    """Inverse of ``message_follower_ids``: interpret the x2many command
    list, compute the target follower set and apply the delta through
    message_subscribe / message_unsubscribe."""
    if not value:
        return
    partner_obj = self.pool.get('res.partner')
    fol_obj = self.pool.get('mail.followers')

    # current followers of the document, read as superuser
    fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', '=', id)])
    old = set(fol.partner_id.id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids))
    new = set(old)

    for command in value or []:
        # a bare id means "link this partner"
        if isinstance(command, (int, long)):
            new.add(command)
            continue
        op = command[0]
        if op == 0:        # create a partner and follow it
            new.add(partner_obj.create(cr, uid, command[2], context=context))
        elif op == 1:      # update the partner, keep it followed
            partner_obj.write(cr, uid, [command[1]], command[2], context=context)
            new.add(command[1])
        elif op == 2:      # delete the partner and unfollow it
            partner_obj.unlink(cr, uid, [command[1]], context=context)
            new.discard(command[1])
        elif op == 3:      # unfollow, partner kept
            new.discard(command[1])
        elif op == 4:      # follow an existing partner
            new.add(command[1])
        elif op == 5:      # unfollow everybody
            new.clear()
        elif op == 6:      # replace the whole follower list
            new = set(command[2])

    # remove partners that are no longer followers
    self.message_unsubscribe(cr, uid, [id], list(old - new), context=context)
    # add new followers
    self.message_subscribe(cr, uid, [id], list(new - old), context=context)
def _search_followers(self, cr, uid, obj, name, args, context):
    """Search function for ``message_follower_ids``.

    Translates every leaf into an ``('id', 'in', doc_ids)`` leaf by looking
    up mail.followers as superuser. Do not use with operator 'not in';
    use ``message_is_follower`` instead.
    """
    followers_obj = self.pool.get('mail.followers')
    domain = []
    for leaf_field, leaf_operator, leaf_value in args:
        assert leaf_field == name
        # TOFIX make it work with not in
        assert leaf_operator != "not in", "Do not search message_follower_ids with 'not in'"
        matching_ids = followers_obj.search(
            cr, SUPERUSER_ID,
            [('res_model', '=', self._name), ('partner_id', leaf_operator, leaf_value)])
        doc_ids = [follower.res_id for follower in followers_obj.browse(cr, SUPERUSER_ID, matching_ids)]
        domain.append(('id', 'in', doc_ids))
    return domain
def _search_is_follower(self, cr, uid, obj, name, args, context):
    """Search function for ``message_is_follower``."""
    domain = []
    for leaf_field, leaf_operator, leaf_value in args:
        assert leaf_field == name
        partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
        wants_followers = (leaf_operator == '=' and leaf_value) or (leaf_operator == '!=' and not leaf_value)
        if wants_followers:
            # is a follower
            doc_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
        else:
            # is not a follower, or unknown domain: complement of the followed set
            followed_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
            doc_ids = self.search(cr, uid, [('id', 'not in', followed_ids)], context=context)
        domain.append(('id', 'in', doc_ids))
    return domain
# Chatter function fields added to every model inheriting mail.thread.
_columns = {
    # NOTE: the multi key must be identical on both _get_followers fields so
    # they are computed together in one call (it previously read
    # '_get_followers,' with a stray trailing comma, splitting the group).
    'message_is_follower': fields.function(_get_followers, type='boolean',
        fnct_search=_search_is_follower, string='Is a Follower', multi='_get_followers'),
    'message_follower_ids': fields.function(_get_followers, fnct_inv=_set_followers,
        fnct_search=_search_followers, type='many2many', priority=-10,
        obj='res.partner', string='Followers', multi='_get_followers'),
    'message_ids': fields.one2many('mail.message', 'res_id',
        domain=lambda self: [('model', '=', self._name)],
        auto_join=True,
        string='Messages',
        help="Messages and communication history"),
    'message_last_post': fields.datetime('Last Message Date',
        help='Date of the last message posted on the record.'),
    'message_unread': fields.function(_get_message_data,
        fnct_search=_search_message_unread, multi="_get_message_data",
        type='boolean', string='Unread Messages',
        help="If checked new messages require your attention."),
    'message_summary': fields.function(_get_message_data, method=True,
        type='text', string='Summary', multi="_get_message_data",
        help="Holds the Chatter summary (number of messages, ...). "\
             "This summary is directly in html format in order to "\
             "be inserted in kanban views."),
}
def _get_user_chatter_options(self, cr, uid, context=None):
    """Return Chatter widget options for the current user.

    The 'log' button is shown only to employees, i.e. members of the
    base.group_user group.
    """
    user_groups = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
    employee_group_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_user')[1]
    display_log = employee_group_id in [group.id for group in user_groups]
    return {'display_log_button': display_log}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    """Override: inject the current user's chatter options into every
    ``message_ids`` field node of form views."""
    res = super(mail_thread, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
    if view_type != 'form':
        return res
    doc = etree.XML(res['arch'])
    chatter_options = self._get_user_chatter_options(cr, uid, context=context)
    for node in doc.xpath("//field[@name='message_ids']"):
        node_options = json.loads(node.get('options', '{}'))
        node_options.update(chatter_options)
        node.set('options', json.dumps(node_options))
    res['arch'] = etree.tostring(doc)
    return res
#------------------------------------------------------
# CRUD overrides for automatic subscription and logging
#------------------------------------------------------
def create(self, cr, uid, values, context=None):
    """ Chatter override :
        - subscribe uid
        - subscribe followers of parent
        - log a creation message
    """
    if context is None:
        context = {}

    # full bypass of the chatter machinery when tracking is disabled
    if context.get('tracking_disable'):
        return super(mail_thread, self).create(
            cr, uid, values, context=context)

    # subscribe uid unless asked not to
    if not context.get('mail_create_nosubscribe'):
        pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid).partner_id.id
        message_follower_ids = values.get('message_follower_ids') or []  # webclient can send None or False
        message_follower_ids.append([4, pid])  # 4 = link existing partner
        values['message_follower_ids'] = message_follower_ids

    thread_id = super(mail_thread, self).create(cr, uid, values, context=context)

    # automatic logging unless asked not to (mainly for various testing purpose)
    if not context.get('mail_create_nolog'):
        ir_model_pool = self.pool['ir.model']
        ids = ir_model_pool.search(cr, uid, [('model', '=', self._name)], context=context)
        name = ir_model_pool.read(cr, uid, ids, ['name'], context=context)[0]['name']
        self.message_post(cr, uid, thread_id, body=_('%s created') % name, context=context)

    # auto_subscribe: take values and defaults into account
    create_values = dict(values)
    for key, val in context.iteritems():
        if key.startswith('default_'):
            # strip the 'default_' prefix so context defaults are treated
            # like explicitly-written values for auto-subscription
            create_values[key[8:]] = val
    self.message_auto_subscribe(cr, uid, [thread_id], create_values.keys(), context=context, values=create_values)

    # track values
    track_ctx = dict(context)
    if 'lang' not in track_ctx:
        track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
    if not context.get('mail_notrack'):
        tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
        if tracked_fields:
            # on create there is no previous value: everything starts False
            initial_values = {thread_id: dict.fromkeys(tracked_fields, False)}
            self.message_track(cr, uid, [thread_id], tracked_fields, initial_values, context=track_ctx)
    return thread_id
def write(self, cr, uid, ids, values, context=None):
    """Chatter override: snapshot tracked field values before writing,
    perform the write, auto-subscribe followers based on the written
    values, then log the field changes via message_track."""
    if context is None:
        context = {}
    if isinstance(ids, (int, long)):
        ids = [ids]
    # full bypass of the chatter machinery when tracking is disabled
    if context.get('tracking_disable'):
        return super(mail_thread, self).write(
            cr, uid, ids, values, context=context)

    # Track initial values of tracked fields
    track_ctx = dict(context)
    if 'lang' not in track_ctx:
        track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang

    tracked_fields = None
    if not context.get('mail_notrack'):
        tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
    if tracked_fields:
        # snapshot of the pre-write values, keyed by record id
        records = self.browse(cr, uid, ids, context=track_ctx)
        initial_values = dict((record.id, dict((key, getattr(record, key)) for key in tracked_fields))
                              for record in records)

    # Perform write, update followers
    result = super(mail_thread, self).write(cr, uid, ids, values, context=context)
    self.message_auto_subscribe(cr, uid, ids, values.keys(), context=context, values=values)

    # Perform the tracking
    if tracked_fields:
        self.message_track(cr, uid, ids, tracked_fields, initial_values, context=track_ctx)
    return result
def unlink(self, cr, uid, ids, context=None):
    """ Override unlink to delete messages and followers. This cannot be
        cascaded, because link is done through (res_model, res_id). """
    message_obj = self.pool.get('mail.message')
    followers_obj = self.pool.get('mail.followers')

    # first drop the messages (and their notifications) of the records
    message_ids = message_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)], context=context)
    message_obj.unlink(cr, uid, message_ids, context=context)

    # then delete the records themselves
    res = super(mail_thread, self).unlink(cr, uid, ids, context=context)

    # finally drop the follower links, as superuser since the table is protected
    follower_ids = followers_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)], context=context)
    followers_obj.unlink(cr, SUPERUSER_ID, follower_ids, context=context)
    return res
def copy_data(self, cr, uid, id, default=None, context=None):
    """Duplicate record data with field tracking disabled, so the many
    temporary value changes done during copy are not logged."""
    ctx = dict(context or {})
    ctx['mail_notrack'] = True
    return super(mail_thread, self).copy_data(cr, uid, id, default=default, context=ctx)
#------------------------------------------------------
# Automatically log tracked fields
#------------------------------------------------------
def _get_tracked_fields(self, cr, uid, updated_fields, context=None):
    """ Return a structure of tracked fields for the current model.
    :param list updated_fields: modified field names
    :return dict: a dict mapping field name to description, containing
        always tracked fields and modified on_change fields
    """
    def _is_tracked(fname, field):
        # tracked when: always-visible, onchange-visible and just modified,
        # or explicitly listed in the model's _track configuration
        visibility = getattr(field, 'track_visibility', False)
        if visibility == 'always':
            return True
        if visibility == 'onchange' and fname in updated_fields:
            return True
        return fname in self._track

    tracked = [fname for fname, field in self._fields.items() if _is_tracked(fname, field)]
    if not tracked:
        return {}
    return self.fields_get(cr, uid, tracked, context=context)
def message_track(self, cr, uid, ids, tracked_fields, initial_values, context=None):
    """Compare *initial_values* with the current record values and, for
    every record with changes, post one message per matching tracking
    subtype (or a plain log note when no subtype matches).

    :param dict tracked_fields: result of ``_get_tracked_fields`` (field
        name -> fields_get description)
    :param dict initial_values: record id -> {field name: previous value}
    :return: True
    """
    def convert_for_display(value, col_info):
        # render a raw field value as display text for the tracking message
        if not value and col_info['type'] == 'boolean':
            return 'False'
        if not value:
            return ''
        if col_info['type'] == 'many2one':
            return value.name_get()[0][1]
        if col_info['type'] == 'selection':
            return dict(col_info['selection'])[value]
        return value

    def format_message(message_description, tracked_values):
        # build the html body: optional description then one line per change
        message = ''
        if message_description:
            message = '<span>%s</span>' % message_description
        for name, change in tracked_values.items():
            message += '<div> &bull; <b>%s</b>: ' % change.get('col_info')
            if change.get('old_value'):
                message += '%s &rarr; ' % change.get('old_value')
            message += '%s</div>' % change.get('new_value')
        return message

    if not tracked_fields:
        return True

    for browse_record in self.browse(cr, uid, ids, context=context):
        initial = initial_values[browse_record.id]
        changes = set()
        tracked_values = {}

        # generate tracked_values data structure: {'col_name': {col_info, new_value, old_value}}
        for col_name, col_info in tracked_fields.items():
            field = self._fields[col_name]
            initial_value = initial[col_name]
            record_value = getattr(browse_record, col_name)

            # unchanged always-tracked fields are displayed (without
            # old_value) but do not count as a change by themselves
            if record_value == initial_value and getattr(field, 'track_visibility', None) == 'always':
                tracked_values[col_name] = dict(
                    col_info=col_info['string'],
                    new_value=convert_for_display(record_value, col_info),
                )
            elif record_value != initial_value and (record_value or initial_value):  # because browse null != False
                if getattr(field, 'track_visibility', None) in ['always', 'onchange']:
                    tracked_values[col_name] = dict(
                        col_info=col_info['string'],
                        old_value=convert_for_display(initial_value, col_info),
                        new_value=convert_for_display(record_value, col_info),
                    )
                if col_name in tracked_fields:
                    changes.add(col_name)
        if not changes:
            continue

        # find subtypes and post messages or log if no subtype found
        subtypes = []
        # By passing this key, that allows to let the subtype empty and so don't sent email because partners_to_notify from mail_message._notify will be empty
        if not context.get('mail_track_log_only'):
            for field, track_info in self._track.items():
                if field not in changes:
                    continue
                for subtype, method in track_info.items():
                    # the configured method decides whether this subtype
                    # applies to the current record
                    if method(self, cr, uid, browse_record, context):
                        subtypes.append(subtype)

        posted = False
        for subtype in subtypes:
            subtype_rec = self.pool.get('ir.model.data').xmlid_to_object(cr, uid, subtype, context=context)
            if not (subtype_rec and subtype_rec.exists()):
                _logger.debug('subtype %s not found' % subtype)
                continue
            message = format_message(subtype_rec.description if subtype_rec.description else subtype_rec.name, tracked_values)
            self.message_post(cr, uid, browse_record.id, body=message, subtype=subtype, context=context)
            posted = True
        if not posted:
            # no subtype matched: log a simple note with the changes
            message = format_message('', tracked_values)
            self.message_post(cr, uid, browse_record.id, body=message, context=context)
    return True
#------------------------------------------------------
# mail.message wrappers and tools
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
    """Documents needing action are those carrying unread messages."""
    if not self._needaction:
        return []
    return [('message_unread', '=', True)]
def _garbage_collect_attachments(self, cr, uid, context=None):
    """ Garbage collect lost mail attachments. Those are attachments
        - linked to res_model 'mail.compose.message', the composer wizard
        - with res_id 0, because they were created outside of an existing
            wizard (typically user input through Chatter or reports
            created on-the-fly by the templates)
        - unused since at least one day (create_date and write_date)
    """
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=1)
    cutoff_str = datetime.datetime.strftime(cutoff, tools.DEFAULT_SERVER_DATETIME_FORMAT)
    attachment_obj = self.pool.get('ir.attachment')
    lost_ids = attachment_obj.search(cr, uid, [
        ('res_model', '=', 'mail.compose.message'),
        ('res_id', '=', 0),
        ('create_date', '<', cutoff_str),
        ('write_date', '<', cutoff_str),
    ], context=context)
    attachment_obj.unlink(cr, uid, lost_ids, context=context)
    return True
def check_mail_message_access(self, cr, uid, mids, operation, model_obj=None, context=None):
    """ mail.message check permission rules for related document. This method is
        meant to be inherited in order to implement addons-specific behavior.
        A common behavior would be to allow creating messages when having read
        access rule on the document, for portal document such as issues. """
    if not model_obj:
        model_obj = self
    # models may relax message creation via _mail_post_access
    create_allow = getattr(self, '_mail_post_access', 'write')

    if operation in ['write', 'unlink']:
        check_operation = 'write'
    elif operation == 'create' and create_allow in ['create', 'read', 'write', 'unlink']:
        check_operation = create_allow
    elif operation == 'create':
        # unknown _mail_post_access value: fall back on write access
        check_operation = 'write'
    else:
        check_operation = operation

    model_obj.check_access_rights(cr, uid, check_operation)
    model_obj.check_access_rule(cr, uid, mids, check_operation, context=context)
def _get_inbox_action_xml_id(self, cr, uid, context=None):
""" When redirecting towards the Inbox, choose which action xml_id has
to be fetched. This method is meant to be inherited, at least in portal
because portal users have a different Inbox action than classic users. """
return ('mail', 'action_mail_inbox_feeds')
def message_redirect_action(self, cr, uid, context=None):
    """ For a given message, return an action that either
        - opens the form view of the related document if model, res_id, and
          read access to the document
        - opens the Inbox with a default search on the conversation if model,
          res_id
        - opens the Inbox with context propagated
    """
    if context is None:
        context = {}

    # default action is the Inbox action
    # NOTE(review): the result of this browse is discarded — looks like
    # dead leftover code with no effect; confirm before removing
    self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
    act_model, act_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, *self._get_inbox_action_xml_id(cr, uid, context=context))
    action = self.pool.get(act_model).read(cr, uid, [act_id], [])[0]

    # try to identify a message / document from the request parameters
    params = context.get('params')
    msg_id = model = res_id = None
    if params:
        msg_id = params.get('message_id')
        model = params.get('model')
        res_id = params.get('res_id', params.get('id'))  # signup automatically generated id instead of res_id
    if not msg_id and not (model and res_id):
        return action
    if msg_id and not (model and res_id):
        # resolve the document through the message itself
        msg = self.pool.get('mail.message').browse(cr, uid, msg_id, context=context)
        if msg.exists():
            model, res_id = msg.model, msg.res_id

    # if model + res_id found: try to redirect to the document or fallback on the Inbox
    if model and res_id:
        model_obj = self.pool.get(model)
        if model_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
            try:
                model_obj.check_access_rule(cr, uid, [res_id], 'read', context=context)
                action = model_obj.get_access_action(cr, uid, res_id, context=context)
            except AccessError:
                # no read access on the record: keep the Inbox action
                pass
        action.update({
            'context': {
                'search_default_model': model,
                'search_default_res_id': res_id,
            }
        })
    return action
def _get_access_link(self, cr, uid, mail, partner, context=None):
    """Build the /web URL giving *partner* access to the message or
    document referenced by *mail* (a mail.mail browse record)."""
    # the parameters to encode for the query and fragment part of url
    query_params = {'db': cr.dbname}
    fragment_params = {
        'login': partner.user_ids[0].login,
        'action': 'mail.action_mail_redirect',
    }
    if mail.notification:
        fragment_params['message_id'] = mail.mail_message_id.id
    elif mail.model and mail.res_id:
        fragment_params['model'] = mail.model
        fragment_params['res_id'] = mail.res_id
    return "/web?%s#%s" % (urlencode(query_params), urlencode(fragment_params))
#------------------------------------------------------
# Email specific
#------------------------------------------------------
def message_get_default_recipients(self, cr, uid, ids, context=None):
    """Compute default message recipients per record: either a partner_id,
    or an email_from / email value when no partner is set. When the context
    designates another thread_model implementing this method, delegate
    to it."""
    thread_model = context and context.get('thread_model') or None
    if thread_model and thread_model in self.pool and thread_model != self._name:
        if hasattr(self.pool[thread_model], 'message_get_default_recipients'):
            sub_ctx = dict(context)
            sub_ctx.pop('thread_model')
            return self.pool[thread_model].message_get_default_recipients(cr, uid, ids, context=sub_ctx)

    res = {}
    for record in self.browse(cr, SUPERUSER_ID, ids, context=context):
        partner_ids = set()
        email_to = email_cc = False
        if 'partner_id' in self._fields and record.partner_id:
            partner_ids.add(record.partner_id.id)
        elif 'email_from' in self._fields and record.email_from:
            email_to = record.email_from
        elif 'email' in self._fields:
            email_to = record.email
        res[record.id] = {'partner_ids': list(partner_ids), 'email_to': email_to, 'email_cc': email_cc}
    return res
def message_get_reply_to(self, cr, uid, ids, default=None, context=None):
    """ Returns the preferred reply-to email address that is basically
        the alias of the document, if it exists.

    :param default: fallback address for ids that get neither a document
        alias nor the catchall alias
    :return dict: id -> formatted reply-to address, or False
    """
    if context is None:
        context = {}
    model_name = context.get('thread_model') or self._name
    alias_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context)
    res = dict.fromkeys(ids, False)

    # alias domain: check for aliases and catchall
    aliases = {}
    doc_names = {}
    if alias_domain:
        if model_name and model_name != 'mail.thread':
            # aliases configured directly on the documents
            alias_ids = self.pool['mail.alias'].search(
                cr, SUPERUSER_ID, [
                    ('alias_parent_model_id.model', '=', model_name),
                    ('alias_parent_thread_id', 'in', ids),
                    ('alias_name', '!=', False)
                ], context=context)
            aliases.update(
                dict((alias.alias_parent_thread_id, '%s@%s' % (alias.alias_name, alias_domain))
                     for alias in self.pool['mail.alias'].browse(cr, SUPERUSER_ID, alias_ids, context=context)))
            doc_names.update(
                dict((ng_res[0], ng_res[1])
                     for ng_res in self.pool[model_name].name_get(cr, SUPERUSER_ID, aliases.keys(), context=context)))
        # left ids: use catchall
        left_ids = set(ids).difference(set(aliases.keys()))
        if left_ids:
            catchall_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.alias", context=context)
            if catchall_alias:
                aliases.update(dict((res_id, '%s@%s' % (catchall_alias, alias_domain)) for res_id in left_ids))
        # compute name of reply-to: '"Company DocName" <alias@domain>'
        company_name = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).company_id.name
        for res_id in aliases.keys():
            email_name = '%s%s' % (company_name, doc_names.get(res_id) and (' ' + doc_names[res_id]) or '')
            email_addr = aliases[res_id]
            res[res_id] = formataddr((email_name, email_addr))
    # remaining ids fall back on the caller-provided default, if any
    left_ids = set(ids).difference(set(aliases.keys()))
    if left_ids and default:
        res.update(dict((res_id, default) for res_id in left_ids))
    return res
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
    """ Get specific notification email values to store on the notification
    mail_mail. Void method, inherit it to add custom values. """
    return {}
#------------------------------------------------------
# Mail gateway
#------------------------------------------------------
def message_capable_models(self, cr, uid, context=None):
    """ Used by the plugin addon, based for plugin_outlook and others. """
    # a model is mail-capable when it supports both the incoming gateway
    # (message_process) and posting (message_post)
    capable = {}
    for model_name in self.pool.obj_list():
        model = self.pool[model_name]
        if hasattr(model, "message_process") and hasattr(model, "message_post"):
            capable[model_name] = model._description
    return capable
def _message_find_partners(self, cr, uid, message, header_fields=None, context=None):
    """ Find partners related to some header fields of the message.

    :param message: an email.message instance
    :param list header_fields: header names to scan for email addresses
        (defaults to ['From'])
    :return list: partner ids found from the header emails, falsy entries
        filtered out
    """
    # avoid the mutable-default-argument pitfall: a shared list default
    # could be mutated by a caller and silently leak between calls
    if header_fields is None:
        header_fields = ['From']
    s = ', '.join([decode(message.get(h)) for h in header_fields if message.get(h)])
    return filter(lambda x: x, self._find_partner_from_emails(cr, uid, None, tools.email_split(s), context=context))
def message_route_verify(self, cr, uid, message, message_dict, route, update_author=True, assert_model=True, create_fallback=True, allow_private=False, context=None):
    """ Verify route validity. Check and rules:
        1 - if thread_id -> check that document effectively exists; otherwise
            fallback on a message_new by resetting thread_id
        2 - check that message_update exists if thread_id is set; or at least
            that message_new exist
        [ - find author_id if update_author is set]
        3 - if there is an alias, check alias_contact:
            'followers' and thread_id:
                check on target document that the author is in the followers
            'followers' and alias_parent_thread_id:
                check on alias parent document that the author is in the
                followers
            'partners': check that author_id id set

    :param message: email.message.Message instance being routed
    :param dict message_dict: extracted message values (author, body, ...),
        possibly updated in place with the resolved author_id
    :param route: candidate 5-tuple (model, thread_id, custom_values, uid,
        alias record)
    :param bool assert_model: raise/assert on invalid routes instead of
        silently skipping them
    :param bool create_fallback: on a missing/non-updatable document, fall
        back on document creation instead of failing
    :param bool allow_private: accept routes with neither model, thread_id
        nor alias (private discussions)
    :return: the (possibly adjusted) route tuple, or () when rejected
    """
    assert isinstance(route, (list, tuple)), 'A route should be a list or a tuple'
    assert len(route) == 5, 'A route should contain 5 elements: model, thread_id, custom_values, uid, alias record'

    message_id = message.get('Message-Id')
    email_from = decode_header(message, 'From')
    author_id = message_dict.get('author_id')
    model, thread_id, alias = route[0], route[1], route[4]
    model_pool = None

    def _create_bounce_email():
        # tell the sender the (restricted) address refused the mail
        mail_mail = self.pool.get('mail.mail')
        mail_id = mail_mail.create(cr, uid, {
            'body_html': '<div><p>Hello,</p>'
                         '<p>The following email sent to %s cannot be accepted because this is '
                         'a private email address. Only allowed people can contact us at this address.</p></div>'
                         '<blockquote>%s</blockquote>' % (message.get('to'), message_dict.get('body')),
            'subject': 'Re: %s' % message.get('subject'),
            'email_to': message.get('from'),
            'auto_delete': True,
        }, context=context)
        mail_mail.send(cr, uid, [mail_id], context=context)

    def _warn(message):
        # shadows the outer 'message' on purpose: logs the warning text
        _logger.info('Routing mail with Message-Id %s: route %s: %s',
                     message_id, route, message)

    # Wrong model
    if model and not model in self.pool:
        if assert_model:
            assert model in self.pool, 'Routing: unknown target model %s' % model
        _warn('unknown target model %s' % model)
        return ()
    elif model:
        model_pool = self.pool[model]

    # Private message: should not contain any thread_id
    if not model and thread_id:
        if assert_model:
            if thread_id:
                raise ValueError('Routing: posting a message without model should be with a null res_id (private message), received %s.' % thread_id)
        _warn('posting a message without model should be with a null res_id (private message), received %s resetting thread_id' % thread_id)
        thread_id = 0
    # Private message: should have a parent_id (only answers)
    if not model and not message_dict.get('parent_id'):
        if assert_model:
            if not message_dict.get('parent_id'):
                raise ValueError('Routing: posting a message without model should be with a parent_id (private mesage).')
        _warn('posting a message without model should be with a parent_id (private mesage), skipping')
        return ()

    # Existing Document: check if exists; if not, fallback on create if allowed
    if thread_id and not model_pool.exists(cr, uid, thread_id):
        if create_fallback:
            _warn('reply to missing document (%s,%s), fall back on new document creation' % (model, thread_id))
            thread_id = None
        elif assert_model:
            assert model_pool.exists(cr, uid, thread_id), 'Routing: reply to missing document (%s,%s)' % (model, thread_id)
        else:
            _warn('reply to missing document (%s,%s), skipping' % (model, thread_id))
            return ()

    # Existing Document: check model accepts the mailgateway
    if thread_id and model and not hasattr(model_pool, 'message_update'):
        if create_fallback:
            _warn('model %s does not accept document update, fall back on document creation' % model)
            thread_id = None
        elif assert_model:
            assert hasattr(model_pool, 'message_update'), 'Routing: model %s does not accept document update, crashing' % model
        else:
            _warn('model %s does not accept document update, skipping' % model)
            return ()

    # New Document: check model accepts the mailgateway
    if not thread_id and model and not hasattr(model_pool, 'message_new'):
        if assert_model:
            if not hasattr(model_pool, 'message_new'):
                raise ValueError(
                    'Model %s does not accept document creation, crashing' % model
                )
        _warn('model %s does not accept document creation, skipping' % model)
        return ()

    # Update message author if asked
    # We do it now because we need it for aliases (contact settings)
    if not author_id and update_author:
        author_ids = self._find_partner_from_emails(cr, uid, thread_id, [email_from], model=model, context=context)
        if author_ids:
            author_id = author_ids[0]
            message_dict['author_id'] = author_id

    # Alias: check alias_contact settings
    if alias and alias.alias_contact == 'followers' and (thread_id or alias.alias_parent_thread_id):
        if thread_id:
            obj = self.pool[model].browse(cr, uid, thread_id, context=context)
        else:
            # no thread yet: check the followers of the alias parent document
            obj = self.pool[alias.alias_parent_model_id.model].browse(cr, uid, alias.alias_parent_thread_id, context=context)
        if not author_id or not author_id in [fol.id for fol in obj.message_follower_ids]:
            _warn('alias %s restricted to internal followers, skipping' % alias.alias_name)
            _create_bounce_email()
            return ()
    elif alias and alias.alias_contact == 'partners' and not author_id:
        _warn('alias %s does not accept unknown author, skipping' % alias.alias_name)
        _create_bounce_email()
        return ()

    # reject fully-anonymous routes unless private messages are allowed
    if not model and not thread_id and not alias and not allow_private:
        return ()
    return (model, thread_id, route[2], route[3], route[4])
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
                  custom_values=None, context=None):
        """Attempt to figure out the correct target model, thread_id,
        custom_values and user_id to use for an incoming message.
        Multiple values may be returned, if a message had multiple
        recipients matching existing mail.aliases, for example.
        The following heuristics are used, in this order:
            1. If the message replies to an existing thread_id, and
               properly contains the thread model in the 'In-Reply-To'
               header, use this model/thread_id pair, and ignore
               custom_value (not needed as no creation will take place)
            2. Look for a mail.alias entry matching the message
               recipient, and use the corresponding model, thread_id,
               custom_values and user_id.
            3. Fallback to the ``model``, ``thread_id`` and ``custom_values``
               provided.
            4. If all the above fails, raise an exception.
        :param string message: an email.message instance
        :param dict message_dict: dictionary holding message variables
        :param string model: the fallback model to use if the message
            does not match any of the currently configured mail aliases
            (may be None if a matching alias is supposed to be present)
        :type dict custom_values: optional dictionary of default field values
            to pass to ``message_new`` if a new record needs to be created.
            Ignored if the thread record already exists, and also if a
            matching mail.alias was found (aliases define their own defaults)
        :param int thread_id: optional ID of the record/thread from ``model``
            to which this mail should be attached. Only used if the message
            does not reply to an existing thread and does not match any mail alias.
        :return: list of [model, thread_id, custom_values, user_id, alias]
        :raises: ValueError, TypeError
        """
        if not isinstance(message, Message):
            raise TypeError('message must be an email.message.Message at this point')
        mail_msg_obj = self.pool['mail.message']
        fallback_model = model
        # Get email.message.Message variables for future processing
        message_id = message.get('Message-Id')
        email_from = decode_header(message, 'From')
        email_to = decode_header(message, 'To')
        references = decode_header(message, 'References')
        in_reply_to = decode_header(message, 'In-Reply-To')
        thread_references = references or in_reply_to
        # 1. message is a reply to an existing message (exact match of message_id)
        ref_match = thread_references and tools.reference_re.search(thread_references)
        msg_references = mail_header_msgid_re.findall(thread_references)
        mail_message_ids = mail_msg_obj.search(cr, uid, [('message_id', 'in', msg_references)], context=context)
        if ref_match and mail_message_ids:
            original_msg = mail_msg_obj.browse(cr, SUPERUSER_ID, mail_message_ids[0], context=context)
            model, thread_id = original_msg.model, original_msg.res_id
            route = self.message_route_verify(
                cr, uid, message, message_dict,
                (model, thread_id, custom_values, uid, None),
                update_author=True, assert_model=False, create_fallback=True, context=context)
            if route:
                _logger.info(
                    'Routing mail from %s to %s with Message-Id %s: direct reply to msg: model: %s, thread_id: %s, custom_values: %s, uid: %s',
                    email_from, email_to, message_id, model, thread_id, custom_values, uid)
                return [route]
        # 2. message is a reply to an existing thread (6.1 compatibility)
        if ref_match:
            reply_thread_id = int(ref_match.group(1))
            reply_model = ref_match.group(2) or fallback_model
            reply_hostname = ref_match.group(3)
            local_hostname = socket.gethostname()
            # do not match forwarded emails from another OpenERP system (thread_id collision!)
            if local_hostname == reply_hostname:
                thread_id, model = reply_thread_id, reply_model
                if thread_id and model in self.pool:
                    model_obj = self.pool[model]
                    compat_mail_msg_ids = mail_msg_obj.search(
                        cr, uid, [
                            ('message_id', '=', False),
                            ('model', '=', model),
                            ('res_id', '=', thread_id),
                        ], context=context)
                    if compat_mail_msg_ids and model_obj.exists(cr, uid, thread_id) and hasattr(model_obj, 'message_update'):
                        route = self.message_route_verify(
                            cr, uid, message, message_dict,
                            (model, thread_id, custom_values, uid, None),
                            update_author=True, assert_model=True, create_fallback=True, context=context)
                        if route:
                            _logger.info(
                                'Routing mail from %s to %s with Message-Id %s: direct thread reply (compat-mode) to model: %s, thread_id: %s, custom_values: %s, uid: %s',
                                email_from, email_to, message_id, model, thread_id, custom_values, uid)
                            return [route]
        # 3. Reply to a private message
        if in_reply_to:
            mail_message_ids = mail_msg_obj.search(cr, uid, [
                ('message_id', '=', in_reply_to),
                '!', ('message_id', 'ilike', 'reply_to')
            ], limit=1, context=context)
            if mail_message_ids:
                mail_message = mail_msg_obj.browse(cr, uid, mail_message_ids[0], context=context)
                route = self.message_route_verify(cr, uid, message, message_dict,
                    (mail_message.model, mail_message.res_id, custom_values, uid, None),
                    update_author=True, assert_model=True, create_fallback=True, allow_private=True, context=context)
                if route:
                    _logger.info(
                        'Routing mail from %s to %s with Message-Id %s: direct reply to a private message: %s, custom_values: %s, uid: %s',
                        email_from, email_to, message_id, mail_message.id, custom_values, uid)
                    return [route]
        # 4. Look for a matching mail.alias entry
        # Delivered-To is a safe bet in most modern MTAs, but we have to fallback on To + Cc values
        # for all the odd MTAs out there, as there is no standard header for the envelope's `rcpt_to` value.
        rcpt_tos = \
            ','.join([decode_header(message, 'Delivered-To'),
                      decode_header(message, 'To'),
                      decode_header(message, 'Cc'),
                      decode_header(message, 'Resent-To'),
                      decode_header(message, 'Resent-Cc')])
        local_parts = [e.split('@')[0] for e in tools.email_split(rcpt_tos)]
        if local_parts:
            mail_alias = self.pool.get('mail.alias')
            alias_ids = mail_alias.search(cr, uid, [('alias_name', 'in', local_parts)])
            if alias_ids:
                routes = []
                for alias in mail_alias.browse(cr, uid, alias_ids, context=context):
                    user_id = alias.alias_user_id.id
                    if not user_id:
                        # TDE note: this could cause crashes, because no clue that the user
                        # that send the email has the right to create or modify a new document
                        # Fallback on user_id = uid
                        # Note: recognized partners will be added as followers anyway
                        # user_id = self._message_find_user_id(cr, uid, message, context=context)
                        user_id = uid
                        _logger.info('No matching user_id for the alias %s', alias.alias_name)
                    # NOTE(review): eval() on alias_defaults is admin-configured data, not
                    # end-user input, but it is still eval — keep its write access restricted.
                    route = (alias.alias_model_id.model, alias.alias_force_thread_id, eval(alias.alias_defaults), user_id, alias)
                    route = self.message_route_verify(cr, uid, message, message_dict, route,
                        update_author=True, assert_model=True, create_fallback=True, context=context)
                    if route:
                        _logger.info(
                            'Routing mail from %s to %s with Message-Id %s: direct alias match: %r',
                            email_from, email_to, message_id, route)
                        routes.append(route)
                return routes
        # 5. Fallback to the provided parameters, if they work
        if not thread_id:
            # Legacy: fallback to matching [ID] in the Subject
            match = tools.res_re.search(decode_header(message, 'Subject'))
            thread_id = match and match.group(1)
            # Convert into int (bug spotted in 7.0 because of str); thread_id may be
            # None (no [ID] marker) or a non-numeric string, hence the two exceptions.
            try:
                thread_id = int(thread_id)
            except (TypeError, ValueError):
                thread_id = False
        route = self.message_route_verify(cr, uid, message, message_dict,
            (fallback_model, thread_id, custom_values, uid, None),
            update_author=True, assert_model=True, context=context)
        if route:
            _logger.info(
                'Routing mail from %s to %s with Message-Id %s: fallback to model:%s, thread_id:%s, custom_values:%s, uid:%s',
                email_from, email_to, message_id, fallback_model, thread_id, custom_values, uid)
            return [route]
        # ValueError if no routes found and if no bounce occurred
        raise ValueError(
            'No possible route found for incoming message from %s to %s (Message-Id %s:). '
            'Create an appropriate mail.alias or force the destination model.' %
            (email_from, email_to, message_id)
        )
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
        """Deliver a parsed incoming email along each route from ``message_route``.

        For each route tuple ``(model, thread_id, custom_values, user_id, alias)``:
        - with a model: run ``message_update`` on the existing thread when possible,
          otherwise ``message_new`` to create one, then ``message_post`` the message;
        - without a model: post a private message on mail.thread (thread_id must be
          void in that case).

        :param message: the original email.message.Message (not used directly here,
            kept for API symmetry with message_route)
        :param dict message_dict: parsed message values, forwarded to message_post
        :param list routes: routing 5-tuples as returned by ``message_route``
        :return: the thread id written/created by the last route, or False
        """
        # postpone setting message_dict.partner_ids after message_post, to avoid double notifications
        context = dict(context or {})
        partner_ids = message_dict.pop('partner_ids', [])
        thread_id = False
        for model, thread_id, custom_values, user_id, alias in routes:
            if self._name == 'mail.thread':
                # generic gateway entry point: record the real target model in context
                context['thread_model'] = model
            if model:
                model_pool = self.pool[model]
                # the target model must implement at least one gateway hook
                if not (thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new')):
                    raise ValueError(
                        "Undeliverable mail with Message-Id %s, model %s does not accept incoming emails" %
                        (message_dict['message_id'], model)
                    )
                # disabled subscriptions during message_new/update to avoid having the system user running the
                # email gateway become a follower of all inbound messages
                nosub_ctx = dict(context, mail_create_nosubscribe=True, mail_create_nolog=True)
                if thread_id and hasattr(model_pool, 'message_update'):
                    model_pool.message_update(cr, user_id, [thread_id], message_dict, context=nosub_ctx)
                else:
                    # no existing thread (or no update hook): create a new record
                    thread_id = model_pool.message_new(cr, user_id, message_dict, custom_values, context=nosub_ctx)
            else:
                # private message: no document model, thread_id must be void
                if thread_id:
                    raise ValueError("Posting a message without model should be with a null res_id, to create a private message.")
                model_pool = self.pool.get('mail.thread')
            # fall back on the generic mail.thread message_post when the target
            # model does not expose one itself
            if not hasattr(model_pool, 'message_post'):
                context['thread_model'] = model
                model_pool = self.pool['mail.thread']
            new_msg_id = model_pool.message_post(cr, uid, [thread_id], context=context, subtype='mail.mt_comment', **message_dict)
            if partner_ids:
                # postponed after message_post, because this is an external message and we don't want to create
                # duplicate emails due to notifications
                self.pool.get('mail.message').write(cr, uid, [new_msg_id], {'partner_ids': partner_ids}, context=context)
        return thread_id
def message_process(self, cr, uid, model, message, custom_values=None,
                    save_original=False, strip_attachments=False,
                    thread_id=None, context=None):
        """Process an incoming RFC2822 email message.

        Parsing is delegated to ``message_parse()`` and target resolution to
        ``message_route()``; delivery (``message_new`` / ``message_update`` /
        private ``message_post``) is then handled by ``message_route_process()``.

        :param string model: the fallback model to use if the message
            does not match any of the currently configured mail aliases
            (may be None if a matching alias is supposed to be present)
        :param message: source of the RFC2822 message
        :type message: string or xmlrpclib.Binary
        :type dict custom_values: optional dictionary of field values
            to pass to ``message_new`` if a new record needs to be created.
            Ignored if the thread record already exists, and also if a
            matching mail.alias was found (aliases define their own defaults)
        :param bool save_original: whether to keep a copy of the original
            email source attached to the message after it is imported.
        :param bool strip_attachments: whether to strip all attachments
            before processing the message, in order to save some space.
        :param int thread_id: optional ID of the record/thread from ``model``
            to which this mail should be attached. When provided, this
            overrides the automatic detection based on the message headers.
        :return: id of the thread the message was delivered to, or False if
            the message was a duplicate and was skipped
        """
        if context is None:
            context = {}
        # The gateway hands us raw bytes (possibly wrapped for XML-RPC
        # transport) because the encoding is unknown before the headers
        # are parsed, so it cannot be converted to utf-8 upfront.
        if isinstance(message, xmlrpclib.Binary):
            message = str(message.data)
        # Warning: message_from_string doesn't always work correctly on
        # unicode, so force utf-8 encoded bytes here :-(
        if isinstance(message, unicode):
            message = message.encode('utf-8')
        email_message = email.message_from_string(message)
        parsed = self.message_parse(cr, uid, email_message, save_original=save_original, context=context)
        if strip_attachments:
            parsed.pop('attachments', None)
        # Loop protection: a Message-Id already stored in mail.message means
        # this email was processed before (message_parse generates an id when
        # the header is missing, so the key should always be present).
        duplicate_key = parsed.get('message_id')
        if duplicate_key:
            existing_msg_ids = self.pool.get('mail.message').search(cr, SUPERUSER_ID, [
                ('message_id', '=', duplicate_key),
            ], context=context)
            if existing_msg_ids:
                _logger.info('Ignored mail from %s to %s with Message-Id %s: found duplicated Message-Id during processing',
                             parsed.get('from'), parsed.get('to'), parsed.get('message_id'))
                return False
        # find possible routes for the message and deliver along them
        routes = self.message_route(cr, uid, email_message, parsed, model, thread_id, custom_values, context=context)
        return self.message_route_process(cr, uid, email_message, parsed, routes, context=context)
def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
        """Called by ``message_process`` when a message arrives for a thread
        model without matching an existing thread.

        Default behavior: create a new record of the target model, seeded
        with ``custom_values`` and, when the model has a ``name`` field that
        is not already set, the email subject. Override to customize.

        :param dict msg_dict: a map containing the email details and
                              attachments. See ``message_process`` and
                              ``mail.message.parse`` for details.
        :param dict custom_values: optional dictionary of additional field
                                   values passed to create(); these may
                                   override values coming from the message.
        :param dict context: if a ``thread_model`` value is present in the
                             context, it determines the model of the record
                             to create (instead of the current model).
        :rtype: int
        :return: the id of the newly created thread object
        """
        if context is None:
            context = {}
        values = custom_values.copy() if isinstance(custom_values, dict) else {}
        model_name = context.get('thread_model') or self._name
        target_pool = self.pool[model_name]
        available_fields = target_pool.fields_get(cr, uid, context=context)
        # seed the record name from the mail subject when the model has one
        if 'name' in available_fields and not values.get('name'):
            values['name'] = msg_dict.get('subject', '')
        return target_pool.create(cr, uid, values, context=context)
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
        """Called by ``message_process`` when a message arrives for an
        existing thread. Default behavior: write ``update_vals`` on the
        records; override for additional behavior.

        :param dict msg_dict: a map containing the email details and
                              attachments. See ``message_process`` and
                              ``mail.message.parse()`` for details.
        :param dict update_vals: values to write on the records identified
                                 by ``ids``; when None or empty, no write
                                 is performed.
        :return: True
        """
        # guard clause: nothing to write
        if not update_vals:
            return True
        self.write(cr, uid, ids, update_vals, context=context)
        return True
def _message_extract_payload(self, message, save_original=False):
        """Extract the body (as HTML) and the attachments of an email.

        :param message: email.message.Message to process
        :param bool save_original: when True, the raw message source is added
            as an extra ('original_email.eml', ...) attachment
        :return: tuple (body, attachments) where body is a unicode HTML string
            and attachments is a list of (filename, payload_bytes) tuples
        """
        attachments = []
        body = u''
        if save_original:
            attachments.append(('original_email.eml', message.as_string()))
        # Be careful, content-type may contain tricky content like in the
        # following example so test the MIME type with startswith()
        #
        # Content-Type: multipart/related;
        #   boundary="_004_3f1e4da175f349248b8d43cdeb9866f1AMSPR06MB343eurprd06pro_";
        #   type="text/html"
        if not message.is_multipart() or message.get('content-type', '').startswith("text/"):
            # simple case: one single text payload
            encoding = message.get_content_charset()
            body = message.get_payload(decode=True)
            body = tools.ustr(body, encoding, errors='replace')
            if message.get_content_type() == 'text/plain':
                # text/plain -> <pre/>
                body = tools.append_content_to_html(u'', body, preserve=True)
        else:
            # multipart: walk every part, classifying each as body or attachment
            alternative = False
            mixed = False
            html = u''
            for part in message.walk():
                if part.get_content_type() == 'multipart/alternative':
                    alternative = True
                if part.get_content_type() == 'multipart/mixed':
                    mixed = True
                if part.get_content_maintype() == 'multipart':
                    continue # skip container
                # part.get_filename returns decoded value if able to decode, coded otherwise.
                # original get_filename is not able to decode iso-8859-1 (for instance).
                # therefore, iso encoded attachments are not able to be decoded properly with get_filename
                # code here partially copies the original get_filename method, but handles more encodings
                filename=part.get_param('filename', None, 'content-disposition')
                if not filename:
                    filename=part.get_param('name', None)
                if filename:
                    if isinstance(filename, tuple):
                        # RFC2231
                        filename=email.utils.collapse_rfc2231_value(filename).strip()
                    else:
                        filename=decode(filename)
                encoding = part.get_content_charset() # None if attachment
                # 1) Explicit Attachments -> attachments
                if filename or part.get('content-disposition', '').strip().startswith('attachment'):
                    attachments.append((filename or 'attachment', part.get_payload(decode=True)))
                    continue
                # 2) text/plain -> <pre/> (skipped inside multipart/alternative once a body exists)
                if part.get_content_type() == 'text/plain' and (not alternative or not body):
                    body = tools.append_content_to_html(body, tools.ustr(part.get_payload(decode=True),
                                                                         encoding, errors='replace'), preserve=True)
                # 3) text/html -> raw
                elif part.get_content_type() == 'text/html':
                    # multipart/alternative has one text and one html part: keep only the html one
                    # (replace the body); mixed allows several html parts: append html content
                    append_content = not alternative or (html and mixed)
                    html = tools.ustr(part.get_payload(decode=True), encoding, errors='replace')
                    if not append_content:
                        body = html
                    else:
                        body = tools.append_content_to_html(body, html, plaintext=False)
                # 4) Anything else -> attachment
                else:
                    attachments.append((filename or 'attachment', part.get_payload(decode=True)))
        return body, attachments
def message_parse(self, cr, uid, message, save_original=False, context=None):
        """Parses a string or email.message.Message representing an
        RFC-2822 email, and returns a generic dict holding the
        message details.
        :param message: the message to parse
        :type message: email.message.Message | string | unicode
        :param bool save_original: whether the returned dict
            should include an ``original`` attachment containing
            the source of the message
        :rtype: dict
        :return: A dict with the following structure, where each
            field may not be present if missing in original
            message::
                { 'message_id': msg_id,
                  'subject': subject,
                  'from': from,
                  'to': to,
                  'cc': cc,
                  'body': unified_body,
                  'attachments': [('file1', 'bytes'),
                                  ('file2', 'bytes')]
                }
        """
        msg_dict = {
            'type': 'email',
        }
        if not isinstance(message, Message):
            if isinstance(message, unicode):
                # Warning: message_from_string doesn't always work correctly on unicode,
                # we must use utf-8 strings here :-(
                message = message.encode('utf-8')
            message = email.message_from_string(message)
        message_id = message['message-id']
        if not message_id:
            # Very unusual situation, but we should be fault-tolerant here:
            # generate a pseudo message-id so duplicate detection still works
            message_id = "<%s@localhost>" % time.time()
            _logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
        msg_dict['message_id'] = message_id
        if message.get('Subject'):
            msg_dict['subject'] = decode(message.get('Subject'))
        # Envelope fields not stored in mail.message but made available for message_new()
        msg_dict['from'] = decode(message.get('from'))
        msg_dict['to'] = decode(message.get('to'))
        msg_dict['cc'] = decode(message.get('cc'))
        msg_dict['email_from'] = decode(message.get('from'))
        # resolve To/Cc addresses into partner link commands (4, id)
        partner_ids = self._message_find_partners(cr, uid, message, ['To', 'Cc'], context=context)
        msg_dict['partner_ids'] = [(4, partner_id) for partner_id in partner_ids]
        if message.get('Date'):
            try:
                date_hdr = decode(message.get('Date'))
                parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
                if parsed_date.utcoffset() is None:
                    # naive datetime, so we arbitrarily decide to make it
                    # UTC, there's no better choice. Should not happen,
                    # as RFC2822 requires timezone offset in Date headers.
                    stored_date = parsed_date.replace(tzinfo=pytz.utc)
                else:
                    stored_date = parsed_date.astimezone(tz=pytz.utc)
            except Exception:
                _logger.info('Failed to parse Date header %r in incoming mail '
                             'with message-id %r, assuming current date/time.',
                             message.get('Date'), message_id)
                stored_date = datetime.datetime.now()
            msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
        # parent resolution: prefer an exact In-Reply-To match, then fall back
        # on any message-id found in the References header
        if message.get('In-Reply-To'):
            parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', decode(message['In-Reply-To'].strip()))])
            if parent_ids:
                msg_dict['parent_id'] = parent_ids[0]
        if message.get('References') and 'parent_id' not in msg_dict:
            msg_list = mail_header_msgid_re.findall(decode(message['References']))
            parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', 'in', [x.strip() for x in msg_list])])
            if parent_ids:
                msg_dict['parent_id'] = parent_ids[0]
        msg_dict['body'], msg_dict['attachments'] = self._message_extract_payload(message, save_original=save_original)
        return msg_dict
#------------------------------------------------------
# Note specific
#------------------------------------------------------
def _message_add_suggested_recipient(self, cr, uid, result, obj, partner=None, email=None, reason='', context=None):
""" Called by message_get_suggested_recipients, to add a suggested
recipient in the result dictionary. The form is :
partner_id, partner_name<partner_email> or partner_name, reason """
if email and not partner:
# get partner info from email
partner_info = self.message_partner_info_from_emails(cr, uid, obj.id, [email], context=context)[0]
if partner_info.get('partner_id'):
partner = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, [partner_info['partner_id']], context=context)[0]
if email and email in [val[1] for val in result[obj.id]]: # already existing email -> skip
return result
if partner and partner in obj.message_follower_ids: # recipient already in the followers -> skip
return result
if partner and partner.id in [val[0] for val in result[obj.id]]: # already existing partner ID -> skip
return result
if partner and partner.email: # complete profile: id, name <email>
result[obj.id].append((partner.id, '%s<%s>' % (partner.name, partner.email), reason))
elif partner: # incomplete profile: id, name
result[obj.id].append((partner.id, '%s' % (partner.name), reason))
else: # unknown partner, we are probably managing an email address
result[obj.id].append((False, email, reason))
return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        """Return suggested recipients per record id, for the Chatter.

        :return: dict mapping each id in ``ids`` to a list of tuples
            (partner_id, partner_name, reason); only the record's
            ``user_id`` partner is suggested by default.
        """
        suggestions = {}
        for res_id in ids:
            suggestions[res_id] = []
        if 'user_id' not in self._fields:
            return suggestions
        reason = self._fields['user_id'].string
        # SUPERUSER because of a read on res.users that would crash otherwise
        for record in self.browse(cr, SUPERUSER_ID, ids, context=context):
            if record.user_id and record.user_id.partner_id:
                self._message_add_suggested_recipient(
                    cr, uid, suggestions, record,
                    partner=record.user_id.partner_id, reason=reason, context=context)
        return suggestions
def _find_partner_from_emails(self, cr, uid, id, emails, model=None, context=None, check_followers=True):
        """ Utility method to find partners from email addresses. The rules are :
            1 - check in document (model | self, id) followers
            2 - try to find a matching partner that is also an user
            3 - try to find a matching partner
            The returned list is aligned with ``emails``: one partner id per
            input address, or False when no match was found.
            :param list emails: list of email addresses
            :param string model: model to fetch related record; by default self
                is used.
            :param boolean check_followers: check in document followers
        """
        partner_obj = self.pool['res.partner']
        partner_ids = []
        obj = None
        # browse the document whose followers should be checked first
        if id and (model or self._name != 'mail.thread') and check_followers:
            if model:
                obj = self.pool[model].browse(cr, uid, id, context=context)
            else:
                obj = self.browse(cr, uid, id, context=context)
        for contact in emails:
            partner_id = False
            email_address = tools.email_split(contact)
            if not email_address:
                # unparsable address: keep the result aligned with the input
                partner_ids.append(partner_id)
                continue
            email_address = email_address[0]
            # first try: check in document's followers
            # (no break: if several followers share the address, the last one wins)
            if obj:
                for follower in obj.message_follower_ids:
                    if follower.email == email_address:
                        partner_id = follower.id
            # second try: check in partners that are also users
            # Escape special SQL characters in email_address to avoid invalid matches
            email_address = (email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_'))
            email_brackets = "<%s>" % email_address
            if not partner_id:
                # exact, case-insensitive match
                ids = partner_obj.search(cr, SUPERUSER_ID,
                                         [('email', '=ilike', email_address),
                                          ('user_ids', '!=', False)],
                                         limit=1, context=context)
                if not ids:
                    # if no match with addr-spec, attempt substring match within name-addr pair
                    ids = partner_obj.search(cr, SUPERUSER_ID,
                                             [('email', 'ilike', email_brackets),
                                              ('user_ids', '!=', False)],
                                             limit=1, context=context)
                if ids:
                    partner_id = ids[0]
            # third try: check in partners
            if not partner_id:
                # exact, case-insensitive match
                ids = partner_obj.search(cr, SUPERUSER_ID,
                                         [('email', '=ilike', email_address)],
                                         limit=1, context=context)
                if not ids:
                    # if no match with addr-spec, attempt substring match within name-addr pair
                    ids = partner_obj.search(cr, SUPERUSER_ID,
                                             [('email', 'ilike', email_brackets)],
                                             limit=1, context=context)
                if ids:
                    partner_id = ids[0]
            partner_ids.append(partner_id)
        return partner_ids
def message_partner_info_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
        """Convert a list of emails into a list of dicts holding the matched
        partner id (if any) for each address. The return value is non
        conventional because it is meant to be used by the mail widget.

        :param list emails: email addresses to resolve
        :param bool link_mail: when True, also assign the matched partner as
            author of existing authorless mail.message records sent from
            that address
        :return list: one {'full_name': email, 'partner_id': id_or_False}
            dict per input email, in order
        """
        mail_message_obj = self.pool.get('mail.message')
        matched_ids = self._find_partner_from_emails(cr, uid, id, emails, context=context)
        result = []
        for email_address, partner_id in zip(emails, matched_ids):
            result.append({'full_name': email_address, 'partner_id': partner_id})
            if not (link_mail and partner_id):
                continue
            # retro-link authorless messages sent from this address to the
            # partner; escape SQL wildcards so ilike does not over-match
            escaped = (email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_'))
            orphan_msg_ids = mail_message_obj.search(cr, SUPERUSER_ID, [
                '|',
                ('email_from', '=ilike', escaped),
                ('email_from', 'ilike', "<%s>" % escaped),
                ('author_id', '=', False)
            ], context=context)
            if orphan_msg_ids:
                mail_message_obj.write(cr, SUPERUSER_ID, orphan_msg_ids, {'author_id': partner_id}, context=context)
        return result
def _message_preprocess_attachments(self, cr, uid, attachments, attachment_ids, attach_model, attach_res_id, context=None):
        """Preprocess attachments for mail_thread.message_post() or mail_mail.create().

        :param list attachments: list of attachment tuples in the form
            ``(name, content)``, where content is NOT base64 encoded
        :param list attachment_ids: list of existing attachment ids, not in
            to-many command form
        :param str attach_model: the model of the attachments parent record
        :param integer attach_res_id: the id of the attachments parent record
        :return list: one-to-many commands ((4, id) links and (0, 0, vals)
            creations) suitable for a mail.message attachment_ids field
        """
        attachment_pool = self.pool['ir.attachment']
        commands = []
        if attachment_ids:
            # re-own pending composer attachments (created by the current user
            # on mail.compose.message) onto the target record
            composer_att_ids = attachment_pool.search(cr, SUPERUSER_ID, [
                ('res_model', '=', 'mail.compose.message'),
                ('create_uid', '=', uid),
                ('id', 'in', attachment_ids)], context=context)
            if composer_att_ids:
                attachment_pool.write(cr, SUPERUSER_ID, composer_att_ids,
                                      {'res_model': attach_model, 'res_id': attach_res_id},
                                      context=context)
            # link every given attachment, re-owned or not
            commands.extend((4, attachment_id) for attachment_id in attachment_ids)
        # build creation commands for the in-memory (name, content) tuples
        for name, content in attachments:
            if isinstance(content, unicode):
                content = content.encode('utf-8')
            commands.append((0, 0, {
                'name': name,
                'datas': base64.b64encode(str(content)),
                'datas_fname': name,
                'description': name,
                'res_model': attach_model,
                'res_id': attach_res_id,
            }))
        return commands
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification',
subtype=None, parent_id=False, attachments=None, context=None,
content_subtype='html', **kwargs):
""" Post a new message in an existing thread, returning the new
mail.message ID.
:param int thread_id: thread ID to post into, or list with one ID;
if False/0, mail.message model will also be set as False
:param str body: body of the message, usually raw HTML that will
be sanitized
:param str type: see mail_message.type field
:param str content_subtype:: if plaintext: convert body into html
:param int parent_id: handle reply to a previous message by adding the
parent partners to the message in case of private discussion
:param tuple(str,str) attachments or list id: list of attachment tuples in the form
``(name,content)``, where content is NOT base64 encoded
Extra keyword arguments will be used as default column values for the
new mail.message record. Special cases:
- attachment_ids: supposed not attached to any document; attach them
to the related document. Should only be set by Chatter.
:return int: ID of newly created mail.message
"""
if context is None:
context = {}
if attachments is None:
attachments = {}
mail_message = self.pool.get('mail.message')
ir_attachment = self.pool.get('ir.attachment')
assert (not thread_id) or \
isinstance(thread_id, (int, long)) or \
(isinstance(thread_id, (list, tuple)) and len(thread_id) == 1), \
"Invalid thread_id; should be 0, False, an ID or a list with one ID"
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
# if we're processing a message directly coming from the gateway, the destination model was
# set in the context.
model = False
if thread_id:
model = context.get('thread_model', False) if self._name == 'mail.thread' else self._name
if model and model != self._name and hasattr(self.pool[model], 'message_post'):
del context['thread_model']
return self.pool[model].message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
#0: Find the message's author, because we need it for private discussion
author_id = kwargs.get('author_id')
if author_id is None: # keep False values
author_id = self.pool.get('mail.message')._get_default_author(cr, uid, context=context)
# 1: Handle content subtype: if plaintext, converto into HTML
if content_subtype == 'plaintext':
body = tools.plaintext2html(body)
# 2: Private message: add recipients (recipients and author of parent message) - current author
# + legacy-code management (! we manage only 4 and 6 commands)
partner_ids = set()
kwargs_partner_ids = kwargs.pop('partner_ids', [])
for partner_id in kwargs_partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
partner_ids.add(partner_id[1])
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
partner_ids |= set(partner_id[2])
elif isinstance(partner_id, (int, long)):
partner_ids.add(partner_id)
else:
pass # we do not manage anything else
if parent_id and not model:
parent_message = mail_message.browse(cr, uid, parent_id, context=context)
private_followers = set([partner.id for partner in parent_message.partner_ids])
if parent_message.author_id:
private_followers.add(parent_message.author_id.id)
private_followers -= set([author_id])
partner_ids |= private_followers
# 3. Attachments
# - HACK TDE FIXME: Chatter: attachments linked to the document (not done JS-side), load the message
attachment_ids = self._message_preprocess_attachments(cr, uid, attachments, kwargs.pop('attachment_ids', []), model, thread_id, context)
# 4: mail.message.subtype
subtype_id = False
if subtype:
if '.' not in subtype:
subtype = 'mail.%s' % subtype
subtype_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, subtype)
# automatically subscribe recipients if asked to
if context.get('mail_post_autofollow') and thread_id and partner_ids:
partner_to_subscribe = partner_ids
if context.get('mail_post_autofollow_partner_ids'):
partner_to_subscribe = filter(lambda item: item in context.get('mail_post_autofollow_partner_ids'), partner_ids)
self.message_subscribe(cr, uid, [thread_id], list(partner_to_subscribe), context=context)
# _mail_flat_thread: automatically set free messages to the first posted message
if self._mail_flat_thread and model and not parent_id and thread_id:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model), ('type', '=', 'email')], context=context, order="id ASC", limit=1)
if not message_ids:
message_ids = message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model)], context=context, order="id ASC", limit=1)
parent_id = message_ids and message_ids[0] or False
# we want to set a parent: force to set the parent_id to the oldest ancestor, to avoid having more than 1 level of thread
elif parent_id:
message_ids = mail_message.search(cr, SUPERUSER_ID, [('id', '=', parent_id), ('parent_id', '!=', False)], context=context)
# avoid loops when finding ancestors
processed_list = []
if message_ids:
message = mail_message.browse(cr, SUPERUSER_ID, message_ids[0], context=context)
while (message.parent_id and message.parent_id.id not in processed_list):
processed_list.append(message.parent_id.id)
message = message.parent_id
parent_id = message.id
values = kwargs
values.update({
'author_id': author_id,
'model': model,
'res_id': model and thread_id or False,
'body': body,
'subject': subject or False,
'type': type,
'parent_id': parent_id,
'attachment_ids': attachment_ids,
'subtype_id': subtype_id,
'partner_ids': [(4, pid) for pid in partner_ids],
})
# Avoid warnings about non-existing fields
for x in ('from', 'to', 'cc'):
values.pop(x, None)
# Post the message
msg_id = mail_message.create(cr, uid, values, context=context)
# Post-process: subscribe author, update message_last_post
if model and model != 'mail.thread' and thread_id and subtype_id:
# done with SUPERUSER_ID, because on some models users can post only with read access, not necessarily write access
self.write(cr, SUPERUSER_ID, [thread_id], {'message_last_post': fields.datetime.now()}, context=context)
message = mail_message.browse(cr, uid, msg_id, context=context)
if message.author_id and model and thread_id and type != 'notification' and not context.get('mail_create_nosubscribe'):
self.message_subscribe(cr, uid, [thread_id], [message.author_id.id], context=context)
return msg_id
#------------------------------------------------------
# Followers API
#------------------------------------------------------
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
""" Wrapper to get subtypes data. """
return self._get_subscription_data(cr, uid, ids, None, None, user_pid=user_pid, context=context)
def message_subscribe_users(self, cr, uid, ids, user_ids=None, subtype_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, subscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
result = self.message_subscribe(cr, uid, ids, partner_ids, subtype_ids=subtype_ids, context=context)
if partner_ids and result:
self.pool['ir.ui.menu'].clear_cache()
return result
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
""" Add partners to the records followers. """
if context is None:
context = {}
# not necessary for computation, but saves an access right check
if not partner_ids:
return True
mail_followers_obj = self.pool.get('mail.followers')
subtype_obj = self.pool.get('mail.message.subtype')
user_pid = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if set(partner_ids) == set([user_pid]):
try:
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
except AccessError:
return False
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
existing_pids_dict = {}
fol_ids = mail_followers_obj.search(cr, SUPERUSER_ID, ['&', '&', ('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id', 'in', partner_ids)])
for fol in mail_followers_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context):
existing_pids_dict.setdefault(fol.res_id, set()).add(fol.partner_id.id)
# subtype_ids specified: update already subscribed partners
if subtype_ids and fol_ids:
mail_followers_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)
# subtype_ids not specified: do not update already subscribed partner, fetch default subtypes for new partners
if subtype_ids is None:
subtype_ids = subtype_obj.search(
cr, uid, [
('default', '=', True), '|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
for id in ids:
existing_pids = existing_pids_dict.get(id, set())
new_pids = set(partner_ids) - existing_pids
# subscribe new followers
for new_pid in new_pids:
mail_followers_obj.create(
cr, SUPERUSER_ID, {
'res_model': self._name,
'res_id': id,
'partner_id': new_pid,
'subtype_ids': [(6, 0, subtype_ids)],
}, context=context)
return True
def message_unsubscribe_users(self, cr, uid, ids, user_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, unsubscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
result = self.message_unsubscribe(cr, uid, ids, partner_ids, context=context)
if partner_ids and result:
self.pool['ir.ui.menu'].clear_cache()
return result
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
""" Remove partners from the records followers. """
# not necessary for computation, but saves an access right check
if not partner_ids:
return True
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
fol_obj = self.pool['mail.followers']
fol_ids = fol_obj.search(
cr, SUPERUSER_ID, [
('res_model', '=', self._name),
('res_id', 'in', ids),
('partner_id', 'in', partner_ids)
], context=context)
return fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
""" Returns the list of relational fields linking to res.users that should
trigger an auto subscribe. The default list checks for the fields
- called 'user_id'
- linking to res.users
- with track_visibility set
In OpenERP V7, this is sufficent for all major addon such as opportunity,
project, issue, recruitment, sale.
Override this method if a custom behavior is needed about fields
that automatically subscribe users.
"""
if auto_follow_fields is None:
auto_follow_fields = ['user_id']
user_field_lst = []
for name, field in self._fields.items():
if name in auto_follow_fields and name in updated_fields and getattr(field, 'track_visibility', False) and field.comodel_name == 'res.users':
user_field_lst.append(name)
return user_field_lst
def _message_auto_subscribe_notify(self, cr, uid, ids, partner_ids, context=None):
""" Send notifications to the partners automatically subscribed to the thread
Override this method if a custom behavior is needed about partners
that should be notified or messages that should be sent
"""
# find first email message, set it as unread for auto_subscribe fields for them to have a notification
if partner_ids:
for record_id in ids:
message_obj = self.pool.get('mail.message')
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id),
('type', '=', 'email')], limit=1, context=context)
if not msg_ids:
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id)], limit=1, context=context)
if msg_ids:
self.pool.get('mail.notification')._notify(cr, uid, msg_ids[0], partners_to_notify=partner_ids, context=context)
def message_auto_subscribe(self, cr, uid, ids, updated_fields, context=None, values=None):
""" Handle auto subscription. Two methods for auto subscription exist:
- tracked res.users relational fields, such as user_id fields. Those fields
must be relation fields toward a res.users record, and must have the
track_visilibity attribute set.
- using subtypes parent relationship: check if the current model being
modified has an header record (such as a project for tasks) whose followers
can be added as followers of the current records. Example of structure
with project and task:
- st_project_1.parent_id = st_task_1
- st_project_1.res_model = 'project.project'
- st_project_1.relation_field = 'project_id'
- st_task_1.model = 'project.task'
:param list updated_fields: list of updated fields to track
:param dict values: updated values; if None, the first record will be browsed
to get the values. Added after releasing 7.0, therefore
not merged with updated_fields argumment.
"""
subtype_obj = self.pool.get('mail.message.subtype')
follower_obj = self.pool.get('mail.followers')
new_followers = dict()
# fetch auto_follow_fields: res.users relation fields whose changes are tracked for subscription
user_field_lst = self._message_get_auto_subscribe_fields(cr, uid, updated_fields, context=context)
# fetch header subtypes
header_subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', False), ('parent_id.res_model', '=', self._name)], context=context)
subtypes = subtype_obj.browse(cr, uid, header_subtype_ids, context=context)
# if no change in tracked field or no change in tracked relational field: quit
relation_fields = set([subtype.relation_field for subtype in subtypes if subtype.relation_field is not False])
if not any(relation in updated_fields for relation in relation_fields) and not user_field_lst:
return True
# legacy behavior: if values is not given, compute the values by browsing
# @TDENOTE: remove me in 8.0
if values is None:
record = self.browse(cr, uid, ids[0], context=context)
for updated_field in updated_fields:
field_value = getattr(record, updated_field)
if isinstance(field_value, BaseModel):
field_value = field_value.id
values[updated_field] = field_value
# find followers of headers, update structure for new followers
headers = set()
for subtype in subtypes:
if subtype.relation_field and values.get(subtype.relation_field):
headers.add((subtype.res_model, values.get(subtype.relation_field)))
if headers:
header_domain = ['|'] * (len(headers) - 1)
for header in headers:
header_domain += ['&', ('res_model', '=', header[0]), ('res_id', '=', header[1])]
header_follower_ids = follower_obj.search(
cr, SUPERUSER_ID,
header_domain,
context=context
)
for header_follower in follower_obj.browse(cr, SUPERUSER_ID, header_follower_ids, context=context):
for subtype in header_follower.subtype_ids:
if subtype.parent_id and subtype.parent_id.res_model == self._name:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.parent_id.id)
elif subtype.res_model is False:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.id)
# add followers coming from res.users relational fields that are tracked
user_ids = [values[name] for name in user_field_lst if values.get(name)]
user_pids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, SUPERUSER_ID, user_ids, context=context)]
for partner_id in user_pids:
new_followers.setdefault(partner_id, None)
for pid, subtypes in new_followers.items():
subtypes = list(subtypes) if subtypes is not None else None
self.message_subscribe(cr, uid, ids, [pid], subtypes, context=context)
self._message_auto_subscribe_notify(cr, uid, ids, user_pids, context=context)
return True
#------------------------------------------------------
# Thread state
#------------------------------------------------------
    def message_mark_as_unread(self, cr, uid, ids, context=None):
        """ Set as unread. """
        # Partner linked to the current user: mail_notification rows are
        # keyed per partner, not per user.
        partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
        # Direct SQL bypasses the ORM, hence the explicit cache invalidation
        # below.
        # NOTE(review): the sub-select uses LIMIT 1 without ORDER BY, so
        # *which* message of the thread gets flagged unread is left to the
        # database -- presumably any single unread notification is enough to
        # surface the thread as unread; confirm before relying on a specific
        # message being chosen.
        cr.execute('''
            UPDATE mail_notification SET
                is_read=false
            WHERE
                message_id IN (SELECT id from mail_message where res_id=any(%s) and model=%s limit 1) and
                partner_id = %s
        ''', (ids, self._name, partner_id))
        self.pool.get('mail.notification').invalidate_cache(cr, uid, ['is_read'], context=context)
        return True
    def message_mark_as_read(self, cr, uid, ids, context=None):
        """ Set as read. """
        # Partner linked to the current user: mail_notification rows are
        # keyed per partner, not per user.
        partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
        # One direct SQL UPDATE over every notification of every message of
        # the given records; bypasses the ORM, hence the explicit cache
        # invalidation below.
        cr.execute('''
            UPDATE mail_notification SET
                is_read=true
            WHERE
                message_id IN (SELECT id FROM mail_message WHERE res_id=ANY(%s) AND model=%s) AND
                partner_id = %s
        ''', (ids, self._name, partner_id))
        self.pool.get('mail.notification').invalidate_cache(cr, uid, ['is_read'], context=context)
        return True
#------------------------------------------------------
# Thread suggestion
#------------------------------------------------------
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Return a list of suggested threads, sorted by the numbers of followers"""
if context is None:
context = {}
# TDE HACK: originally by MAT from portal/mail_mail.py but not working until the inheritance graph bug is not solved in trunk
# TDE FIXME: relocate in portal when it won't be necessary to reload the hr.employee model in an additional bridge module
if 'is_portal' in self.pool['res.groups']._fields:
user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
if any(group.is_portal for group in user.groups_id):
return []
threads = []
if removed_suggested_threads is None:
removed_suggested_threads = []
thread_ids = self.search(cr, uid, [('id', 'not in', removed_suggested_threads), ('message_is_follower', '=', False)], context=context)
for thread in self.browse(cr, uid, thread_ids, context=context):
data = {
'id': thread.id,
'popularity': len(thread.message_follower_ids),
'name': thread.name,
'image_small': thread.image_small
}
threads.append(data)
return sorted(threads, key=lambda x: (x['popularity'], x['id']), reverse=True)[:3]
def message_change_thread(self, cr, uid, id, new_res_id, new_model, context=None):
"""
Transfert the list of the mail thread messages from an model to another
:param id : the old res_id of the mail.message
:param new_res_id : the new res_id of the mail.message
:param new_model : the name of the new model of the mail.message
Example : self.pool.get("crm.lead").message_change_thread(self, cr, uid, 2, 4, "project.issue", context)
will transfert thread of the lead (id=2) to the issue (id=4)
"""
# get the sbtype id of the comment Message
subtype_res_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'mail.mt_comment', raise_if_not_found=True)
# get the ids of the comment and none-comment of the thread
message_obj = self.pool.get('mail.message')
msg_ids_comment = message_obj.search(cr, uid, [
('model', '=', self._name),
('res_id', '=', id),
('subtype_id', '=', subtype_res_id)], context=context)
msg_ids_not_comment = message_obj.search(cr, uid, [
('model', '=', self._name),
('res_id', '=', id),
('subtype_id', '!=', subtype_res_id)], context=context)
# update the messages
message_obj.write(cr, uid, msg_ids_comment, {"res_id" : new_res_id, "model" : new_model}, context=context)
message_obj.write(cr, uid, msg_ids_not_comment, {"res_id" : new_res_id, "model" : new_model, "subtype_id" : None}, context=context)
return True
| odoousers2014/odoo | addons/mail/mail_thread.py | Python | agpl-3.0 | 106,607 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Swiss Postfinance File Delivery Services module for Odoo
# Copyright (C) 2014 Compassion CH
# @author: Nicolas Tran
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
import logging
_logger = logging.getLogger(__name__)
class fds_postfinance_files(models.Model):
    ''' Model of the information and files downloaded from FDS PostFinance.
        The binary payload is kept in the database until it has been
        imported as a bank statement.
    '''
    _name = 'fds.postfinance.files'

    # FDS account the file was downloaded with
    fds_account_id = fields.Many2one(
        comodel_name='fds.postfinance.account',
        string='FDS account id',
        ondelete='restrict',
        readonly=True,
        help='file related to FDS account id'
    )
    # raw downloaded payload; cleared after a successful import
    files = fields.Binary(
        string='Files',
        readonly=True,
        help='the downloaded file'
    )
    # NOTE: the 'bankStatment' misspelling is kept -- it is the stored
    # column name and renaming it would require a migration.
    bankStatment_id = fields.Many2one(
        comodel_name='account.bank.statement',
        string='Bank Statment id',
        ondelete='restrict',
        readonly=True,
        help='the generate bank statment id'
    )
    filename = fields.Char(
        string='Filename',
        readonly=True,
        help='The name of the file'
    )
    directory_id = fields.Many2one(
        'fds.postfinance.files.directory',
        string='Directory',
        ondelete='restrict',
        readonly=True,
        help='location directory of the file'
    )
    # related field: journal configured on the directory
    journal_id = fields.Many2one(
        comodel_name='account.journal',
        related='directory_id.journal_id',
        string='journal',
        ondelete='restrict',
        readonly=True,
        help='default journal for this file'
    )
    state = fields.Selection(
        selection=[('draft', 'Draft'),
                   ('finish', 'Finish'),
                   ('error', 'Error')],
        readonly=True,
        default='draft',
        help='state of file'
    )

    ##################################
    #         Button action          #
    ##################################
    @api.multi
    def import_button(self):
        ''' Convert the file into an account.bank.statement record.
            Called by pressing the import button.

            :return: None
            :raises exceptions.Warning: if the directory has no default journal
        '''
        self.ensure_one()
        if not self.directory_id.journal_id:
            # message typo fixed ("acount conf" -> "account config")
            raise exceptions.Warning('Add default journal in account config')
        self.import2bankStatements()

    @api.multi
    def change2error_button(self):
        ''' Flag the file as corrupt (state 'error').
            Called by pressing the 'corrupt file?' button.

            :return: None
        '''
        self.ensure_one()
        self._state_error_on()

    @api.multi
    def change2draft_button(self):
        ''' Undo the corrupt flag and go back to state 'draft'.
            Called by pressing the 'cancel corrupt file' button.

            :return: None
        '''
        self.ensure_one()
        self.write({'state': 'draft'})

    ##############################
    #          function          #
    ##############################
    @api.multi
    def import2bankStatements(self):
        ''' Convert the file to a record of model account.bank.statement.

            :returns bool:
                - True if the conversion succeeded
                - False otherwise
        '''
        self.ensure_one()
        try:
            values = {
                'journal_id': self.directory_id.journal_id.id,
                'data_file': self.files}
            # local name typo fixed (was ``bs_imoprt_obj``)
            bs_import_obj = self.env['account.bank.statement.import']
            bank_wiz_imp = bs_import_obj.create(values)
            bank_wiz_imp.import_file()
            self._state_finish_on()
            self._add_bankStatement_ref()
            self._remove_binary_file()
            _logger.info("[OK] import file '%s' to bank Statements",
                         (self.filename))
            return True
        except Exception:
            # was a bare ``except:``; catching Exception preserves the
            # best-effort behaviour while letting KeyboardInterrupt/SystemExit
            # propagate
            _logger.info("[FAIL] import file '%s' to bank Statements",
                         (self.filename))
            return False

    @api.multi
    def _add_bankStatement_ref(self):
        ''' Link this file to the bank statement that was just generated.

            :returns: None
        '''
        # NOTE(review): picks the highest draft statement created by the
        # current user -- presumably the one created by import_file() just
        # above; confirm this is safe under concurrent imports.
        bs = self.env['account.bank.statement'].search([
            ['state', '=', 'draft'],
            ['create_uid', '=', self.env.uid]])
        self.write({'bankStatment_id': max(bs).id})

    @api.multi
    def _remove_binary_file(self):
        ''' Drop the binary payload: after the import the data lives in the
            bank statement attachment.

            :returns: None
        '''
        self.write({'files': None})

    @api.multi
    def _state_finish_on(self):
        ''' Move the file to state 'finish'.

            :returns: None
        '''
        self.ensure_one()
        self.write({'state': 'finish'})

    @api.multi
    def _state_error_on(self):
        ''' Move the file to state 'error'.

            :returns: None
        '''
        self.ensure_one()
        self.write({'state': 'error'})

    # backward-compatible alias for the historical (misspelled) name
    _sate_error_on = _state_error_on
| ndtran/l10n-switzerland | l10n_ch_fds_postfinance/model/fds_postfinance_files.py | Python | agpl-3.0 | 6,014 |
#!/usr/bin/env python
import os
from askgod import create_app, db
from askgod.models import *
import askgod.views
from flask.ext.script import Manager, Shell
from flask.ext.script.commands import Server
from flask.ext.migrate import Migrate, MigrateCommand
# Application bootstrap: the configuration name comes from the FLASK_CONFIG
# environment variable, falling back to 'default'.
my_app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(my_app)  # Flask-Script CLI entry point
migrate = Migrate(my_app, db)  # register Alembic migration support on the app
def make_shell_context():
    """Objects pre-imported into the ``shell`` command's session."""
    return {
        'app': my_app,
        'db': db,
        'User': User,
        'Challenge': Challenge,
    }
# Register CLI commands: interactive shell, DB migrations, and the dev server.
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
# NOTE(review): listens on all interfaces, port 80, with the debugger
# enabled -- presumably a development/CTF container setup; confirm before
# exposing publicly.
manager.add_command("runserver", Server(threaded=True, use_debugger=True, host="0.0.0.0", port=80))
if __name__ == '__main__':
    manager.run()
    # app.run(host='0.0.0.0', port=8080, debug=True)
| ppepos/drunken-shame | containers/askgod/app/src/run.py | Python | mit | 846 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.