| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| lsaffre/lino-welfare | lino_welfare/modlib/cbss/tx25.py | 1 | 38017 |
# -*- coding: UTF-8 -*-
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
# This is a masterpiece of opaque code, difficult to understand and
# maintain, but I didn't find a better solution. Maybe an XSLT expert
# could help us rewrite it from scratch. The purpose is simple:
# transform the content of a Tx25 response into a printable document.
# A Tx25 response is a rather complex data structure with lots and lots
# of elements, so this module defines a handler for every element type.
# If you need to understand how it works, start with the source code of
# :class:`RowFactory`.
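# The dispatch logic, roughly (a sketch of what
# RetrieveTIGroupsResult.get_data_rows() at the end of this module does):
#
#     rf = RowFactory()
#     for name, node in reply.rrn_it_implicit:
#         handler = getattr(rf, node.__class__.__name__, None) \
#             or rf.get_it_handler(node)
#         for row in handler(node, name):
#             ...  # AttrDict rows with group, type, since and info fields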
from __future__ import unicode_literals
from builtins import str
from django.db import models
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from lino.api import dd
from lino.utils import AttrDict, IncompleteDate
from etgen import html as xghtml
E = xghtml.E
from .utils import cbss2gender
from .models import reply_has_result
from .ui import ConfidentialResultsTable
def rn2date(rd):
return IncompleteDate(
int(rd.Century + rd.Year),
int(rd.Month),
int(rd.Day))
def deldate(n):
if hasattr(n, 'DelDate'):
return [' (' + str(_('until ')) +
dd.dtos(rn2date(n.DelDate)) + ')']
return []
# def simpleattr(n,name):
# v = getattr(n,name,None)
# if v:
# return [ ', '+name+' ' + unicode(v)]
# return []
def simpletype(v):
    # plain (non-bold) text; compare boldstring() below
    return Info(str(v))
def boldstring(v):
return Info(xghtml.E.b(str(v)))
def validate_element(c):
if c is None:
raise Exception("Invalid element %r" % c)
class Info(object):
def __init__(self, *chunks):
for c in chunks:
validate_element(c)
self.chunks = list(chunks)
def addfrom(self, node, name, prefix=None, fmt=boldstring, suffix=''):
v = getattr(node, name, None)
if not v:
return self
if prefix is None:
prefix = '%s ' % name
else:
prefix = force_text(prefix)
if prefix and prefix[-1] not in ' :(':
prefix += ': '
if len(self.chunks):
if not prefix.startswith(' '):
prefix = ', ' + prefix
self.chunks += [prefix] + fmt(v).chunks
if suffix:
self.chunks.append(force_text(suffix))
return self
def add_deldate(self, n):
self.chunks += deldate(n)
def add_codelabel(self, n):
self.chunks += code_label(n).chunks
# if hasattr(n,'Label'):
# self.addfrom(n,'Label')
# self.addfrom(n,'Code','(',simpletype,')')
# else:
# self.addfrom(n,'Code','[',boldstring,']')
return self
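# An example of the chaining style used throughout this module
# (a sketch only; ``n`` stands for a hypothetical response node):
#
#     info = Info().addfrom(n, 'Label', '', boldstring)
#     info.addfrom(n, 'Code', ' (', simpletype, ')')
#     # -> renders roughly as "Label (Code)", with the label in bold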
def code_label(n):
chunks = []
if hasattr(n, 'Label') and n.Label:
chunks.append(xghtml.E.b(n.Label))
if hasattr(n, 'Code') and n.Code:
chunks += [' (', n.Code, ')']
return Info(*chunks)
# CodeLabel = code_label
# def CodeLabel(n):
# info = Info()
# return info
def NameType(n):
info = Info()
s = ' '.join([ln.Label for ln in n.LastName])
info.chunks.append(xghtml.E.b(s))
if hasattr(n, 'FirstName'):
info.chunks.append(', ')
s = ' '.join([fn.Label for fn in n.FirstName])
info.chunks.append(s)
return info
# def addinfo(node,name,prefix=None,fmt=simpletype,suffix=''):
# v = getattr(node,name,None)
# if not v: return []
# if prefix is None:
# prefix = ', %s ' % name
# info = [force_text(prefix)] + fmt(v)
# if suffix:
# info.append(force_text(suffix))
# return info
def DateType(n):
return Info(dd.dtos(rn2date(n)))
def ForfeitureDateType(n):
info = Info(dd.dtos(rn2date(n)))
info.addfrom(n, 'Graphic', ' (', simpletype, ')')
return info
def ExpiryDateType(n):
info = Info(dd.dtos(rn2date(n)))
info.addfrom(n, 'Graphic', ' (', simpletype, ')')
return info
def TribunalType(n):
return code_label(n)
def PlaceType(n):
return code_label(n)
def SituationType111(n):
return code_label(n)
def JustificationType(n):
return code_label(n)
def GraphicPlaceType(n):
info = CountryType(n.Country)
info.addfrom(n, 'Graphic', '')
# if hasattr(n,'Graphic'):
# info.append(', graphic:'+n.Graphic)
return info
def ForeignJudgementType(n):
return GraphicPlaceType(n.Place)
def BelgianJudgementType(n):
info = Info()
info.addfrom(n, 'Tribunal', None, TribunalType)
info.addfrom(n, 'Date', None, DateType)
info.addfrom(n, 'Place', None, PlaceType)
# info += TribunalType(n.Tribunal)
# info += DateType(n.Date)
# info += PlaceType(n.Place)
return info
def CountryType(n):
return code_label(n)
def LieuType(n):
info = Info()
if hasattr(n, 'Place1'):
# info += code_label(n.Place1)
info.addfrom(n, 'Place1', None, code_label)
elif hasattr(n, 'Place2'):
info.addfrom(n, 'Place2', None, GraphicPlaceType)
else:
place = n.Place3
# info += GraphicPlaceType(place)
info.addfrom(place, 'BelgianJudgement', '', BelgianJudgementType)
info.addfrom(place, 'ForeignJudgement', '', ForeignJudgementType)
# if hasattr(place,'BelgianJudgement'):
# info += BelgianJudgementType(place.BelgianJudgement)
# else:
# info += ForeignJudgementType(place.ForeignJudgement)
return info
def DiplomaticPostType(n):
return code_label(n)
def TerritoryType(n):
return code_label(n)
def ProvinceType(n):
return code_label(n)
def IssuerType(n):
    # prefixes can be empty since this is an xs:choice
info = Info().addfrom(n, 'Place', '', PlaceType)
info.addfrom(n, 'Province', '', ProvinceType, ' (%s)' %
str(_("Province")))
info.addfrom(n, 'PosteDiplomatique', '', DiplomaticPostType, ' (%s)' %
str(_("Diplomatic post")))
return info
def ResidenceType(n):
return code_label(n)
def NationalNumberType(n):
info = Info().addfrom(n, 'NationalNumber', '')
return info # [n.NationalNumber]
def PartnerType(n):
info = Info().addfrom(n, 'NationalNumber', '', NationalNumberType)
# info.addfrom(n,'Name','',NameType)
info.addfrom(n, 'Name', ' ', NameType)
return info
def NotaryType(n):
info = Info().addfrom(n, 'NameNotary')
info.addfrom(n, 'Place', ' in ', PlaceType)
info.addfrom(n, 'Country', ', ', CountryType)
return info
def NotificationType(n):
info = Info().addfrom(n, 'NotificationDate', None, DateType)
info.addfrom(n, 'Place', ' in ', PlaceType)
return info
def ReasonType(n):
return code_label(n)
def CessationType(n):
return code_label(n)
def DeclarationType(n):
return code_label(n)
def Residence(n):
info = Info().addfrom(n, 'Residence', '', ResidenceType)
info.addfrom(n, 'Fusion', _("Fusion"))
info.addfrom(n, 'Language', _("Language"))
info.add_deldate(n)
return info
def IT003(n):  # AscertainedLegalMainAddresses: determination of residence
# raise Exception(str(n))
def InvestigationResultType(n):
return code_label(n)
info = Info().addfrom(n, 'InvestigationResult',
'', InvestigationResultType)
info.addfrom(n, 'Graphic1', '')
info.addfrom(n, 'Graphic2', '')
info.add_deldate(n)
return info
def IT005(n): # AddressChangeIntention
# raise Exception(str(n))
info = Info().addfrom(n, 'OriginPlace', _('Move from '), PlaceType)
info.addfrom(n, 'DestinationPlace', _('Move to '), PlaceType)
info.add_deldate(n)
return info
def IT006(n):
info = Info()
info.addfrom(n, 'Country', '', CountryType)
info.addfrom(n, 'Graphic', ' ')
info.add_deldate(n)
return info
def IT008(n): # ReturnPermissions
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'ExpiryDate', _("expires "), DateType)
info.add_deldate(n)
return info
def IT011(n):  # Pseudonyms
info = Info()
info.addfrom(n, 'Name', '', NameType)
info.add_deldate(n)
return info
def IT013(n):
info = Info()
info.addfrom(n, 'ModificationType', '', ModificationTypeType)
info.addfrom(n, 'Graphic', '')
info.add_deldate(n)
return info
def IT018(n):
info = Info()
info.addfrom(n, 'Address', '', AddressType)
info.add_deldate(n)
return info
def IT024(n):
info = Info()
info.add_deldate(n)
return info
def TypeOfBurialType(n):
return code_label(n)
def LegalRepresentativeType(n):
info = Info()
info.addfrom(n, 'NationalNumber', " ", NationalNumberType)
info.addfrom(n, 'Graphic', " ")
return info
def IT152(n):  # BurialModes: mode of burial
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'TypeOfBurial', "", TypeOfBurialType)
info.addfrom(n, 'LegalRepresentative', "", LegalRepresentativeType)
info.add_deldate(n)
return info
def IT023(n):  # PostalAddressAbroad: postal address abroad
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'Address', "", AddressType)
info.add_deldate(n)
return info
def TypeOfAbsenceType(n):
return Info(E.b(n.Code))
def IT026(n): # TemporaryAbsences
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'TypeOfAbsence', "", TypeOfAbsenceType)
info.addfrom(n, 'Graphic1', " ")
info.addfrom(n, 'Graphic2', " ")
info.add_deldate(n)
return info
def IT028(n):
info = Info()
info.addfrom(n, 'LegalFact', '', code_label)
info.addfrom(n, 'Graphic', '')
info.addfrom(n, 'ExpiryDate', _("expires "), DateType)
info.add_deldate(n)
return info
def IT208(n):
info = Info()
# info.addfrom(n,'Date','',DateType)
info.addfrom(n, 'PseudoNationalNumber', '')
info.add_deldate(n)
return info
def IT073(n):
info = Info()
info.addfrom(n, 'Category', '', CategoryType)
info.addfrom(n, 'CertificateNumber', _("no."))
info.add_deldate(n)
return info
def IT074(n):
info = Info()
info.addfrom(n, 'SerialNumber')
info.addfrom(n, 'IdentificationNumber')
info.add_deldate(n)
return info
def FiliationType(n):
return code_label(n)
def ParentType(n):
info = Info()
info.addfrom(n, 'Name', '', NameType)
info.addfrom(n, 'NationalNumber', ' (', NationalNumberType, ')')
return info
def StreetType(n):
# we don't print the code of streets
info = Info()
info.addfrom(n, 'Label', '')
# info.addfrom(n,'NationalNumber',' (',NationalNumberType,')')
return info
# return code_label(n)
def IT020(n):
def AddressType020(n):
info = Info()
info.addfrom(n, 'ZipCode', '')
info.addfrom(n, 'Street', '', StreetType)
info.addfrom(n, 'HouseNumber', _('no. '))
info.addfrom(n, 'Box', ' ')
return info
info = Info()
info.addfrom(n, "Address", '', AddressType020)
return info
def IT110(n):
# Filiation ascendante
info = Info()
info.addfrom(n, 'FiliationType', '', FiliationType)
info.addfrom(n, 'Parent1', _('of '), ParentType)
info.addfrom(n, 'Parent2', _('and '), ParentType)
info.addfrom(n, 'ActNumber', _("Act no. "))
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Graphic', " ")
info.add_deldate(n)
return info
def IT111(n):
# Statut de la personne représentée ou assistée
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'Justification', '', JustificationType)
info.addfrom(n, 'Situation', '', SituationType111)
info.addfrom(n, 'Graphic', " ")
info.add_deldate(n)
return info
def IT113(n): # Guardian : Personne qui représente ou assiste
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'Status', _("Status"), code_label)
info.addfrom(n, 'Justification', _("Justification"), code_label)
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Graphic', " ")
info.addfrom(n, 'Country', " ", CountryType)
info.add_deldate(n)
return info
def IT140(n):
info = Info().addfrom(n, 'Name', ' ', NameType)
info.addfrom(n, 'NationalNumber', ' (', NationalNumberType, ')')
# info += _(' as ')
info.addfrom(n, 'FamilyRole', _('as '), code_label)
info.addfrom(n, 'Housing', None, HousingType)
info.add_deldate(n)
return info
def IT141(n):
info = Info()
info.addfrom(n, 'Housing', None, HousingType)
info.addfrom(n, 'FamilyRole', '', code_label)
info.addfrom(n, 'Name', _('in family headed by '), NameType)
info.addfrom(n, 'NationalNumber', ' (', NationalNumberType, ')')
info.add_deldate(n)
return info
def NationalityType(n):
return code_label(n)
def IT213(n): # Alias
info = Info()
info.addfrom(n, 'Name', '', NameType)
info.addfrom(n, 'Nationality', None, NationalityType)
info.addfrom(n, 'BirthDate', _(' born '), DateType)
info.addfrom(n, 'BirthPlace', _(' in '))
info.add_deldate(n)
return info
def TypeOfLicenseType(n):
return code_label(n)
def TypeOfLicenseType194(n):
return code_label(n)
def DeliveryType206(n):
v = getattr(n, 'Place', None)
if v:
return PlaceType(v)
return CountryType(n.Country)
def DeliveryType194(n):
info = Info().addfrom(n, 'Place', _('in '), PlaceType)
info.addfrom(n, 'Label', '')
info.addfrom(n, 'Code', ' (', simpletype, ')')
# info.add_codelabel(n)
# info += code_label(n)
return info
def CategoryType(n):
return code_label(n)
def GearBoxType(n):
return code_label(n)
def MedicalType(n):
return code_label(n)
def LicenseCategoriesType(n):
info = Info()
# raise Exception(str(n))
# for cat in n.Category:
# info.addfrom(cat,'Category',' ',CategoryType)
info.chunks.append('/'.join([cat.Label for cat in n.Category]))
# info += code_label(n)
return info
def ForfeitureReasonType(n):
return code_label(n)
def IT191(n):
# info = code_label(n.TypeOfLicense)
info = Info().addfrom(n, 'TypeOfLicense', '', TypeOfLicenseType)
info.addfrom(n, 'LicenseNumber', _('no. '))
info.addfrom(n, 'Place', _('delivered in '), PlaceType)
info.addfrom(n, 'DeliveryCountry', ' (', CountryType, ')')
info.addfrom(n, 'ForfeitureReason', None, ForfeitureReasonType)
info.addfrom(n, 'ForfeitureDate', None, ForfeitureDateType)
# info.append()
# info.append(E.b(n.LicenseNumber))
# info.append(', categories '
# + ' '.join([cat.Label for cat in n.Categories.Category]))
# info.append(_(' delivered in '))
# info += code_label(n.Delivery.Place)
info.add_deldate(n)
return info
def IT194(n):
info = Info().addfrom(n, 'TypeOfLicense', '', TypeOfLicenseType194)
info.addfrom(n, 'Categories', _('categories '), LicenseCategoriesType)
info.addfrom(n, 'LicenseNumber', _('no. '))
info.addfrom(n, 'Delivery', _('delivered '), DeliveryType194)
info.addfrom(n, 'GearBox', None, GearBoxType)
info.addfrom(n, 'Medical', None, MedicalType)
info.addfrom(n, 'ExpiryDate', _('expires '), ExpiryDateType)
info.add_deldate(n)
return info
def IT198(n):
info = Info().addfrom(n, 'PermitNumber', _('no. '))
info.addfrom(n, 'Categories', _('categories '), LicenseCategoriesType)
info.addfrom(n, 'LicenseNumber', _('no. '))
info.addfrom(n, 'Delivery', _('delivered '), DeliveryType194)
info.addfrom(n, 'GearBox', None, GearBoxType)
info.addfrom(n, 'Medical', None, MedicalType)
info.addfrom(n, 'ExpiryDate', _('expires '), ExpiryDateType)
info.add_deldate(n)
return info
def TypeOfPassportType(n):
return code_label(n)
def PassportIdentType(n):
info = Info()
info.addfrom(n, 'PassportType', _('type '), TypeOfPassportType)
info.addfrom(n, 'PassportNumber', _('no. '))
return info
def IT199(n):
info = Info()
# info.chunks.append('Number ')
# info.chunks.append(E.b(n.PassportIdent.PassportNumber))
# info.append(', status ')
info.addfrom(n, 'Status', _("status"), code_label)
info.addfrom(n, 'PassportIdent', '', PassportIdentType)
info.addfrom(n, 'Issuer', _('issued by '), IssuerType)
info.addfrom(n, 'RenewalNumber', _('renewal no. '), boldstring)
info.addfrom(n, 'SerialNumber', _('serial no. '), boldstring)
info.addfrom(n, 'SecondNumber', _('second no. '), boldstring)
info.addfrom(n, 'ReplacementOf', _('replacement of '), boldstring)
info.addfrom(n, 'AdditionTo', _('addition to '), boldstring)
info.addfrom(n, 'ProductionDate', _('produced '), DateType)
info.addfrom(n, 'ExpiryDate', _('expires '), DateType)
# info.append(', type ')
# info += code_label(n.PassportIdent.PassportType)
# info.append(', expires ')
# info.append(E.b(dd.dtos(rn2date(n.ExpiryDate))))
# info.append(', delivered by ')
# info += code_label(n.Issuer.PosteDiplomatique)
# info.append(_(' renewal no. '))
# info.append(E.b(n.RenewalNumber))
info.add_deldate(n)
return info
def HousingType(n):
return code_label(n)
def ModificationTypeType(n):
return code_label(n)
def AddressType(n):
info = Info()
# pd = n.Address.Address
info.addfrom(n, 'Country', '', CountryType)
# info.append(', ')
info.addfrom(n, 'Graphic1', '')
info.addfrom(n, 'Graphic2', '')
info.addfrom(n, 'Graphic3', '')
# info.append(E.b(pd.Graphic1))
# info.append(', ')
# info.append(E.b(pd.Graphic2))
# info.append(', ')
# info.append(E.b(pd.Graphic3))
# info.addfrom(pd,'Graphic3')
return info
def CertificateType(n):
return code_label(n)
def IT200(n):
info = Info().addfrom(n, 'PublicSecurityNumber', _('no. '))
info.add_deldate(n)
return info
def IT202(n):
info = Info()
info.addfrom(n, 'Graphic1', '')
info.addfrom(n, 'Graphic2', '')
info.addfrom(n, 'Limosa', '', LimosaType)
info.add_deldate(n)
return info
def LimosaType(n):
info = Info()
info.addfrom(n, 'Reason1', '', LimosaReasonType)
info.addfrom(n, 'Reason2', '', LimosaReasonType)
info.addfrom(n, 'NationalNumber', _('SSIN '), NationalNumberType)
return info
def LimosaReasonType(n):
return code_label(n)
def IT205(n):
info = code_label(n)
info.add_deldate(n)
return info
def OrganizationType(n):
return code_label(n)
def GeneralInfoType(n):
info = code_label(n)
info.addfrom(n, 'Organization', _("Organization"), OrganizationType)
return info
def OrigineType(n):
return Info().add_codelabel(n)
def AppealType(n):
return code_label(n)
def StatusAppealType(n):
return code_label(n)
def ProcedureType(n):
info = Info()
info.addfrom(n, 'Origine', None, OrigineType)
info.addfrom(n, 'Reference')
info.addfrom(n, 'Appeal', None, AppealType)
info.addfrom(n, 'OpenClose', None, StatusAppealType)
info.addfrom(n, 'NationalNumber', _('SSIN '), NationalNumberType)
return info
def DecisionCancelledType(n):
info = Info()
info.addfrom(n, 'Date', None, DateType)
info.addfrom(n, 'Reference')
return info
def DelayLeaveGrantedType(n):
info = Info()
info.addfrom(n, 'Date', None, DateType)
return info
def StrikingOutType(n):
info = Info()
info.addfrom(n, 'Reference')
info.addfrom(n, 'OpenClose', None, OpenCloseType)
info.addfrom(n, 'Status', None, StrikingStatusType)
return info
def StrikingStatusType(n):
return code_label(n)
def TerritoryLeftType(n):
return code_label(n)
def OpenCloseType(n):
return code_label(n)
def ProtectionType(n):
info = code_label(n)
info.addfrom(n, 'Reference')
info.addfrom(n, 'Term')
return info
def AdviceFromCGVSType(n):
info = code_label(n)
info.addfrom(n, 'Reference')
return info
def ApplicationFiledType(n):
info = code_label(n)
info.addfrom(n, 'Place', _("in "), PlaceType)
return info
def DecisionType206(n):
# print 20150513, unicode(n).encode("ascii", errors="replace")
info = code_label(n)
info.addfrom(n, 'Reference', _("Reference"))
info.addfrom(n, 'OpenClose', _("Open/Close"), OpenCloseType)
info.addfrom(n, 'Comments')
info.addfrom(n, 'Term')
return info
def NotificationByDVZType(n):
info = Info()
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Reference')
return info
def NotificationByOrgType(n):
info = Info()
info.addfrom(n, 'Reference')
return info
def AppealLodgedType(n):
info = Info()
info.addfrom(n, 'Reference')
return info
def IT206(n):
def Status(n):
info = Info()
info.addfrom(n, 'Status')
return info
info = Info()
info.addfrom(n, 'GeneralInfo', '', GeneralInfoType)
info.addfrom(n, 'Procedure', _("Procedure"), ProcedureType)
info.addfrom(n, 'StrikingOut', None, StrikingOutType)
info.addfrom(n, 'DecisionCancelled',
_("Decision cancelled"), DecisionCancelledType)
info.addfrom(n, 'Protection', _("Protection"), ProtectionType)
info.addfrom(n, 'DelayLeaveGranted', None, DelayLeaveGrantedType)
info.addfrom(n, 'Escape', _("Escape"), Status)
info.addfrom(n, 'UnrestrictedStay', None, Status)
info.addfrom(n, 'ApplicationRenounced', _("Application renounced"), Status)
info.addfrom(n, 'TerritoryLeft', _("Territory left"), TerritoryLeftType)
info.addfrom(n, 'AdviceFromCGVS', None, AdviceFromCGVSType)
info.addfrom(n, 'Decision', _("Decision"), DecisionType206)
info.addfrom(n, 'ApplicationFiled',
_("Application filed"), ApplicationFiledType)
info.addfrom(n, 'NotificationByDVZ', None, NotificationByDVZType)
info.addfrom(n, 'NotificationByOrg', None, NotificationByOrgType)
info.addfrom(n, 'AppealLodged', None, AppealLodgedType)
info.add_deldate(n)
return info
def InitiativeType(n):
return code_label(n)
def SocialWelfareType(n):
info = Info()
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Initiative', None, InitiativeType)
info.add_deldate(n)
return info
def RefugeeCentreType(n):
return code_label(n)
def IT207(n):
info = Info()
info.addfrom(n, 'SocialWelfare',
_("Social Welfare Centre"), SocialWelfareType)
info.addfrom(n, 'RefugeeCentre', _("Refugee Centre"), RefugeeCentreType)
info.add_deldate(n)
return info
def RegistrationRegisterType(n):
return code_label(n)
def IT210(n):
info = Info()
info.addfrom(n, 'RegistrationRegister',
_("Registration register"), RegistrationRegisterType)
info.add_deldate(n)
return info
def IdentificationType(n):
return code_label(n)
def IT211(n):
info = Info()
info.addfrom(n, 'TypeOfDocument', '', IdentificationType)
info.add_deldate(n)
return info
def ChoosenResidenceType(n):
return code_label(n)
def IT212(n):
info = Info().addfrom(n, 'Residence', None, ChoosenResidenceType)
info.addfrom(n, 'Graphic', '')
info.add_deldate(n)
return info
def IT251(n):
info = Info()
info.add_deldate(n)
return info
def IT192(n):
info = Info().addfrom(n, 'Declaration', '', DeclarationType)
info.addfrom(n, 'Place', _('in '), PlaceType)
info.add_deldate(n)
return info
HANDLERS = dict()
def register_it_handler(name, label, subname, itname):
HANDLERS[name] = (label, subname, itname)
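# Each registration below maps an XML container element to a triple of
# (group label, child element name, IT handler name).  For example the
# 'WorkPermits' entry makes RowFactory.get_it_handler() iterate over
# node.WorkPermit and render each child through IT198().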
register_it_handler('WorkPermits', _("Work Permits"), 'WorkPermit', 'IT198')
register_it_handler(
'PublicSecurityNumbers',
_("Public Security Numbers"), 'PublicSecurityNumber', 'IT200')
register_it_handler('SpecialInfos', _("Special Infos"), 'SpecialInfo', 'IT202')
register_it_handler('RefugeeTypes', _("Refugee Types"), 'RefugeeType', 'IT205')
register_it_handler('StatusOfRefugee', _("Status of refugee"),
'StatusOfRefugee', 'IT206')
register_it_handler('Passports', _("Passports"), 'Passport', 'IT199')
register_it_handler(
'OrganizationsInCharge',
_("Organizations in charge"), 'OrganizationInCharge', 'IT207')
register_it_handler(
'RegistrationRegisters',
_("Registration registers"), 'RegistrationRegister', 'IT210')
register_it_handler('ChoosenResidences',
                    _("Chosen residences"), 'ChoosenResidence', 'IT212')
register_it_handler('OrganDonations', _("Organ Donations"),
'OrganDonation', 'IT192')
register_it_handler('ResidenceUpdateDates',
_("Residence Update Dates"), 'ResidenceUpdateDate',
'IT251')
register_it_handler('DocumentTypes', _("Document Types"),
'DocumentType', 'IT211')
register_it_handler('NameModifications',
_("Name Modifications"), 'NameModification', 'IT013')
register_it_handler('CountriesOfOrigin',
_("Countries Of Origin"), 'CountryOfOrigin', 'IT006')
register_it_handler('ReturnPermissions',
_("Return permissions"), 'ReturnPermission', 'IT008')
register_it_handler('AddressDeclarationAbroad',
_("Address Declaration Abroad"), 'Address', 'IT018')
register_it_handler('TemporaryRegistrations',
                    _("Temporary registrations"),
                    'TemporaryRegistration', 'IT028')
register_it_handler('SpecialRetirementCertificates',
_("Special Retirement Certificates"),
'SpecialRetirementCertificate',
'IT074')
register_it_handler('RetirementCertificates',
_("Retirement Certificates"), 'RetirementCertificate',
'IT073')
register_it_handler('Guardians',
_("Guardians"), 'Guardian', 'IT113')
register_it_handler('PseudoNationalNumbers',
_("Pseudo National Numbers"), 'PseudoNationalNumber',
'IT208')
register_it_handler('TemporaryAbsences',
_("Temporary absences"), 'TemporaryAbsence', 'IT026')
register_it_handler('BurialModes',
_("Burial modes"), 'BurialMode', 'IT152')
register_it_handler('PostalAddressAbroad',
_("Postal address abroad"), 'PostalAddressAbroad', 'IT023')
register_it_handler('ParentalAuthorities',
_("Parental authorities"), 'ParentalAuthority', 'IT111')
class RowFactory(object):
    # The result of a Tx25 consists of data rows, each of which has a
    # given type. Consult the source code of this class to see how it
    # works.
def start_group(self, group):
self.current_group = group
self.counter = 0
def datarow(self, node, since, info):
group = self.current_group
self.counter += 1
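        # The node class name carries the IT number, e.g. IT110 -> type "110".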
if node.__class__.__name__.startswith('IT'):
itnum = node.__class__.__name__[2:]
else:
itnum = ''
if hasattr(node, 'Type'):
group += " " + node.Type
# if hasattr(node,'Status'):
# group += " " + unicode(node.Status)
if hasattr(node, 'Structure'):
group += " " + node.Structure
return AttrDict(group=group,
counter=self.counter,
type=itnum,
since=rn2date(since),
info=E.p(*info.chunks))
def get_it_handler(self, itnode):
t = HANDLERS.get(itnode.__class__.__name__, None)
if t is None:
return t
g, subname, itname = t
it = globals().get(itname)
def f(node, name):
self.start_group(g)
for n in getattr(node, subname):
info = it(n)
yield self.datarow(n, n.Date, info)
return f
def IT000(self, n, name):
self.start_group(_("National Number"))
n = n.NationalNumber
info = Info(
E.b(n.NationalNumber),
' (' + str(cbss2gender(n.Sex)) + ')')
yield self.datarow(n, n.Date, info)
def IT019(self, n, name):
self.start_group(_("Address Change Declaration"))
info = Info()
def AddressType(n):
info = Info()
info.addfrom(n, 'Graphic', '')
return info
info.addfrom(n, 'Address', '', AddressType)
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def FileOwner(self, fo, name):
self.start_group(_("Residences"))
for n in fo.Residences:
info = Residence(n)
yield self.datarow(n, n.Date, info)
def AscertainedLegalMainAddresses(self, fo, name):
        # determination of residence
self.start_group(_("Ascertained Legal Main Addresses"))
# raise Exception(str(fo))
# raise Exception(repr([n for n in fo]))
for n in fo.AscertainedLegalMainAddress:
info = IT003(n)
yield self.datarow(n, n.Date, info)
def Pseudonyms(self, fo, name):
self.start_group(_("Pseudonyms")) # Pseudonymes
for n in fo.Pseudonym:
info = IT011(n)
yield self.datarow(n, n.Date, info)
def Aliases(self, fo, name):
self.start_group(_("Aliases"))
for n in fo.Alias:
info = IT213(n)
yield self.datarow(n, n.Date, info)
def AddressChangeIntention(self, fo, name):
        self.start_group(_("Address Change Intention"))
for n in fo.Address:
info = IT005(n)
yield self.datarow(n, n.Date, info)
def AddressReferences(self, fo, name):
self.start_group(_("Address References")) # Adresse de référence
for n in fo.AddressReference:
info = IT024(n)
yield self.datarow(n, n.Date, info)
def Names(self, node, name):
self.start_group(_("Names"))
# group = name
for n in node.Name:
info = Info().addfrom(n, 'Name', '', NameType)
yield self.datarow(n, n.Date, info)
def LegalMainAddresses(self, node, name):
self.start_group(_("Legal Main Addresses"))
for n in node.LegalMainAddress:
yield self.datarow(n, n.Date, IT020(n))
def ResidenceAbroad(self, node, name): # IT022
def ResidenceAbroadAddressType(n):
info = Info('Address')
info.addfrom(n, 'PosteDiplomatique', None, DiplomaticPostType)
info.addfrom(n, 'Territory', ' ', TerritoryType)
info.addfrom(n, 'Address', ' ', AddressType)
return info
self.start_group(_("Residence Abroad"))
for n in node.ResidenceAbroad:
info = Info()
info.addfrom(n, 'Address', '', ResidenceAbroadAddressType)
# info += code_label(n.Address.PosteDiplomatique)
# info.append(', ')
# info += code_label(n.Address.Territory)
# info.append(', ')
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def Nationalities(self, node, name):
self.start_group(_("Nationalities"))
for n in node.Nationality:
info = code_label(n.Nationality)
yield self.datarow(n, n.Date, info)
def Occupations(self, node, name):
self.start_group(_("Occupations"))
for n in node.Occupation:
info = code_label(n.Occupation)
info.addfrom(n, 'SocialCategory', ' (SC ', code_label, ')')
yield self.datarow(n, n.Date, info)
def IT100(self, n, name):
self.start_group(_("Birth Place"))
info = Info()
info.addfrom(n, 'Place1', _('in '), PlaceType)
info.addfrom(n, 'Place2', _('in '), GraphicPlaceType)
info.addfrom(n, 'ActNumber', _("Act no. "))
info.addfrom(n, 'SuppletoryRegister')
yield self.datarow(n, n.Date, info)
def IT101(self, n, name):
        self.start_group(_("Declared Birth Date"))
info = Info()
info.addfrom(n, 'DeclaredBirthDate', '', DateType)
info.addfrom(n, 'Certificate', '', CertificateType)
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def Filiations(self, node, name):
self.start_group(_("Filiations"))
for n in node.Filiation:
info = IT110(n)
yield self.datarow(n, n.Date, info)
def CivilStates(self, node, name):
self.start_group(_("Civil States")) # IT120
for n in node.CivilState:
info = code_label(n.CivilState)
if hasattr(n, 'Spouse'):
# info.append(' with ')
# info += name2info(n.Spouse.Name)
info.addfrom(n.Spouse, 'Name', _('with '), NameType)
info.chunks.append(' (')
info.chunks.append(n.Spouse.NationalNumber.NationalNumber)
info.chunks.append(')')
info.addfrom(n, 'Lieu', _('in '), LieuType)
# info += LieuType(n.Lieu)
info.addfrom(n, 'ActNumber', _("Act no. "))
# info.addfrom(n,'ActNumber')
info.addfrom(n, 'SuppletoryRegister')
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def HeadOfFamily(self, node, name):
self.start_group(_("Head Of Family"))
for n in node.HeadOfFamily:
info = IT140(n)
yield self.datarow(n, n.Date, info)
def FamilyMembers(self, node, name):
self.start_group(_("Family Members"))
for n in node.FamilyMember:
info = IT141(n)
yield self.datarow(n, n.Date, info)
def DrivingLicensesOldModel(self, node, name):
self.start_group(_("Driving Licenses Old Model"))
for n in node.DrivingLicense:
info = IT194(n)
yield self.datarow(n, n.Date, info)
def DrivingLicenses(self, node, name):
self.start_group(_("Driving Licenses"))
for n in node.DrivingLicense:
info = IT191(n)
yield self.datarow(n, n.Date, info)
def IdentityCards(self, node, name):
self.start_group(_("Identity Cards"))
for n in node.IdentityCard:
info = code_label(n.TypeOfCard)
info.chunks.append(' ')
info.chunks.append(_('no. '))
info.chunks.append(E.b(n.CardNumber))
info.addfrom(n, 'ExpiryDate', _('expires '), DateType)
# info.chunks.append(E.b(dd.dtos(rn2date(n.ExpiryDate))))
info.addfrom(n, 'Delivery', _('delivered in '), DeliveryType206)
# info.chunks.append(', delivered in ')
# info += code_label(n.Delivery.Place)
yield self.datarow(n, n.Date, info)
def LegalCohabitations(self, node, name):
def CessationType(n):
info = Info()
info.addfrom(n, 'Reason', _("Reason"), ReasonType)
info.addfrom(n, 'Place', _('in '), PlaceType)
info.addfrom(n, 'Notification', _('in '), NotificationType)
return info
def DeclarationType(n):
info = Info()
info.addfrom(n, 'RegistrationDate', '', DateType)
info.addfrom(n, 'Partner', _('with '), PartnerType)
info.addfrom(n, 'Place', _('in '), PlaceType)
info.addfrom(n, 'Notary', _('in '), NotaryType)
return info
self.start_group(_("Legal cohabitations"))
for n in node.LegalCohabitation:
info = Info()
info.addfrom(n, 'Declaration', _("Declaration"), DeclarationType)
info.addfrom(n, 'Cessation', _("Cessation"), CessationType)
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def IT253(self, node, name):
self.start_group(_("Creation Date"))
n = node # res.CreationDate
info = Info()
yield self.datarow(n, n.Date, info)
def IT254(self, node, name):
self.start_group(_("Last Update"))
n = node # res.LastUpdateDate
info = Info()
yield self.datarow(n, n.Date, info)
class RetrieveTIGroupsResult(ConfidentialResultsTable):
master = 'cbss.RetrieveTIGroupsRequest'
master_key = None
column_names = 'group:18 type:5 since:14 info:50'
@dd.displayfield(_("Group"))
def group(self, obj, ar):
if obj.counter == 1:
return obj.group
return ''
@dd.displayfield(_("TI"))
def type(self, obj, ar):
if obj.counter == 1:
return obj.type
return ''
@dd.virtualfield(models.DateField(_("Since")))
def since(self, obj, ar):
return obj.since
@dd.displayfield(_("Info"))
def info(self, obj, ar):
return obj.info
@classmethod
def get_data_rows(self, ar):
rti = ar.master_instance
if rti is None:
# print "20130425 rti is None"
return
self.check_permission(rti, ar)
# if not ipr.status in (RequestStates.ok,RequestStates.fictive):
# if not rti.status in (RequestStates.ok,RequestStates.warnings):
# return
reply = rti.get_service_reply()
if reply is None:
# print "20130425 reply is None"
return
# print "20130425 ok"
reply_has_result(reply)
res = reply.rrn_it_implicit
rf = RowFactory()
for name, node in res:
# print 20130425, name, node.__class__
m = getattr(rf, node.__class__.__name__, None)
if m is None:
m = rf.get_it_handler(node)
if m is None:
raise Exception("No handler for %s (%s)"
% (name, node.__class__.__name__))
for row in m(node, name):
yield row
| agpl-3.0 | -3,831,657,913,008,972,300 | 27.594432 | 79 | 0.599337 | false | 3.244706 | false | false | false |
| XBigTK13X/olava | source/builder.py | 1 | 1909 |
import os
import datetime
import config
import pickle
from jinja2 import Environment, FileSystemLoader
pwd = os.path.dirname(os.path.abspath(__file__))
templates = Environment(loader=FileSystemLoader(os.path.join(pwd, 'templates')))
def createIndex(games,
platforms,
platformsOrder,
dayOrder,
releaseCount,
googleAnalyticsId):
rawData = {
'games': games,
'platforms': platforms,
'dayOrder': dayOrder,
'releaseCount': releaseCount,
'googleAnalyticsId': googleAnalyticsId
}
template = templates.get_template('index.html')
indexContent = template.render(games=games,
platforms=platforms,
platformsOrder=platformsOrder,
dayOrder=dayOrder,
releaseCount=releaseCount,
googleAnalyticsId=googleAnalyticsId)
if not os.path.exists(config.get().BuildOutputRoot):
os.makedirs(config.get().BuildOutputRoot)
indexPath = os.path.join(config.get().BuildOutputRoot, 'index.html')
with open(indexPath, 'w') as indexFile:
indexFile.write(indexContent)
print("Index file written to "+indexPath)
archiveRoot = os.path.join(config.get().BuildOutputRoot, 'archive')
if not os.path.exists(archiveRoot):
os.makedirs(archiveRoot)
dateToday = datetime.date.today()
archivePath = os.path.join(archiveRoot, str(dateToday))+".html"
with open(archivePath, 'w') as archiveFile:
archiveFile.write(indexContent)
rawPath = archivePath.replace('.html', '.pickle')
print("Archive file written to "+archivePath)
with open(rawPath, 'wb') as fp:
pickle.dump(rawData, fp)
print("Pickled raw data file written to "+rawPath)
| apache-2.0 | -1,568,970,124,760,492,500 | 37.959184 | 80 | 0.619172 | false | 4.289888 | false | false | false |
| mistercrunch/panoramix | superset/views/chart/views.py | 1 | 3873 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from flask import g
from flask_appbuilder import expose, has_access
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from superset import db, is_feature_enabled
from superset.connectors.connector_registry import ConnectorRegistry
from superset.constants import MODEL_VIEW_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.models.slice import Slice
from superset.typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import (
check_ownership,
common_bootstrap_payload,
DeleteMixin,
SupersetModelView,
)
from superset.views.chart.mixin import SliceMixin
from superset.views.utils import bootstrap_user_data
class SliceModelView(
SliceMixin, SupersetModelView, DeleteMixin
): # pylint: disable=too-many-ancestors
route_base = "/chart"
datamodel = SQLAInterface(Slice)
include_route_methods = RouteMethod.CRUD_SET | {
RouteMethod.DOWNLOAD,
RouteMethod.API_READ,
RouteMethod.API_DELETE,
}
class_permission_name = "Chart"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
def pre_add(self, item: "SliceModelView") -> None:
utils.validate_json(item.params)
def pre_update(self, item: "SliceModelView") -> None:
utils.validate_json(item.params)
check_ownership(item)
def pre_delete(self, item: "SliceModelView") -> None:
check_ownership(item)
@expose("/add", methods=["GET", "POST"])
@has_access
def add(self) -> FlaskResponse:
datasources = [
{"value": str(d.id) + "__" + d.type, "label": repr(d)}
for d in ConnectorRegistry.get_all_datasources(db.session)
]
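        # Each entry pairs a "<id>__<type>" key (e.g. "3__table") with a
        # human-readable label identifying the datasource on the add-chart
        # form.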
payload = {
"datasources": sorted(
datasources,
key=lambda d: d["label"].lower() if isinstance(d["label"], str) else "",
),
"common": common_bootstrap_payload(),
"user": bootstrap_user_data(g.user),
}
return self.render_template(
"superset/add_slice.html", bootstrap_data=json.dumps(payload)
)
@expose("/list/")
@has_access
def list(self) -> FlaskResponse:
if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"):
return super().list()
return super().render_app_template()
class SliceAsync(SliceModelView): # pylint: disable=too-many-ancestors
route_base = "/sliceasync"
include_route_methods = {RouteMethod.API_READ}
list_columns = [
"changed_on",
"changed_on_humanized",
"creator",
"datasource_id",
"datasource_link",
"datasource_url",
"datasource_name_text",
"datasource_type",
"description",
"description_markeddown",
"edit_url",
"icons",
"id",
"modified",
"owners",
"params",
"slice_link",
"slice_name",
"slice_url",
"viz_type",
]
label_columns = {"icons": " ", "slice_link": _("Chart")}
| apache-2.0 | 1,508,857,023,377,381,400 | 32.102564 | 88 | 0.657113 | false | 3.880762 | false | false | false |
| hackerspace-ntnu/website | files/views.py | 1 | 1896 |
from django.shortcuts import render
from .models import Image, FileCategory
from .forms import ImageForm
from django.views.generic import CreateView, DeleteView, UpdateView, ListView, View
from django.shortcuts import redirect, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import PermissionRequiredMixin
class ImageDeleteView(PermissionRequiredMixin, DeleteView):
model = Image
success_url = '/files/images'
permission_required = "files.delete_image"
class ImageListView(PermissionRequiredMixin, ListView):
queryset = Image.objects.order_by('img_category', '-time')
template_name = 'files/images.html'
permission_required = 'files.view_image'
context_object_name = 'categories'
def get_queryset(self):
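        # Despite the name, this returns a dict mapping each category name
        # to an ordered queryset of its images; the template receives it
        # under the ``categories`` context name.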
        categorized = {}
for category in FileCategory.objects.all().order_by('name'):
category_images = Image.objects.filter(img_category=category).order_by('-time')
if category_images:
categorized[category.name] = category_images
return categorized
class ImageView(PermissionRequiredMixin, View):
permission_required = "files.view_image"
def get(self, request, *args, **kwargs):
image = get_object_or_404(Image, pk=kwargs['pk'])
return HttpResponseRedirect('/media/'+str(image.file))
@login_required()
def imageUpload(request):
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES, prefix='img')
if form.is_valid():
image = form.save(commit=False)
image.save()
return render(request, 'files/single-image.html', {'image':image})
else:
return HttpResponse(form.errors)
else:
return HttpResponseRedirect('/')
| mit | -5,348,036,515,881,527,000 | 37.693878 | 91 | 0.695148 | false | 4.139738 | false | false | false |
| lensvol/repocket | repocket/main.py | 1 | 4195 |
#!/usr/bin/python
import os
from collections import namedtuple
import yaml
from click import command, confirm, echo, option, prompt, secho
from pocket import Pocket
from rules import DEFAULT_RULES, compile_rules
PocketItem = namedtuple('PocketItem', ['id', 'url', 'tags', 'title'])
def save_config(path, cfg_dict):
with open(path, 'w') as fp:
fp.write(yaml.dump(cfg_dict))
return True
def load_config(path):
try:
with open(path, 'r') as fp:
            return yaml.safe_load(fp.read())
except IOError:
return {}
def get_consumer_key():
return prompt('Please enter your Pocket consumer key')
def get_access_token(consumer_key):
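    # Pocket's OAuth-style flow: obtain a request token, have the user
    # authorize it in the browser, then exchange it for an access token.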
request_token = Pocket.get_request_token(
consumer_key=consumer_key,
redirect_uri='localhost',
)
auth_url = Pocket.get_auth_url(
code=request_token,
redirect_uri='localhost',
)
echo('Please, open this URL in your browser: {}'.format(auth_url))
    if confirm('Did you go to that link?'):
echo('Getting credentials...')
credentials = Pocket.get_credentials(
consumer_key=consumer_key,
code=request_token,
)
return credentials['access_token']
def retrieve_items(pocket, count=10, sort=None, full=True):
call_args = dict(sort=sort or 'newest')
if full:
call_args['detailType'] = 'complete'
if count:
call_args['count'] = count
returned_items = pocket.get(**call_args)[0]['list']
for item_id, resp_item in returned_items.iteritems():
yield PocketItem(
item_id,
resp_item['resolved_url'],
resp_item.get('tags', {}).keys(),
resp_item['resolved_title']
)
def print_item(item, suggested_tags):
secho(u'Title:\t', fg='cyan', nl=False)
echo(item.title)
secho('URL:\t', fg='cyan', nl=False)
echo(item.url)
if item.tags:
secho('Existing tags:\t', fg='cyan', nl=False)
echo(', '.join(item.tags))
secho('Added tags:\t', fg='cyan', nl=False)
echo(', '.join(suggested_tags))
echo()
@command()
@option('--count', default=25, help='Number of items to process.')
@option('--dry-run', is_flag=True)
@option('-a', '--process-all', is_flag=True)
def processor(count, process_all, dry_run):
cfg_path = os.path.join(
os.path.expanduser('~'),
'.repocket.yml',
)
cfg = load_config(cfg_path)
creds = cfg.get('credentials', {})
consumer_key = creds.get('consumer_key')
access_token = creds.get('access_token')
if not consumer_key or not access_token:
consumer_key = get_consumer_key()
access_token = get_access_token(consumer_key)
cfg['credentials'] = {
'consumer_key': str(consumer_key),
'access_token': str(access_token),
}
secho('Your consumer key: ', fg='cyan', nl=False)
secho(consumer_key)
secho('Your access token: ', fg='cyan', nl=False)
secho(access_token)
echo()
api_connector = Pocket(consumer_key, access_token)
cfg.setdefault('rules', DEFAULT_RULES)
rules = compile_rules(cfg['rules'])
save_config(cfg_path, cfg)
secho('Processing items...', fg='cyan')
modified_items = []
items = retrieve_items(api_connector, count=process_all and 0 or count)
for item in items:
suggested_for_item = set()
for rule in rules:
tags = rule.suggest_tags(item)
suggested_for_item.update(tags or [])
new_tags = suggested_for_item - set(item.tags)
if new_tags:
modified_items.append((item, new_tags))
if modified_items:
echo()
for saved_item, suggested_tags in modified_items:
print_item(saved_item, suggested_tags)
api_connector.tags_add(saved_item.id, ','.join(list(suggested_tags)))
if not dry_run:
api_connector.commit()
secho('Changes are sent to server.', fg='green')
else:
secho('"Dry run", no changes are sent to server.', fg='yellow')
else:
secho('No changes have been made.', fg='green')
if __name__ == '__main__':
processor()
| mit | -4,351,194,532,131,688,400 | 28.335664 | 81 | 0.597616 | false | 3.582408 | false | false | false |
| jinankjain/zamboni | mkt/ratings/serializers.py | 1 | 5467 |
from django.core.urlresolvers import reverse
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from reviews.models import Review, ReviewFlag
from mkt.account.serializers import AccountSerializer
from mkt.api.fields import SlugOrPrimaryKeyRelatedField, SplitField
from mkt.api.exceptions import Conflict
from mkt.regions import get_region
from mkt.versions.serializers import SimpleVersionSerializer
from mkt.webapps.models import Webapp
class RatingSerializer(serializers.ModelSerializer):
app = SplitField(
SlugOrPrimaryKeyRelatedField(slug_field='app_slug',
queryset=Webapp.objects.all(),
source='addon'),
serializers.HyperlinkedRelatedField(view_name='app-detail',
read_only=True, source='addon'))
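    # ``app`` is written as a slug or primary key and read back as a
    # hyperlink to the app detail view - the two halves of the SplitField.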
body = serializers.CharField()
user = AccountSerializer(read_only=True)
report_spam = serializers.SerializerMethodField('get_report_spam_link')
resource_uri = serializers.HyperlinkedIdentityField(
view_name='ratings-detail')
is_author = serializers.SerializerMethodField('get_is_author')
has_flagged = serializers.SerializerMethodField('get_has_flagged')
version = SimpleVersionSerializer(read_only=True)
class Meta:
model = Review
fields = ('app', 'body', 'created', 'has_flagged', 'is_author',
'modified', 'rating', 'report_spam', 'resource_uri', 'user',
'version')
def __init__(self, *args, **kwargs):
super(RatingSerializer, self).__init__(*args, **kwargs)
if 'request' in self.context:
self.request = self.context['request']
else:
self.request = None
if not self.request or not self.request.amo_user:
self.fields.pop('is_author')
self.fields.pop('has_flagged')
if self.request and self.request.method in ('PUT', 'PATCH'):
            # Don't let users modify the 'app' field at edit time.
self.fields['app'].read_only = True
def get_report_spam_link(self, obj):
return reverse('ratings-flag', kwargs={'pk': obj.pk})
def get_is_author(self, obj):
return obj.user.pk == self.request.amo_user.pk
def get_has_flagged(self, obj):
return (not self.get_is_author(obj) and
obj.reviewflag_set.filter(user=self.request.amo_user).exists())
def validate(self, attrs):
if not getattr(self, 'object'):
# If we are creating a rating, then we need to do various checks on
# the app. Because these checks need the version as well, we have
# to do them here and not in validate_app().
# Assign user and ip_address. It won't change once the review is
# created.
attrs['user'] = self.request.amo_user
attrs['ip_address'] = self.request.META.get('REMOTE_ADDR', '')
# If the app is packaged, add in the current version.
if attrs['addon'].is_packaged:
attrs['version'] = attrs['addon'].current_version
# Return 409 if the user has already reviewed this app.
app = attrs['addon']
amo_user = self.request.amo_user
qs = self.context['view'].queryset.filter(addon=app, user=amo_user)
if app.is_packaged:
qs = qs.filter(version=attrs['version'])
if qs.exists():
raise Conflict('You have already reviewed this app.')
            # Return 403 if the app is not public.
if not app.is_public():
raise PermissionDenied('The app requested is not public.')
# Return 403 if the user is attempting to review their own app.
if app.has_author(amo_user):
raise PermissionDenied('You may not review your own app.')
# Return 403 if not a free app and the user hasn't purchased it.
if app.is_premium() and not app.is_purchased(amo_user):
raise PermissionDenied("You may not review paid apps you "
"haven't purchased.")
# Return 403 if the app is not available in the current region.
current_region = get_region()
if not app.listed_in(region=current_region):
raise PermissionDenied('App not available in region "%s".' %
current_region.slug)
return attrs
def validate_app(self, attrs, source):
# Don't allow users to change the app on an existing rating.
if getattr(self, 'object'):
attrs[source] = self.object.addon
return attrs
class RatingFlagSerializer(serializers.ModelSerializer):
user = serializers.Field()
review_id = serializers.Field()
class Meta:
model = ReviewFlag
fields = ('review_id', 'flag', 'note', 'user')
def validate(self, attrs):
attrs['user'] = self.context['request'].amo_user
attrs['review_id'] = self.context['view'].kwargs['review']
if 'note' in attrs and attrs['note'].strip():
attrs['flag'] = ReviewFlag.OTHER
if ReviewFlag.objects.filter(review_id=attrs['review_id'],
user=attrs['user']).exists():
raise Conflict('You have already flagged this review.')
return attrs
| bsd-3-clause | 745,364,576,226,022,000 | 40.732824 | 79 | 0.605817 | false | 4.342335 | false | false | false |
| d120/pyofahrt | ofahrtbase/migrations/0020_auto_20171016_2206.py | 1 | 2501 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-16 20:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ofahrtbase', '0019_auto_20161020_1750'),
]
operations = [
migrations.AlterField(
model_name='ofahrt',
name='max_members',
field=models.IntegerField(
default=70,
help_text=
'Dieser Wert bestimmt die maximale Größe der Festanmeldeliste.',
verbose_name='Maximale Teilnehmendenzahl'),
),
migrations.AlterField(
model_name='ofahrt',
name='member_reg_open',
field=models.BooleanField(
default=False,
help_text=
'Ist dieser Wert aktiviert, können sich Teilnehmer*innen registrieren.',
verbose_name='Teilnehmeregistrierung'),
),
migrations.AlterField(
model_name='ofahrt',
name='orga_reg_open',
field=models.BooleanField(
default=False,
help_text=
'Ist dieser Wert aktiviert, können sich Studierende als Ofahrtorga bewerben.',
verbose_name='Orgaregistrierung'),
),
migrations.AlterField(
model_name='ofahrt',
name='queue_tolerance',
field=models.IntegerField(
default=20,
help_text=
'Dieser Wert legt fest, ab wann Neuanmeldungen von Teilnehmer*innen in die Warteschlange müssen. (Warteschlange falls: aktuelle Festanmeldungen + aktuell vorläufige Anmeldungen > maximale Festanmeldungen + dieser Wert)',
verbose_name='Warteschlangentoleranz'),
),
migrations.AlterField(
model_name='ofahrt',
name='self_participation',
field=models.IntegerField(
default=2000,
help_text='Eingenanteil der Teilnehmer*innen in Cent',
verbose_name='Teilnahmebeitrag'),
),
migrations.AlterField(
model_name='ofahrt',
name='workshop_reg_open',
field=models.BooleanField(
default=False,
help_text=
'Ist dieser Wert aktiviert, werden derzeit Workshops gesucht.',
verbose_name='Workshopregistrierung'),
),
]
| agpl-3.0 | 1,181,482,808,610,669,800 | 35.691176 | 236 | 0.558717 | false | 3.718331 | false | false | false |
| mozilla/kitchensinkserver | vendor-local/lib/python/tastypie/api.py | 1 | 6721 |
import warnings
from django.conf.urls.defaults import *
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest
from tastypie.exceptions import NotRegistered, BadRequest
from tastypie.serializers import Serializer
from tastypie.utils import trailing_slash, is_valid_jsonp_callback_value
from tastypie.utils.mime import determine_format, build_content_type
class Api(object):
"""
Implements a registry to tie together the various resources that make up
an API.
Especially useful for navigation, HATEOAS and for providing multiple
versions of your API.
Optionally supplying ``api_name`` allows you to name the API. Generally,
this is done with version numbers (i.e. ``v1``, ``v2``, etc.) but can
be named any string.
"""
def __init__(self, api_name="v1", serializer_class=Serializer):
self.api_name = api_name
self._registry = {}
self._canonicals = {}
self.serializer = serializer_class()
def register(self, resource, canonical=True):
"""
Registers an instance of a ``Resource`` subclass with the API.
Optionally accept a ``canonical`` argument, which indicates that the
resource being registered is the canonical variant. Defaults to
``True``.
"""
resource_name = getattr(resource._meta, 'resource_name', None)
if resource_name is None:
raise ImproperlyConfigured("Resource %r must define a 'resource_name'." % resource)
self._registry[resource_name] = resource
if canonical is True:
if resource_name in self._canonicals:
warnings.warn("A new resource '%r' is replacing the existing canonical URL for '%s'." % (resource, resource_name), Warning, stacklevel=2)
self._canonicals[resource_name] = resource
# TODO: This is messy, but makes URI resolution on FK/M2M fields
# work consistently.
resource._meta.api_name = self.api_name
resource.__class__.Meta.api_name = self.api_name
def unregister(self, resource_name):
"""
If present, unregisters a resource from the API.
"""
if resource_name in self._registry:
del(self._registry[resource_name])
if resource_name in self._canonicals:
del(self._canonicals[resource_name])
def canonical_resource_for(self, resource_name):
"""
Returns the canonical resource for a given ``resource_name``.
"""
if resource_name in self._canonicals:
return self._canonicals[resource_name]
raise NotRegistered("No resource was registered as canonical for '%s'." % resource_name)
def wrap_view(self, view):
def wrapper(request, *args, **kwargs):
try:
return getattr(self, view)(request, *args, **kwargs)
except BadRequest:
return HttpResponseBadRequest()
return wrapper
def override_urls(self):
"""
Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
"""
return []
def prepend_urls(self):
"""
A hook for adding your own URLs or matching before the default URLs.
"""
return []
@property
def urls(self):
"""
Provides URLconf details for the ``Api`` and all registered
``Resources`` beneath it.
"""
pattern_list = [
url(r"^(?P<api_name>%s)%s$" % (self.api_name, trailing_slash()), self.wrap_view('top_level'), name="api_%s_top_level" % self.api_name),
]
for name in sorted(self._registry.keys()):
self._registry[name].api_name = self.api_name
pattern_list.append((r"^(?P<api_name>%s)/" % self.api_name, include(self._registry[name].urls)))
urlpatterns = self.prepend_urls()
overridden_urls = self.override_urls()
if overridden_urls:
warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
urlpatterns += overridden_urls
urlpatterns += patterns('',
*pattern_list
)
return urlpatterns
def top_level(self, request, api_name=None):
"""
A view that returns a serialized list of all resources registers
to the ``Api``. Useful for discovery.
"""
available_resources = {}
if api_name is None:
api_name = self.api_name
for name in sorted(self._registry.keys()):
available_resources[name] = {
'list_endpoint': self._build_reverse_url("api_dispatch_list", kwargs={
'api_name': api_name,
'resource_name': name,
}),
'schema': self._build_reverse_url("api_get_schema", kwargs={
'api_name': api_name,
'resource_name': name,
}),
}
desired_format = determine_format(request, self.serializer)
options = {}
if 'text/javascript' in desired_format:
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
serialized = self.serializer.serialize(available_resources, desired_format, options)
return HttpResponse(content=serialized, content_type=build_content_type(desired_format))
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedApi._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
class NamespacedApi(Api):
"""
An API subclass that respects Django namespaces.
"""
def __init__(self, api_name="v1", urlconf_namespace=None, **kwargs):
super(NamespacedApi, self).__init__(api_name=api_name, **kwargs)
self.urlconf_namespace = urlconf_namespace
def register(self, resource, canonical=True):
super(NamespacedApi, self).register(resource, canonical=canonical)
if canonical is True:
# Plop in the namespace here as well.
resource._meta.urlconf_namespace = self.urlconf_namespace
def _build_reverse_url(self, name, args=None, kwargs=None):
namespaced = "%s:%s" % (self.urlconf_namespace, name)
return reverse(namespaced, args=args, kwargs=kwargs)
| bsd-3-clause | -851,501,442,314,775,000 | 35.726776 | 153 | 0.613599 | false | 4.313864 | false | false | false |
| mfiers/Moa | moa/utils.py | 1 | 7301 |
#!/usr/bin/env python
# Copyright 2009-2011 Mark Fiers
# The New Zealand Institute for Plant & Food Research
#
# This file is part of Moa - http://github.com/mfiers/Moa
#
# Licensed under the GPL license (see 'COPYING')
#
"""
moa.utils
---------
A set of random utilities used by Moa
"""
from email.mime.text import MIMEText
import fcntl
import os
import smtplib
import struct
import subprocess
import re
import sys
import termios
import traceback
import moa.utils
import moa.logger as l
def removeIndent(txt):
"""
    Removes common indentation from a text - for use by moa.args and moa.api
"""
ld = [x.replace("\t", " ").rstrip()
for x in txt.split("\n")]
re_firstNonSpace = re.compile('\S')
indents = []
for line in ld:
# ignore empty lines
if not line:
continue
fns = re_firstNonSpace.search(line)
if fns:
indents.append(fns.start())
minIndent = min(indents)
nld = []
for line in ld:
if not line:
nld.append("")
else:
nld.append(line[minIndent:])
return "\n".join(nld)
def sendmail(server, sender, recipient, subject, message):
"""
Send an email.
"""
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipient
smtp_server = smtplib.SMTP(server)
smtp_server.sendmail(sender, [recipient], msg.as_string())
smtp_server.quit()
def niceRunTime(d):
"""
Nice representation of the run time
d is time duration string
"""
    d = str(d)
    if ',' in d:
        days, time = d.split(',')
        # str(timedelta) renders e.g. "2 days, 3:04:05" - keep the number only
        days = int(days.strip().split()[0])
    else:
        days = 0
        time = d
    hours, minutes, seconds = time.split(':')
    hours, minutes = int(hours), int(minutes)
    # str(timedelta) only carries a fractional part when microseconds are set
    if '.' in seconds:
        seconds, miliseconds = seconds.split('.')
    else:
        miliseconds = 0
    seconds = int(seconds)
    miliseconds = int(miliseconds)
    if days > 0:
        if days == 1:
            return "1 day, %d hrs" % hours
        else:
            return "%d days, %d hrs" % (days, hours)
if hours == 0 and minutes == 0 and seconds == 0:
return "<1 sec"
if hours > 0:
return "%d:%02d hrs" % (hours, minutes)
elif minutes > 0:
return "%d:%02d min" % (minutes, seconds)
else:
return "%d sec" % seconds
def getCwd():
"""
Do not use os.getcwd() -
need to make sure symbolic links do not get dereferenced
hijacked some code from:
http://stackoverflow.com/questions/123958/how-to-get-set-logical-directory-path-in-python
"""
cwd = os.environ.get("PWD")
if cwd is not None:
return cwd
# no environment. fall back to calling pwd on shell
cwd = subprocess.Popen(
'pwd',
stdout=subprocess.PIPE).communicate()[0].strip()
return cwd
def getTerminalSize():
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
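# Illustrative use (actual values depend on the terminal):
#
#   cols, rows = getTerminalSize()   # e.g. (80, 25) when no tty is found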
def getProcessInfo(pid):
"""
Return some info on a process
"""
cl = ('ps --no-heading -fp %s' % (pid)).split()
p = subprocess.Popen(cl, stdout=subprocess.PIPE)
out = p.communicate()[0].strip().split(None, 7)
if not out:
return {}
pi = dict(zip(
'uid pid ppid c stime tty time cmd'.split(), out))
# check if this is moa invocation
if 'python' in pi['cmd'] and \
'moa' in pi['cmd']:
pi['moa'] = True
else:
pi['moa'] = False
return pi
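# Illustrative use (the PID is hypothetical):
#
#   pi = getProcessInfo(os.getpid())
#   # pi is {} for a vanished PID, otherwise it has keys such as
#   # 'uid', 'pid', 'ppid', 'cmd' and the derived 'moa' flag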
def getMoaBase():
"""
Return MOABASE - the directory where Moa is installed. This
function also sets an environment variable `MOABASE`
>>> d = getMoaBase()
>>> assert(os.path.isdir(d))
>>> assert(os.path.isfile(os.path.join(d, 'README')))
>>> assert(os.path.isdir(os.path.join(d, 'lib')))
:rtype: string (path)
"""
if 'MOABASE' in os.environ:
MOABASE = os.environ["MOABASE"]
return MOABASE
thif = os.path.dirname(os.path.dirname(__file__))
if thif[-4:] == '.egg':
MOABASE = thif
else:
MOABASE = '/usr/share/moa'
# for depending scripts
os.putenv('MOABASE', MOABASE)
return MOABASE
def moaDirOrExit(job):
"""
Check if the job contains a proper Moa job, if not, exit with an
error message and a non-zero exit code.
:param job: An instance of :class:`moa.job.Job`
"""
    import moa.ui  # local import avoids a circular import at module load
    if not job.isMoa():
        moa.ui.exit("Need a Moa job")
        sys.exit(-1)
def deprecated(func):
"""
Decorator function to flag a function as deprecated
:param func: any function
"""
def depfunc(*args, **kwargs):
l.critical('Calling deprecated function %s' % func.__name__)
l.critical("\n" + "\n".join(traceback.format_stack()))
func(*args, **kwargs)
return depfunc
def printstack(func):
"""
Decorator function to print stack
:param func: any function
"""
def depfunc(*args, **kwargs):
l.critical("\n" + "\n".join(traceback.format_stack()[:-1]))
func(*args, **kwargs)
return depfunc
def simple_decorator(decorator):
"""
This decorator can be used to turn simple functions into
well-behaved decorators, so long as the decorators are fairly
simple. If a decorator expects a function and returns a function
(no descriptors), and if it doesn't modify function attributes or
docstring, then it is eligible to use this. Simply apply
@simple_decorator to your decorator and it will automatically
preserve the docstring and function attributes of functions to
which it is applied.
    Note: I got this code from somewhere, but forgot where
exactly. This seems the most likely source:
http://svn.navi.cx/misc/trunk/djblets/djblets/util/decorators.py
"""
def new_decorator(f):
g = decorator(f)
g.__name__ = f.__name__
g.__doc__ = f.__doc__
g.__dict__.update(f.__dict__)
return g
# Now a few lines needed to make simple_decorator itself
# be a well-behaved decorator.
new_decorator.__name__ = decorator.__name__
new_decorator.__doc__ = decorator.__doc__
new_decorator.__dict__.update(decorator.__dict__)
return new_decorator
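# Illustrative use (the decorator below is hypothetical):
#
#   @simple_decorator
#   def traced(func):
#       def wrapper(*args, **kwargs):
#           l.critical("calling %s" % func.__name__)
#           return func(*args, **kwargs)
#       return wrapper
#
# A function decorated with @traced keeps its original __name__ and __doc__.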
@simple_decorator
def flog(func):
"""
A simple logger - uses the :mod:`moa.logger` code to log the
calling function. Use as a decorator::
@moa.utils.flog
def any_function(*args);
...
This is for debugging purposes (obviously)
:param func: Any python function
"""
def flogger(*args, **kwargs):
l.critical("Executing %s" % func.__name__)
for a in args:
l.error(" - calling with arg %s" % a)
for k in kwargs.keys():
l.error(" - calling with kwargs %s=%s" % (k, kwargs[k]))
return func(*args, **kwargs)
return flogger
|
gpl-3.0
| 182,209,178,476,799,500
| 24.003425
| 93
| 0.583482
| false
| 3.582434
| false
| false
| false
|
django-danceschool/django-danceschool
|
danceschool/core/management/commands/setup_permissions.py
|
1
|
9771
|
from django.core.management.base import BaseCommand
from django.apps import apps
from django.contrib.auth.models import Group, Permission
from six.moves import input
try:
import readline
except ImportError:
pass
class Command(BaseCommand):
help = 'Create default groups and permissions for standard dance school setups'
def boolean_input(self, question, default=None):
'''
Method for yes/no boolean inputs
'''
result = input("%s: " % question)
if not result and default is not None:
return default
while len(result) < 1 or result[0].lower() not in "yn":
result = input("Please answer yes or no: ")
return result[0].lower() == "y"
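    # Illustrative call (the prompt text is hypothetical); inside handle()
    # one might write:
    #
    #   proceed = self.boolean_input('Continue with setup [Y/n]', True)
    #   # returns True on empty input (the default), else matches y/n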
def handle(self, *args, **options):
self.stdout.write(
"""
USER GROUPS AND PERMISSIONS
---------------------------
This project allows you to provide finely-grained permissions to individual users and
user groups, such as instructors and administrators. This allows you to let different
types of users manage different types of content while still maintaining appropriate
security.
To get you started with the permissions system, we can create three initial user
groups, and give them different levels of permissions over content:
- The "Board" group: Users in this group will receive permissions to edit
all public-facing content as well as all financial records. They will not
automatically receive permissions to edit certain other sitewide settings for
security reasons.
- The "Instructor" group: Users in this group will receive permissions to use
school administrative functions such as emailing students, submitting expenses
and revenues, and viewing their own statistics and payment history. However, by
default, these users cannot edit public-facing content such as page content or
FAQs.
- The "Registration Desk" group: Users in this group receive only the ability
to log into the site in order to view class registrations and check in students.
By default, they cannot access any other administrative function.
We strongly encourage you to create these initial groups as a starting point for
managing staff permissions on the site. The superuser that you created previously
will always retain permissions to edit all content and settings. Additionally, you
can always go on to create additional groups, or to edit permissions on either a
group basis or an individual user basis.
Note: This process may take a minute or two to complete.
"""
)
create_board_group = self.boolean_input('Create \'Board\' group with default initial permissions [Y/n]', True)
if create_board_group:
board_group = Group.objects.get_or_create(name='Board')[0]
            # The Board group gets all permissions on the CMS app and on all danceschool apps, plus
            # the permissions explicitly listed here by their natural_key. Unfortunately this is
            # slow because we have to check permissions one-by-one.
give_explicit = [
('add_emailaddress', 'account', 'emailaddress'),
('change_emailaddress', 'account', 'emailaddress'),
('delete_emailaddress', 'account', 'emailaddress'),
('add_user', 'auth', 'user'),
('change_user', 'auth', 'user'),
]
app_add_list = ['cms', 'core', 'djangocms_forms', 'djangocms_text_ckeditor', 'easy_thumbnails', 'filer']
for this_app in [
'danceschool.financial',
'danceschool.discounts',
'danceschool.door',
'danceschool.faq',
'danceschool.guestlist',
'danceschool.news',
'danceschool.prerequisites',
'danceschool.private_events',
'danceschool.private_lessons',
'danceschool.stats',
'danceschool.vouchers',
'danceschool.banlist',
'danceschool.payments.payatdoor',
'danceschool.payments.paypal',
'danceschool.payments.stripe',
'danceschool.payments.square',
]:
if apps.is_installed(this_app):
app_add_list.append(this_app.split('.')[1])
for perm in Permission.objects.all():
if perm.natural_key() in give_explicit or perm.natural_key()[1] in app_add_list:
board_group.permissions.add(perm)
self.stdout.write('Finished creating \'Board\' group and setting initial permissions.\n')
create_instructor_group = self.boolean_input(
'Create \'Instructor\' group with default initial permissions [Y/n]', True
)
if create_instructor_group:
instructor_group = Group.objects.get_or_create(name='Instructor')[0]
give_explicit = [
('view_page', 'cms', 'page'),
('add_classdescription', 'core', 'classdescription'),
('change_classdescription', 'core', 'classdescription'),
('can_autocomplete_users', 'core', 'customer'),
('send_email', 'core', 'emailtemplate'),
('report_substitute_teaching', 'core', 'eventstaffmember'),
('update_instructor_bio', 'core', 'instructor'),
('view_own_instructor_finances', 'core', 'instructor'),
('view_own_instructor_stats', 'core', 'instructor'),
('process_refunds', 'core', 'invoice'),
('send_invoices', 'core', 'invoice'),
('view_all_invoices', 'core', 'invoice'),
('accept_door_payments', 'core', 'registration'),
('checkin_customers', 'core', 'registration'),
('ajax_registration', 'core', 'registration'),
('override_register_closed', 'core', 'registration'),
('override_register_dropins', 'core', 'registration'),
('override_register_soldout', 'core', 'registration'),
('register_dropins', 'core', 'registration'),
('view_registration_summary', 'core', 'registration'),
('view_school_stats', 'core', 'staffmember'),
('view_staff_directory', 'core', 'staffmember'),
('add_file', 'filer', 'file'),
('change_file', 'filer', 'file'),
('can_use_directory_listing', 'filer', 'folder'),
('add_image', 'filer', 'image'),
('change_image', 'filer', 'image'),
('add_expenseitem', 'financial', 'expenseitem'),
('mark_expenses_paid', 'financial', 'expenseitem'),
('add_revenueitem', 'financial', 'revenueitem'),
('view_finances_bymonth', 'financial', 'revenueitem'),
('mark_revenues_received', 'financial', 'revenueitem'),
('add_newsitem', 'news', 'newsitem'),
('change_newsitem', 'news', 'newsitem'),
('ignore_requirements', 'prerequisites', 'requirement'),
('add_eventreminder', 'private_events', 'eventreminder'),
('change_eventreminder', 'private_events', 'eventreminder'),
('delete_eventreminder', 'private_events', 'eventreminder'),
('add_privateevent', 'private_events', 'privateevent'),
('change_privateevent', 'private_events', 'privateevent'),
('delete_privateevent', 'private_events', 'privateevent'),
('edit_own_availability', 'private_lessons', 'instructoravailabilityslot'),
('view_banlist', 'banlist', 'bannedperson'),
('view_guestlist', 'guestlist', 'guestlist'),
]
for perm in Permission.objects.all():
if perm.natural_key() in give_explicit:
instructor_group.permissions.add(perm)
self.stdout.write('Finished creating \'Instructor\' group and setting initial permissions.\n')
create_regdesk_group = self.boolean_input(
'Create \'Registration Desk\' group with default initial permissions [Y/n]',
True
)
if create_regdesk_group:
regdesk_group = Group.objects.get_or_create(name='Registration Desk')[0]
give_explicit = [
('view_page', 'cms', 'page'),
('can_autocomplete_users', 'core', 'customer'),
('process_refunds', 'core', 'invoice'),
('send_invoices', 'core', 'invoice'),
('view_all_invoices', 'core', 'invoice'),
('accept_door_payments', 'core', 'registration'),
('checkin_customers', 'core', 'registration'),
('override_register_closed', 'core', 'registration'),
('override_register_dropins', 'core', 'registration'),
('override_register_soldout', 'core', 'registration'),
('ajax_registration', 'core', 'registration'),
('register_dropins', 'core', 'registration'),
('view_registration_summary', 'core', 'registration'),
('view_staff_directory', 'core', 'staffmember'),
('ignore_requirements', 'prerequisites', 'requirement'),
('view_banlist', 'banlist', 'bannedperson'),
('view_guestlist', 'guestlist', 'guestlist'),
]
for perm in Permission.objects.all():
if perm.natural_key() in give_explicit:
regdesk_group.permissions.add(perm)
            self.stdout.write('Finished creating \'Registration Desk\' group and setting initial permissions.\n')
|
bsd-3-clause
| -4,150,810,581,700,201,000
| 48.852041
| 118
| 0.58735
| false
| 4.461644
| false
| false
| false
|
pacoqueen/ginn
|
db/ajusta_precios_factura.py
|
1
|
2486
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#################################################################
# July 10, 2006.
# Temporary script to adjust the LDV prices of some old invoices
# DUE TO A BUG IN THE EXCEL MACROS that were in use before ginn
# was deployed. To keep the January-May '06 accounting intact,
# this script adjusts the unit prices to the values computed
# (with umpteen decimal places) by Excel so that the invoicing
# totals match.
#################################################################
import sys, os
sys.path.append(os.path.join('..', 'framework'))
from framework import pclases
# List of (idldv, unit price) pairs:
ldvs = ((69, 0.39169906),
(77, 0.36143386),
(131, 0.21685764),
(141, 0.51147259),
(275, 0.23219231),
(149, 0.27408263),
(534, 0.3329553),
(561, 0.29571618),
(604, 1.4923387),
(558, 0.33879479),
(565, 0.39169958),
(540, 1.4923384),
(566, 0.50392024),
(612, 0.29134587),
(616, 0.29479676),
(567, 0.21685841),
(379, 0.50392043),
(339, 0.32200196),
(403, 0.31724339),
(412, 0.67335334),
(513, 0.21685887),
(516, 0.26690208),
(864, 0.21687323),
(167, 0.21685885),
(169, 0.39169906),
(300, 1.4923393),
(178, 0.29134589),
(575, 0.29134666),
(186, 0.39169576),
(194, 0.21365343),
(203, 0.21685893),
(204, 0.50392024)
)
for id, precio in ldvs:
ldv = pclases.LineaDeVenta.get(id)
print "Ajustando LDV %d de %f a %f..." % (id, ldv.precio, precio),
ldv.precio = precio
print "OK (%f)" % ldv.precio
print "Ajustando IVA factura O60001...",
fra = pclases.FacturaVenta.get(197)
fra.iva = 0
print "OK (%f)" % (fra.iva)
print "Ajustando IVA factura O60008...",
fra = pclases.FacturaVenta.get(204)
fra.iva = 0
print "OK (%f)" % (fra.iva)
print "Cambiando número factura O60008 dupliacada a O60011 y el IVA a 0...",
fra = pclases.FacturaVenta.get(207)
fra.numfactura = "O60011"
fra.iva = 0
print "OK (%s, %f)" % (fra.numfactura, fra.iva)
print "Ajustando IVA factura O60013...",
fra = pclases.FacturaVenta.get(209)
fra.iva = 0
print "OK (%f)" % (fra.iva)
print "Cambiando número factura G60003 dupliacada a G60004...",
fra = pclases.FacturaVenta.get(199)
fra.numfactura = "G60004"
print "OK (%s)" % (fra.numfactura)
|
gpl-2.0
| -395,667,731,017,486,700
| 30.405063
| 77
| 0.571544
| false
| 2.614331
| false
| false
| false
|
luoguizhou/gooderp_addons
|
goods/models/goods.py
|
1
|
7593
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError
class Goods(models.Model):
"""
    Inherits the goods model defined in core; defines views and adds fields.
"""
_inherit = 'goods'
@api.multi
def get_parent_tax_rate(self, parent_id):
        # Walk up the goods category tree, level by level, to get a tax rate
tax_rate = parent_id.tax_rate
if not tax_rate and parent_id.parent_id:
tax_rate = self.get_parent_tax_rate(parent_id.parent_id)
return tax_rate
@api.multi
def get_tax_rate(self, goods, partner, type):
"""
        Get the tax rate.
        If the goods carry no tax rate, walk up the goods categories to find one;
        then compare the goods tax rate with the partner tax rate: if both exist,
        take the smaller; if only one exists, take it; if neither exists, take the
        company's input/output tax rate.
"""
if not goods:
return
goods_tax_rate, partner_tax_rate = False, False
        # If the goods carry no tax rate, take the one on the goods category
if goods.tax_rate:
goods_tax_rate = goods.tax_rate
elif goods.goods_class_id.tax_rate:
goods_tax_rate = goods.goods_class_id.tax_rate
        elif goods.goods_class_id.parent_id:  # walk up the category tree for a rate
goods_tax_rate = self.get_parent_tax_rate(goods.goods_class_id.parent_id)
        # Take the partner's tax rate
if partner:
partner_tax_rate = partner.tax_rate
        # Compare the goods tax rate with the partner tax rate and return accordingly
if goods_tax_rate and partner_tax_rate:
if goods_tax_rate >= partner_tax_rate:
return partner_tax_rate
else:
return goods_tax_rate
elif goods_tax_rate and not partner_tax_rate:
return goods_tax_rate
elif not goods_tax_rate and partner_tax_rate:
return partner_tax_rate
else:
if type == 'buy':
return self.env.user.company_id.import_tax_rate
elif type == 'sell':
return self.env.user.company_id.output_tax_rate
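    # Illustrative resolution sketch (the rates below are hypothetical):
    #
    #   goods rate 17, partner rate 13 -> 13 (the smaller rate wins)
    #   goods rate 17, partner unset   -> 17
    #   neither set, type == 'buy'     -> company import_tax_rate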
no_stock = fields.Boolean(u'虚拟商品')
using_batch = fields.Boolean(u'管理批号')
force_batch_one = fields.Boolean(u'管理序列号')
attribute_ids = fields.One2many('attribute', 'goods_id', string=u'属性')
image = fields.Binary(u'图片', attachment=True)
supplier_id = fields.Many2one('partner',
u'默认供应商',
ondelete='restrict',
domain=[('s_category_id', '!=', False)])
price = fields.Float(u'零售价')
barcode = fields.Char(u'条形码')
note = fields.Text(u'备注')
goods_class_id = fields.Many2one(
'goods.class', string=u'商品分类',
help="Those categories are used to group similar products for point of sale.")
_sql_constraints = [
('barcode_uniq', 'unique(barcode)', u'条形码不能重复'),
]
@api.onchange('uom_id')
def onchange_uom(self):
"""
        :return: when a unit is selected, the auxiliary unit defaults to the same unit.
"""
self.uos_id = self.uom_id
@api.onchange('using_batch')
def onchange_using_batch(self):
"""
        :return: when 'manage batch numbers' is unchecked, 'manage serial numbers' is unchecked automatically.
"""
if not self.using_batch:
self.force_batch_one = False
def conversion_unit(self, qty):
""" 数量 × 转化率 = 辅助数量
:param qty: 传进来数量计算出辅助数量
:return: 返回辅助数量
"""
self.ensure_one()
return self.conversion * qty
def anti_conversion_unit(self, qty):
""" 数量 = 辅助数量 / 转化率
:param qty: 传入值为辅助数量
:return: 数量
"""
self.ensure_one()
return self.conversion and qty / self.conversion or 0
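    # Usage sketch (assumes a hypothetical record with conversion == 24,
    # e.g. 24 bottles per box):
    #
    #   goods.conversion_unit(2)        # -> 48 (boxes to bottles)
    #   goods.anti_conversion_unit(48)  # -> 2  (bottles back to boxes)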
class Attribute(models.Model):
_name = 'attribute'
_description = u'属性'
@api.one
@api.depends('value_ids')
def _compute_name(self):
self.name = ' '.join(
[value.category_id.name + ':' + value.value_id.name for value in self.value_ids])
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
        '''Support searching by barcode in many2one fields'''
args = args or []
if name:
attribute_ids = self.search([('ean', '=', name)])
if attribute_ids:
return attribute_ids.name_get()
return super(Attribute, self).name_search(
name=name, args=args, operator=operator, limit=limit)
ean = fields.Char(u'条码')
name = fields.Char(u'属性', compute='_compute_name',
store=True, readonly=True)
goods_id = fields.Many2one('goods', u'商品', ondelete='cascade')
value_ids = fields.One2many(
'attribute.value', 'attribute_id', string=u'属性')
company_id = fields.Many2one(
'res.company',
string=u'公司',
change_default=True,
default=lambda self: self.env['res.company']._company_default_get())
_sql_constraints = [
('ean_uniq', 'unique (ean)', u'该条码已存在'),
('goods_attribute_uniq', 'unique (goods_id, name)', u'该SKU已存在'),
]
@api.one
@api.constrains('value_ids')
def check_value_ids(self):
att_dict = {}
for line in self.value_ids:
            if line.category_id not in att_dict:
att_dict[line.category_id] = line.category_id
else:
raise UserError(u'属性值的类别不能相同')
class AttributeValue(models.Model):
_name = 'attribute.value'
_rec_name = 'value_id'
_description = u'属性明细'
attribute_id = fields.Many2one('attribute', u'属性', ondelete='cascade')
category_id = fields.Many2one('core.category', u'属性',
ondelete='cascade',
domain=[('type', '=', 'attribute')],
context={'type': 'attribute'},
required='1')
value_id = fields.Many2one('attribute.value.value', u'值',
ondelete='restrict',
domain="[('category_id','=',category_id)]",
default=lambda self: self.env.context.get(
'default_category_id'),
required='1')
company_id = fields.Many2one(
'res.company',
string=u'公司',
change_default=True,
default=lambda self: self.env['res.company']._company_default_get())
class AttributeValueValue(models.Model):
_name = 'attribute.value.value'
_description = u'属性值'
category_id = fields.Many2one('core.category', u'属性',
ondelete='cascade',
domain=[('type', '=', 'attribute')],
context={'type': 'attribute'},
required='1')
name = fields.Char(u'值', required=True)
company_id = fields.Many2one(
'res.company',
string=u'公司',
change_default=True,
default=lambda self: self.env['res.company']._company_default_get())
_sql_constraints = [
('name_category_uniq', 'unique(category_id,name)', u'同一属性的值不能重复')
]
|
agpl-3.0
| -1,692,061,697,503,829,800
| 32.743842
| 93
| 0.54219
| false
| 2.978261
| false
| false
| false
|
funilrys/A-John-Shots
|
a_john_shots/helpers.py
|
1
|
5760
|
#!/bin/env python3
"""
A John Shots - A tool to get the Security Hash Algorithms (SHA) of all files in a given path.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Contributors:
Let's contribute to A John Shots!
Project link:
https://github.com/funilrys/A-John-Shots
License:
::
MIT License
Copyright (c) 2017-2019 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# pylint: disable=bad-continuation, too-many-arguments
from re import compile as comp
def combine_dicts(dict_1, dict_2):
"""
    Combine two dictionaries.
:param dict_1: The first dict.
:type dict_1: dict
:param dict_2: The second dict.
:type dict_2: dict
:return: The combined dict.
:rtype: dict
"""
result = {}
for key, value in dict_1.items():
if key in dict_2.keys():
if isinstance(dict_2[key], dict):
result[key] = combine_dicts(value, dict_2.pop(key))
else:
result[key] = value
for key, value in dict_2.items():
result[key] = value
return result
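# Illustrative example (the dictionaries are hypothetical; key order in the
# printed result may differ):
#
# >>> combine_dicts({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
# {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}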
class Regex: # pylint: disable=too-few-public-methods
"""
    A simple implementation of the Python re package
Arguments:
- data: str or list
The data or a list of data to check.
- regex: str or list
        The regex or a list of regex.
    - return_data: bool
        - True: Return the matched string
        - False: Return True or False
- group: int
The group to return.
- rematch: bool
Implementation of Bash ${BASH_REMATCH}.
        - True: Return the matched groups in a list format.
- occurences: int
        The number of occurrences to replace.
"""
def __init__(
self, data, regex, group=0, occurences=0, rematch=False, return_data=True
):
super(Regex, self).__init__()
        # We initiate the needed variables so they are usable all over the class
self.data = data
self.regex = regex
self.group = group
self.occurences = occurences
self.rematch = rematch
self.return_data = return_data
def match(self, regex=None, data_to_match=None):
"""
        Used to get an exploitable result from re.search
Arguments:
- data: str
The data or a list of data to check.
- regex: str
The regex or a list or regex.
Returns:
list or bool
- bool: if self.return_data is False
- list: otherwise
"""
        # We initiate the variable which will contain the returned data
result = []
if not regex:
regex = self.regex
if not data_to_match:
data_to_match = self.data
# We compile the regex string
to_match = comp(regex)
        # If we have to use the implementation of ${BASH_REMATCH}, we use
        # re.findall; otherwise, we use re.search
if self.rematch:
pre_result = to_match.findall(data_to_match)
else:
pre_result = to_match.search(data_to_match)
if self.return_data and pre_result is not None:
if self.rematch:
for data in pre_result:
if isinstance(data, tuple):
result.extend(list(data))
else:
result.append(data)
if self.group != 0:
return result[self.group]
else:
result = pre_result.group(self.group).strip()
return result
if not self.return_data and pre_result is not None:
return True
return False
def loop_matching(self):
"""
This method can be used to perform a loop matching.
"""
results = []
if isinstance(self.data, str):
if isinstance(self.regex, list):
for exp in self.regex:
matched = self.match(regex=exp)
try:
results.extend(matched)
except TypeError:
results.append(matched)
if not self.return_data:
if True in results:
return True
return False
else:
return self.match()
elif isinstance(self.data, list) and isinstance(self.regex, str):
for string in self.data:
results.extend(self.match(data_to_match=string))
return results
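# Illustrative examples (the data and patterns are hypothetical):
#
# >>> Regex("hello world", r"(hello) (world)", rematch=True).match()
# ['hello', 'world']
# >>> Regex("hello world", r"^hello", return_data=False).match()
# True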
|
mit
| 3,648,964,506,032,662,000
| 28.84456
| 93
| 0.584201
| false
| 4.350453
| false
| false
| false
|
katyhuff/moose
|
python/utils/MooseSourceParser.py
|
1
|
4710
|
import sys
import os
import re
import subprocess
import clang.cindex
if 'MOOSE_CLANG_LIB' not in os.environ:
raise EnvironmentError("Using the MooseSourceParser requires setting 'MOOSE_CLANG_LIB' environment variable to point to the clang library.")
clang.cindex.Config.set_library_path(os.getenv('MOOSE_CLANG_LIB'))
class MooseSourceParser(object):
"""
An object for parsing MOOSE source code.
Args:
app_path[str]: The path that contains the application Makefile (needed for extracting includes).
"""
def __init__(self, app_path):
# Check that the supplied path has a Makefile (for getting includes)
if not os.path.exists(os.path.join(app_path, 'Makefile')):
            #TODO: Make this a MooseException and log the exception and also check that the makefile is one from MOOSE
print 'The supplied application directory does not contain a Makefile:', app_path
return
        # Extract the includes from the Makefile. includes() returns a
        # generator, so materialize it to allow parse() to run more than once.
        self._includes = list(self.includes(app_path))
def parse(self, filename):
"""
Parse the supplied C/h file with clang.
Args:
filename[str]: The filename to parse.
"""
# Check that the supplied file exists
if not os.path.exists(filename):
#TODO: Proper exception and logging
print 'The supplied source/header file does not exist:', filename
return
# Build the flags to pass to clang
includes = ['-x', 'c++', '-std=c++11']
includes += self._includes
# Build clang translation unit
index = clang.cindex.Index.create()
self._translation_unit = index.parse(filename, includes)
@staticmethod
def includes(app_path):
"""
Returns the includes by running 'make echo_include' for an application.
Args:
app_path[str]: A valid moose application or directory with a MOOSE Makefile (e.g., framework).
"""
p = subprocess.Popen(['make', 'echo_include'], cwd=app_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
for match in re.finditer(r'-I(.*?)\s', output):
yield match.group(0).strip().strip('\n')
def method(self, name):
"""
Retrieve a class declaration and definition by name.
Args:
name[str]: The name of the method to extract.
Returns:
decl[str], defn[str]: A string containing the declaration and definition of the desired method.
"""
decl = None
defn = None
cursors = self.find(clang.cindex.CursorKind.CXX_METHOD, name=name)
for c in cursors:
if c.is_definition():
defn = self.content(c)
else:
decl = self.content(c)
return decl, defn
def dump(self, cursor=None, level = 0, **kwargs):
"""
A tool for dumping the cursor tree.
"""
        if cursor is None:
            cursor = self._translation_unit.cursor
recursive = kwargs.pop('recursive', True)
for c in cursor.get_children():
print ' '*4*level, c.kind, c.spelling, c.extent.start.file, c.extent.start.line
if recursive and c.get_children():
self.dump(c, level+1)
@staticmethod
def content(cursor):
source_range = cursor.extent
fid = open(source_range.start.file.name, 'r')
content = fid.read()[source_range.start.offset:source_range.end.offset]
fid.close()
return content
def find(self, kind, **kwargs):
"""
Locate the clang.cindex.Cursor object(s). (public)
Args:
kind[int]: The type of cursor (see clang.cindex.py) to locate.
Kwargs:
name[str]: The name of the cursor to return (i.e., Cursor.spelling)
definition[bool]: Only include items with 'is_definition' set to true.
Returns:
A list of all cursors matching the kind and optionally the name.
"""
name = kwargs.pop('name', None)
defn = kwargs.pop('definition', False)
for cursor in self._translation_unit.cursor.walk_preorder():
            if (hasattr(cursor, 'kind')) and (cursor.kind == kind) and (name is None or cursor.spelling == name):
#print cursor.extent.start.file
yield cursor
if __name__ == '__main__':
src = '/Users/slauae/projects/moose/framework/src/kernels/Diffusion.C'
parser = MooseSourceParser('/Users/slauae/projects/moose/framework')
parser.parse(src)
decl, defn = parser.method('computeQpResidual')
print decl, defn
|
lgpl-2.1
| 5,019,072,454,677,851,000
| 31.937063
| 144
| 0.605732
| false
| 4.095652
| false
| false
| false
|
ShrimpingIt/tableaux
|
regimes/flopbunny/main.py
|
1
|
2389
|
from time import sleep
from uos import urandom
from machine import Pin
from cockle import pins
dataPin = pins[1]
clockPin = pins[2]
latchPin = pins[3]
dataPin.init(Pin.OUT)
latchPin.init(Pin.OUT)
clockPin.init(Pin.OUT)
dataPin.value(0)
latchPin.value(0)
clockPin.value(0)
delay = 1
numLights = 8
byteCount = numLights // 8
backBytes = [0 for pos in range(byteCount)]
def latch():
latchPin.value(1)
latchPin.value(0)
def clock():
clockPin.value(1)
clockPin.value(0)
def writeByte(val):
bit = 1
for step in range(8):
if val & bit != 0:
dataPin.value(1)
else:
dataPin.value(0)
clock()
bit = bit << 1
def send(lit):
if (lit):
dataPin.value(1)
else:
dataPin.value(0)
for step in range(8):
clock()
latch()
def setLight(pos, lit, show=True):
bytePos = pos // 8
bitPos = pos % 8
if lit:
backBytes[bytePos] = backBytes[bytePos] | (1 << bitPos)
else:
backBytes[bytePos] = backBytes[bytePos] & ~(1 << bitPos)
if (show):
flip()
def flip():
for pos in range(len(backBytes)):
writeByte(backBytes[pos])
latch()
def turnOn(lights):
for pos in range(len(lights)):
setLight(lights[pos], True, False)
flip()
def turnOff(lights):
for pos in range(len(lights)):
setLight(lights[pos], False, False)
flip()
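# Usage sketch (assumes the shift-register wiring configured above):
#
#   setLight(0, True)          # set one light and latch immediately
#   turnOn([1, 2])             # set several bits in backBytes, flip() once
#   turnOff(range(numLights))  # clear everything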
def sequence(lights, delay=0.1, count=1):
while True:
for outer in range(len(lights)):
for inner in range(len(lights)):
setLight(lights[inner], inner == outer)
sleep(delay)
def identify():
for lightPos in range(numLights):
setLight(lightPos, False)
for lightPos in range(numLights):
setLight(lightPos, True)
input("Light Number " + str(lightPos))
setLight(lightPos, False)
def walk():
global backBytes
while True:
backBytes = [ord(urandom(1)) for item in backBytes]
flip()
sleep(1)
eyes = [0]
earsUp = [1]
earLeft = [4]
earRight = [5]
earsDown = earLeft + earRight
glasses = [2]
head = [3]
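# Note: the zero-argument sequence() below shadows the parameterised
# sequence(lights, delay, count) defined earlier; only this version is
# reachable from animate().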
def sequence():
turnOn(head + glasses + eyes)
turnOff(earsUp); turnOn(earsDown)
sleep(1)
turnOff(earsDown); turnOn(earsUp)
sleep(1)
def animate():
while True:
sequence()
def illuminate():
turnOn(range(numLights))
animate()
|
agpl-3.0
| -6,227,094,358,887,579,000
| 17.098485
| 64
| 0.598995
| false
| 3.047194
| false
| false
| false
|
SangRyul/bamboo
|
edit/change.py
|
1
|
1175
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
def fileformatting():
import re
errorlog = open("error_log.txt","w+", encoding = "UTF8")
for x in range(1,1015):
try:
#please change this number
f = open(str(x)+".txt", "r+", encoding = "UTF8")
            time = f.readline()  # timestamp (first line of the file)
r = f.read()
if("<br /" in r):
r = r.replace("<br />", "")
article = r.split("#대나무")
for k in range(len(article)):
if(len(article[k])>1 and article[k][0].isdigit()):
bamboo_name = re.search(r'\d+', article[k]).group()
article[k] = article[k].replace(bamboo_name, "")
newfile = open(bamboo_name+".txt", "w+", encoding = "UTF8")
newfile.write(time)
newfile.write(article[k])
print(x)
except:
            errorlog.write(str(x) + ' file is corrupted \n')
if __name__ == "__main__":
fileformatting()
|
gpl-3.0
| -1,194,179,583,405,264,600
| 27.04878
| 79
| 0.422106
| false
| 3.590625
| false
| false
| false
|
TejasM/wisely
|
wisely_project/users/urls.py
|
1
|
1393
|
import views
__author__ = 'tmehta'
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^login/$', views.login_user, name='login'),
url(r'^logout/$', views.logout_user, name='logout'),
url(r'^signup/$', views.signup, name='sign-up'),
url(r'^index/$', views.index_alt, name='index'),
url(r'^index/alt$', views.index_alt, name='index_alt'),
url(r'^check_updated/$', views.check_updated, name='check_update'),
url(r'^force_updated/$', views.force_updated, name='force_update'),
url(r'^profile/$', views.profile, name='profile'),
url(r'^edit_profile/$', views.edit_profile, name='edit_profile'),
url(r'^profile/(?P<user_id>\w+)/$', views.public_profile, name='public_profile'),
url(r'^news/$', views.news, name='news'),
url(r'^compose/$', views.compose, name='compose'),
url(r'^reply/$', views.reply, name='reply'),
url(r'^follow/$', views.follow, name='follow'),
url(r'^get_course_stats/$', views.get_course_stats, name='get_course_stats'),
url(r'^contact_us/$', views.contact_us, name='contact_us'),
)
|
mit
| 6,447,398,637,610,337,000
| 57.083333
| 104
| 0.492462
| false
| 4.133531
| false
| true
| false
|
TransparentHealth/hhs_oauth_client
|
apps/provider/migrations/0010_auto_20160623_1813.py
|
1
|
1676
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('provider', '0009_auto_20160623_1600'),
]
operations = [
migrations.RemoveField(
model_name='organization',
name='addresses',
),
migrations.RemoveField(
model_name='organization',
name='affiliations',
),
migrations.RemoveField(
model_name='organization',
name='licenses',
),
migrations.RemoveField(
model_name='organization',
name='taxonomies',
),
migrations.RemoveField(
model_name='practitioner',
name='addresses',
),
migrations.RemoveField(
model_name='practitioner',
name='affiliations',
),
migrations.RemoveField(
model_name='practitioner',
name='licenses',
),
migrations.RemoveField(
model_name='practitioner',
name='taxonomies',
),
migrations.AddField(
model_name='affiliation',
name='npi',
field=models.CharField(default='', max_length=10, blank=True),
),
migrations.AddField(
model_name='license',
name='npi',
field=models.CharField(default='', max_length=10, blank=True),
),
migrations.AddField(
model_name='taxonomy',
name='npi',
field=models.CharField(default='', max_length=10, blank=True),
),
]
|
apache-2.0
| -2,265,275,564,265,581,000
| 26.47541
| 74
| 0.520883
| false
| 4.629834
| false
| false
| false
|
mtwilliams/mojo
|
dependencies/assimp-2.0.863/port/PyAssimp/pyassimp/structs.py
|
1
|
31693
|
#-*- coding: UTF-8 -*-
from ctypes import POINTER, c_int, c_uint, c_char, c_float, Structure, c_char_p, c_double, c_ubyte
class Matrix3x3(Structure):
"""
See 'aiMatrix3x3.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),
]
class Matrix4x4(Structure):
"""
See 'aiMatrix4x4.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float),
("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float),
]
class Face(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Number of indices defining this face. 3 for a triangle, >3 for polygon
("mNumIndices", c_uint),
# Pointer to the indices array. Size of the array is given in numIndices.
("mIndices", POINTER(c_uint)),
]
class VertexWeight(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Index of the vertex which is influenced by the bone.
("mVertexId", c_uint),
# The strength of the influence in the range (0...1).
# The influence from all bones at one vertex amounts to 1.
("mWeight", c_float),
]
class Quaternion(Structure):
"""
See 'aiQuaternion.h' for details.
"""
_fields_ = [
# w,x,y,z components of the quaternion
("w", c_float),("x", c_float),("y", c_float),("z", c_float),
]
class Texel(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte),
]
class Plane(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Plane equation
("a", c_float),("b", c_float),("c", c_float),("d", c_float),
]
class Color3D(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Red, green and blue color values
("r", c_float),("g", c_float),("b", c_float),
]
class Color4D(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Red, green, blue and alpha color values
("r", c_float),("g", c_float),("b", c_float),("a", c_float),
]
class String(Structure):
"""
See 'aiTypes.h' for details.
"""
MAXLEN = 1024
_fields_ = [
# Length of the string excluding the terminal 0
("length", c_uint),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MemoryInfo(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Storage allocated for texture data, in bytes
("textures", c_uint),
# Storage allocated for material data, in bytes
("materials", c_uint),
# Storage allocated for mesh data, in bytes
("meshes", c_uint),
# Storage allocated for node data, in bytes
("nodes", c_uint),
# Storage allocated for animation data, in bytes
("animations", c_uint),
# Storage allocated for camera data, in bytes
("cameras", c_uint),
# Storage allocated for light data, in bytes
("lights", c_uint),
# Storage allocated for the full import, in bytes
("total", c_uint),
]
class Vector2D(Structure):
"""
See 'aiVector2D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),
]
class Vector3D(Structure):
"""
See 'aiVector3D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),("z", c_float),
]
class Node(Structure):
    """
    See 'aiScene.h' for details.
    """

# The fields are assigned after the class definition because the structure
# is self-referential: it needs POINTER(Node) to refer to itself.
Node._fields_ = [
#The name of the node.
# #The name might be empty (length of zero) but all nodes which
#need to be accessed afterwards by bones or anims are usually named.
#Multiple nodes may have the same name, but nodes which are accessed
#by bones (see #aiBone and #aiMesh::mBones) *must* be unique.
#
#Cameras and lights are assigned to a specific node name - if there
#are multiple nodes with this name, they're assigned to each of them.
#<br>
#There are no limitations regarding the characters contained in
#this text. You should be able to handle stuff like whitespace, tabs,
#linefeeds, quotation marks, ampersands, ... .
#
("mName", String),
#The transformation relative to the node's parent.#
("mTransformation", Matrix4x4),
#Parent node. NULL if this node is the root node.#
("mParent", POINTER(Node)),
#The number of child nodes of this node.#
("mNumChildren", c_uint),
#The child nodes of this node. NULL if mNumChildren is 0.#
("mChildren", POINTER(POINTER(Node))),
#The number of meshes of this node.#
("mNumMeshes", c_uint),
#The meshes of this node. Each entry is an index into the mesh#
("mMeshes", POINTER(c_uint)),
]
class VectorKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Vector3D),
]
class QuatKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Quaternion),
]
class NodeAnim(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
#The name of the node affected by this animation. The node
# must exist and it must be unique.
#
("mNodeName", String),
#The number of position keys#
("mNumPositionKeys", c_uint),
#The position keys of this animation channel. Positions are
#specified as 3D vector. The array is mNumPositionKeys in size.
# #If there are position keys, there will also be at least one
#scaling and one rotation key.
#
("mPositionKeys", POINTER(VectorKey)),
#The number of rotation keys#
("mNumRotationKeys", c_uint),
#The rotation keys of this animation channel. Rotations are
# given as quaternions, which are 4D vectors. The array is
# mNumRotationKeys in size.
# #If there are rotation keys, there will also be at least one
#scaling and one position key.
#
("mRotationKeys", POINTER(QuatKey)),
#The number of scaling keys#
("mNumScalingKeys", c_uint),
#The scaling keys of this animation channel. Scalings are
# specified as 3D vector. The array is mNumScalingKeys in size.
# #If there are scaling keys, there will also be at least one
#position and one rotation key.
#
("mScalingKeys", POINTER(VectorKey)),
#Defines how the animation behaves before the first
# key is encountered.
# # The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is used).
#
("mPreState", c_uint),
#Defines how the animation behaves after the last
# key was processed.
# # The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is taken).
#
("mPostState", c_uint),
]
class Animation(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
#The name of the animation. If the modeling package this data was
# exported from does support only a single animation channel, this
# name is usually empty (length is zero).
#
("mName", String),
#Duration of the animation in ticks.
#
("mDuration", c_double),
#Ticks per second. 0 if not specified in the imported file
#
("mTicksPerSecond", c_double),
#The number of bone animation channels. Each channel affects
# a single node.
#
("mNumChannels", c_uint),
#The node animation channels. Each channel affects a single node.
# The array is mNumChannels in size.
#
("mChannels", POINTER(POINTER(NodeAnim))),
]
class Camera(Structure):
"""
See 'aiCamera.h' for details.
"""
_fields_ = [
#The name of the camera.
# # There must be a node in the scenegraph with the same name.
# This node specifies the position of the camera in the scene
# hierarchy and can be animated.
#
("mName", String),
#Position of the camera relative to the coordinate space
# defined by the corresponding node.
# # The default value is 0|0|0.
#
("mPosition", Vector3D),
#'Up' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# # The 'right' vector of the camera coordinate system is
# the cross product of the up and lookAt vectors.
# The default value is 0|1|0. The vector
# may be normalized, but it needn't.
#
("mUp", Vector3D),
#'LookAt' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# # This is the viewing direction of the user.
# The default value is 0|0|1. The vector
# may be normalized, but it needn't.
#
("mLookAt", Vector3D),
#Half horizontal field of view angle, in radians.
# # The field of view angle is the angle between the center
# line of the screen and the left or right border.
# The default value is 1/4PI.
#
("mHorizontalFOV", c_float),
#Distance of the near clipping plane from the camera.
# #The value may not be 0.f (for arithmetic reasons to prevent
#a division through zero). The default value is 0.1f.
#
("mClipPlaneNear", c_float),
#Distance of the far clipping plane from the camera.
        # #The far clipping plane must, of course, be farther away than the
#near clipping plane. The default value is 1000.f. The ratio
#between the near and the far plane should not be too
#large (between 1000-10000 should be ok) to avoid floating-point
#inaccuracies which could lead to z-fighting.
#
("mClipPlaneFar", c_float),
#Screen aspect ratio.
# #This is the ration between the width and the height of the
#screen. Typical values are 4/3, 1/2 or 1/1. This value is
#0 if the aspect ratio is not defined in the source file.
#0 is also the default value.
#
("mAspect", c_float),
]
class Light(Structure):
"""
See 'aiLight.h' for details.
"""
_fields_ = [
#The name of the light source.
# # There must be a node in the scenegraph with the same name.
# This node specifies the position of the light in the scene
# hierarchy and can be animated.
#
("mName", String),
#The type of the light source.
# #aiLightSource_UNDEFINED is not a valid value for this member.
#
("mType", c_uint),
#Position of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# # The position is undefined for directional lights.
#
("mPosition", Vector3D),
#Direction of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# # The direction is undefined for point lights. The vector
# may be normalized, but it needn't.
#
("mDirection", Vector3D),
#Constant light attenuation factor.
# # The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1#d + att2#d*d)
# @endcode
# This member corresponds to the att0 variable in the equation.
# Naturally undefined for directional lights.
#
("mAttenuationConstant", c_float),
#Linear light attenuation factor.
# # The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1#d + att2#d*d)
# @endcode
# This member corresponds to the att1 variable in the equation.
# Naturally undefined for directional lights.
#
("mAttenuationLinear", c_float),
#Quadratic light attenuation factor.
#
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1#d + att2#d*d)
# @endcode
# This member corresponds to the att2 variable in the equation.
# Naturally undefined for directional lights.
#
("mAttenuationQuadratic", c_float),
#Diffuse color of the light source
# # The diffuse light color is multiplied with the diffuse
# material color to obtain the final color that contributes
# to the diffuse shading term.
#
("mColorDiffuse", Color3D),
#Specular color of the light source
# # The specular light color is multiplied with the specular
# material color to obtain the final color that contributes
# to the specular shading term.
#
("mColorSpecular", Color3D),
#Ambient color of the light source
# # The ambient light color is multiplied with the ambient
# material color to obtain the final color that contributes
# to the ambient shading term. Most renderers will ignore
        # this value; it is just a remnant of the fixed-function pipeline
# that is still supported by quite many file formats.
#
("mColorAmbient", Color3D),
#Inner angle of a spot light's light cone.
# # The spot light has maximum influence on objects inside this
# angle. The angle is given in radians. It is 2PI for point
# lights and undefined for directional lights.
#
("mAngleInnerCone", c_float),
#Outer angle of a spot light's light cone.
# # The spot light does not affect objects outside this angle.
# The angle is given in radians. It is 2PI for point lights and
# undefined for directional lights. The outer angle must be
# greater than or equal to the inner angle.
# It is assumed that the application uses a smooth
# interpolation between the inner and the outer cone of the
# spot light.
#
("mAngleOuterCone", c_float),
]
class UVTransform(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
#Translation on the u and v axes.
# # The default value is (0|0).
#
("mTranslation", Vector2D),
#Scaling on the u and v axes.
# # The default value is (1|1).
#
("mScaling", Vector2D),
#Rotation - in counter-clockwise direction.
# # The rotation angle is specified in radians. The
# rotation center is 0.5f|0.5f. The default value
# 0.f.
#
("mRotation", c_float),
]
class MaterialProperty(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
#Specifies the name of the property (key)
## Keys are case insensitive.
#
("mKey", String),
#Textures: Specifies the exact usage semantic.
#
# For non-texture properties, this member is always 0
# or #aiTextureType_NONE.
#
("mSemantic", c_uint),
#Textures: Specifies the index of the texture
# # For non-texture properties, this member is always 0.
#
("mIndex", c_uint),
#Size of the buffer mData is pointing to, in bytes.
# #This value may not be 0.
#
("mDataLength", c_uint),
#Type information for the property.
##Defines the data layout inside the data buffer. This is used
#by the library internally to perform debug checks and to
#utilize proper type conversions.
#(It's probably a hacky solution, but it works.)
#
("mType", c_uint),
#Binary buffer to hold the property's value
##The size of the buffer is always mDataLength.
#
("mData", POINTER(c_char)),
]
class Material(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
#List of all material properties loaded.#
("mProperties", POINTER(POINTER(MaterialProperty))),
#Number of properties in the data base#
("mNumProperties", c_uint),
#Storage allocated#
("mNumAllocated", c_uint),
]
class Bone(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# The name of the bone.
("mName", String),
# The number of vertices affected by this bone
("mNumWeights", c_uint),
# The vertices affected by this bone
("mWeights", POINTER(VertexWeight)),
# Matrix that transforms from mesh space to bone space in bind pose
("mOffsetMatrix", Matrix4x4),
]
class Mesh(Structure):
"""
See 'aiMesh.h' for details.
"""
AI_MAX_NUMBER_OF_COLOR_SETS = 0x4
AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x4
_fields_ = [
#Bitwise combination of the members of the #aiPrimitiveType enum.
#This specifies which types of primitives are present in the mesh.
#The "SortByPrimitiveType"-Step can be used to make sure the
#output meshes consist of one primitive type each.
#
("mPrimitiveTypes", c_uint),
#The number of vertices in this mesh.
#This is also the size of all of the per-vertex data arrays
#
("mNumVertices", c_uint),
#The number of primitives (triangles, polygons, lines) in this mesh.
#This is also the size of the mFaces array
#
("mNumFaces", c_uint),
#Vertex positions.
#This array is always present in a mesh. The array is
#mNumVertices in size.
#
("mVertices", POINTER(Vector3D)),
#Vertex normals.
#The array contains normalized vectors, NULL if not present.
#The array is mNumVertices in size. Normals are undefined for
#point and line primitives. A mesh consisting of points and
#lines only may not have normal vectors. Meshes with mixed
#primitive types (i.e. lines and triangles) may have normals,
#but the normals for vertices that are only referenced by
#point or line primitives are undefined and set to QNaN (WARN:
        #qNaN compares unequal to *everything*, even to qNaN itself.
#Use code like this
#@code
##define IS_QNAN(f) (f != f)
#@endcode
#to check whether a field is qnan).
#@note Normal vectors computed by Assimp are always unit-length.
#However, this needn't apply for normals that have been taken
# directly from the model file.
#
("mNormals", POINTER(Vector3D)),
#Vertex tangents.
#The tangent of a vertex points in the direction of the positive
#X texture axis. The array contains normalized vectors, NULL if
#not present. The array is mNumVertices in size. A mesh consisting
#of points and lines only may not have normal vectors. Meshes with
#mixed primitive types (i.e. lines and triangles) may have
#normals, but the normals for vertices that are only referenced by
#point or line primitives are undefined and set to QNaN.
#@note If the mesh contains tangents, it automatically also
#contains bitangents (the bitangent is just the cross product of
#tangent and normal vectors).
#
("mTangents", POINTER(Vector3D)),
#Vertex bitangents.
#The bitangent of a vertex points in the direction of the positive
#Y texture axis. The array contains normalized vectors, NULL if not
#present. The array is mNumVertices in size.
#@note If the mesh contains tangents, it automatically also contains
#bitangents.
#
("mBitangents", POINTER(Vector3D)),
#Vertex color sets.
#A mesh may contain 0 to #AI_MAX_NUMBER_OF_COLOR_SETS vertex
#colors per vertex. NULL if not present. Each array is
#mNumVertices in size if present.
#
("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS),
#Vertex texture coords, also known as UV channels.
#A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per
#vertex. NULL if not present. The array is mNumVertices in size.
#
("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS),
#Specifies the number of components for a given UV channel.
#Up to three channels are supported (UVW, for accessing volume
#or cube maps). If the value is 2 for a given channel n, the
#component p.z of mTextureCoords[n][p] is set to 0.0f.
#If the value is 1 for a given channel, p.y is set to 0.0f, too.
#@note 4D coords are not supported
#
("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS),
#The faces the mesh is constructed from.
#Each face refers to a number of vertices by their indices.
#This array is always present in a mesh, its size is given
#in mNumFaces. If the AI_SCENE_FLAGS_NON_VERBOSE_FORMAT
#is NOT set each face references an unique set of vertices.
#
("mFaces", POINTER(Face)),
#The number of bones this mesh contains.
#Can be 0, in which case the mBones array is NULL.
#
("mNumBones", c_uint),
#The bones of this mesh.
#A bone consists of a name by which it can be found in the
#frame hierarchy and a set of vertex weights.
#
("mBones", POINTER(POINTER(Bone))),
#The material used by this mesh.
#A mesh does use only a single material. If an imported model uses
#multiple materials, the import splits up the mesh. Use this value
#as index into the scene's material list.
#
("mMaterialIndex", c_uint),
]
class Texture(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
#Width of the texture, in pixels
# #If mHeight is zero the texture is compressed in a format
#like JPEG. In this case mWidth specifies the size of the
#memory area pcData is pointing to, in bytes.
#
("mWidth", c_uint),
#Height of the texture, in pixels
# #If this value is zero, pcData points to an compressed texture
#in any format (e.g. JPEG).
#
("mHeight", c_uint),
#A hint from the loader to make it easier for applications
# to determine the type of embedded compressed textures.
# #If mHeight != 0 this member is undefined. Otherwise it
        #is set to '\\0\\0\\0\\0' if the loader has no additional
#information about the texture file format used OR the
#file extension of the format without a trailing dot. If there
#are multiple file extensions for a format, the shortest
#extension is chosen (JPEG maps to 'jpg', not to 'jpeg').
#E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case.
#The fourth character will always be '\\0'.
#
("achFormatHint", c_char*4),
#Data of the texture.
# #Points to an array of mWidth#mHeight aiTexel's.
#The format of the texture data is always ARGB8888 to
#make the implementation for user of the library as easy
#as possible. If mHeight = 0 this is a pointer to a memory
#buffer of size mWidth containing the compressed texture
#data. Good luck, have fun!
#
("pcData", POINTER(Texel)),
]
class Ray(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Position and direction of the ray
("pos", Vector3D),("dir", Vector3D),
]
class Scene(Structure):
"""
See 'aiScene.h' for details.
"""
AI_SCENE_FLAGS_INCOMPLETE = 0x1
AI_SCENE_FLAGS_VALIDATED = 0x2
AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4
AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8
AI_SCENE_FLAGS_TERRAIN = 0x10
_fields_ = [
#Any combination of the AI_SCENE_FLAGS_XXX flags. By default
#this value is 0, no flags are set. Most applications will
#want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE
#bit set.
#
("mFlags", c_uint),
#The root node of the hierarchy.
#
#There will always be at least the root node if the import
#was successful (and no special flags have been set).
#Presence of further nodes depends on the format and content
#of the imported file.
#
("mRootNode", POINTER(Node)),
#The number of meshes in the scene.#
("mNumMeshes", c_uint),
#The array of meshes.
# #Use the indices given in the aiNode structure to access
#this array. The array is mNumMeshes in size. If the
#AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
        #be at least ONE mesh.
#
("mMeshes", POINTER(POINTER(Mesh))),
#The number of materials in the scene.#
("mNumMaterials", c_uint),
#The array of materials.
#
#Use the index given in each aiMesh structure to access this
#array. The array is mNumMaterials in size. If the
#AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
#be at least ONE material.
#
("mMaterials", POINTER(POINTER(Material))),
#The number of animations in the scene.#
("mNumAnimations", c_uint),
#The array of animations.
# #All animations imported from the given file are listed here.
#The array is mNumAnimations in size.
#
("mAnimations", POINTER(POINTER(Animation))),
#The number of textures embedded into the file.
("mNumTextures", c_uint),
#The array of embedded textures.
#
#Not many file formats embed their textures into the file.
#An example is Quake's MDL format (which is also used by
#some GameStudio versions)
#
("mTextures", POINTER(POINTER(Texture))),
#The number of light sources in the scene. Light sources
#are fully optional, in most cases this attribute will be 0
#
("mNumLights", c_uint),
#The array of light sources.
#
#All light sources imported from the given file are
#listed here. The array is mNumLights in size.
#
("mLights", POINTER(POINTER(Light))),
#The number of cameras in the scene. Cameras
#are fully optional, in most cases this attribute will be 0
#
("mNumCameras", c_uint),
#The array of cameras.
#
#All cameras imported from the given file are listed here.
#The array is mNumCameras in size. The first camera in the
#array (if existing) is the default camera view into
#the scene.
#
("mCameras", POINTER(POINTER(Camera))),
]
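# A minimal traversal sketch (not part of the original bindings): given a
# POINTER(Scene) returned by some loader, walk the mesh array via ctypes.
# `scene_ptr` is a hypothetical argument; only fields declared above are used.
def print_scene_summary(scene_ptr):
    scene = scene_ptr.contents
    for i in range(scene.mNumMeshes):
        # mMeshes is a POINTER(POINTER(Mesh)): index first, then dereference
        mesh = scene.mMeshes[i].contents
        print("mesh %d uses material index %d" % (i, mesh.mMaterialIndex))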
|
mit
| 2,478,706,221,965,106,700
| 34.411173
| 98
| 0.523901
| false
| 4.421456
| false
| false
| false
|
probardjango/Comercio-Digital
|
src/comerciodigital/settings.py
|
1
|
2734
|
"""
Django settings for comerciodigital project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'iqqvh9ulex94*fx(cl7c$#_a-39ru7ek-0f7f4h(jgtp874hgj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# our apps
'productos',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'comerciodigital.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'comerciodigital.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
mit
| 1,172,620,309,974,841,600
| 25.288462
| 71
| 0.695684
| false
| 3.460759
| false
| false
| false
|
beeftornado/sentry
|
tests/sentry/event_manager/interfaces/test_http.py
|
1
|
3923
|
from __future__ import absolute_import
import pytest
from sentry import eventstore
from sentry.event_manager import EventManager
@pytest.fixture
def make_http_snapshot(insta_snapshot):
def inner(data):
mgr = EventManager(data={"request": data})
mgr.normalize()
evt = eventstore.create_event(data=mgr.get_data())
interface = evt.interfaces.get("request")
insta_snapshot({"errors": evt.data.get("errors"), "to_json": interface.to_json()})
return inner
def test_basic(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com"))
def test_full(make_http_snapshot):
make_http_snapshot(
dict(
method="GET",
url="http://example.com",
query_string="foo=bar",
fragment="foobar",
headers={"x-foo-bar": "baz"},
cookies={"foo": "bar"},
env={"bing": "bong"},
data="hello world",
)
)
def test_query_string_as_dict(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", query_string={"foo": "bar"}))
def test_query_string_as_pairlist(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", query_string=[["foo", "bar"]]))
def test_data_as_dict(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", data={"foo": "bar"}))
def test_urlencoded_data(make_http_snapshot):
make_http_snapshot(
dict(
url="http://example.com",
headers={"Content-Type": "application/x-www-form-urlencoded"},
data="foo=bar",
)
)
def test_infer_urlencoded_content_type(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", data="foo=bar"))
def test_json_data(make_http_snapshot):
make_http_snapshot(
dict(
url="http://example.com",
headers={"Content-Type": "application/json"},
data='{"foo":"bar"}',
)
)
def test_infer_json_content_type(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", data='{"foo":"bar"}'))
def test_cookies_as_string(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", cookies="a=b;c=d"))
def test_cookies_in_header(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", headers={"Cookie": "a=b;c=d"}))
def test_cookies_in_header2(make_http_snapshot):
make_http_snapshot(
dict(url="http://example.com", headers={"Cookie": "a=b;c=d"}, cookies={"foo": "bar"})
)
def test_query_string_and_fragment_as_params(make_http_snapshot):
make_http_snapshot(
dict(url="http://example.com", query_string=u"foo\ufffd=bar\u2026", fragment="fragment")
)
def test_query_string_and_fragment_in_url(make_http_snapshot):
make_http_snapshot(dict(url=u"http://example.com?foo\ufffd=bar#fragment\u2026"))
def test_header_value_list(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", headers={"Foo": ["1", "2"]}))
def test_header_value_str(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", headers={"Foo": 1}))
def test_invalid_method(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", method="1234"))
def test_invalid_method2(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", method="A" * 33))
def test_invalid_method3(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", method="A"))
def test_unknown_method(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", method="TEST"))
def test_unknown_method2(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", method="FOO-BAR"))
def test_unknown_method3(make_http_snapshot):
make_http_snapshot(dict(url="http://example.com", method="FOO_BAR"))
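# Example invocation (assumes a configured Sentry development environment):
#   pytest tests/sentry/event_manager/interfaces/test_http.py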
|
bsd-3-clause
| -7,135,125,457,729,543,000
| 27.845588
| 96
| 0.642366
| false
| 3.266445
| true
| false
| false
|
YorkUIRLab/eosdb
|
topic_coherence.py
|
1
|
2591
|
# coding: utf-8
# In[4]:
import glob
from datetime import datetime
import logging as log
import gensim
import matplotlib.pyplot as plt
import pyLDAvis
import pyLDAvis.gensim
from gensim.models import CoherenceModel
from sklearn.externals import joblib
import gzip
from multiprocessing import Pool
get_ipython().magic(u'matplotlib notebook')
# In[1]:
class ModelSimilarity:
# Uses a model (e.g. Word2Vec model) to calculate the similarity between two terms.
def __init__(self, model):
self.model = model
def similarity(self, ranking_i, ranking_j):
sim = 0.0
pairs = 0
for term_i in ranking_i:
for term_j in ranking_j:
try:
sim += self.model.similarity(term_i, term_j)
pairs += 1
except:
# print "Failed pair (%s,%s)" % (term_i,term_j)
pass
if pairs == 0:
return 0.0
return sim / pairs
# In[2]:
class WithinTopicMeasure:
# Measures within-topic coherence for a topic model, based on a set of term rankings.
def __init__(self, metric):
self.metric = metric
def evaluate_ranking(self, term_ranking):
return self.metric.similarity(term_ranking, term_ranking)
def evaluate_rankings(self, term_rankings):
scores = []
overall = 0.0
for topic_index in range(len(term_rankings)):
score = self.evaluate_ranking(term_rankings[topic_index])
scores.append(score)
overall += score
overall /= len(term_rankings)
return overall
# In[13]:
# To get the topic words from the model
def get_topics(ldamodel, num_topics, num_words):
topics = []
for topic_id, topic in ldamodel.show_topics(num_topics=num_topics, num_words=num_words, formatted=False):
topic = [word for word, _ in topic]
topics.append(topic)
return topics
ldamodel = joblib.load('data/eos/lda/28_LDAmodel_EOS.pkl')
print(ldamodel)
print(get_topics(ldamodel, 28, 10))
# In[18]:
model_path = 'data/eos/word2vec_model_all.model'
log.info("Loading Word2Vec model from %s ..." % model_path)
model = gensim.models.Word2Vec.load(model_path)
metric = ModelSimilarity(model)
validation_measure = WithinTopicMeasure(metric)
topic_num = 28
truncated_term_rankings = get_topics(ldamodel, topic_num, 10)
coherence_scores = validation_measure.evaluate_rankings(truncated_term_rankings)
log.info("Model coherence (k=%d) = %.4f" % (topic_num, coherence_scores))
print(coherence_scores)
# In[ ]:
|
lgpl-3.0
| -2,668,634,247,923,791,000
| 24.15534
| 109
| 0.643381
| false
| 3.378096
| false
| false
| false
|
gallupliu/QA
|
data/models.py
|
1
|
2787
|
# encoding: utf-8
"""
@author: gallupliu
@contact: gallup-liu@hotmail.com
@version: 1.0
@license: Apache Licence
@file: models.py
@time: 2018/1/12 23:19
"""
from data.util import unique_items
class MetadataItem(object):
def __init__(self):
self.metadata = dict()
class Token(MetadataItem):
def __init__(self, text):
"""
:type text: str
"""
super(Token, self).__init__()
self.text = text
class Sentence(MetadataItem):
def __init__(self, text, tokens):
"""
:type text: str
:type tokens: list[Token]
"""
super(Sentence, self).__init__()
self.text = text
self.tokens = tokens
@property
def vocab(self):
vocab = []
for token in self.tokens:
vocab.append(token.text)
return unique_items(vocab)
class TextItem(MetadataItem):
def __init__(self, text, sentences):
"""
:type text: str
:type sentences: list[Sentence]
"""
super(TextItem, self).__init__()
self.text = text
self.sentences = sentences
@property
def vocab(self):
vocab = []
for sentence in self.sentences:
vocab += sentence.vocab
return unique_items(vocab)
class QAPool(object):
def __init__(self, question, pooled_answers, ground_truth):
"""
:type question: TextItem
:type pooled_answers: list[TextItem]
:type ground_truth: list[TextItem]
"""
self.question = question
self.pooled_answers = pooled_answers
self.ground_truth = ground_truth
class Data(object):
def __init__(self, split_name, qa, answers):
"""
:type split_name: str
:type qa: list[QAPool]
:type answers: list[TextItem]
"""
self.split_name = split_name
self.qa = qa
self.answers = answers
class Archive(object):
def __init__(self, train, valid, test, questions, answers):
"""
:type train: Data
:type valid: Data
:type test: list[Data]
:type questions: list[TextItem]
:type answers: list[TextItem]
"""
self.train = train
self.valid = valid
self.test = test
self.questions = questions
self.answers = answers
self._vocab = None # lazily created
@property
def vocab(self):
"""
:rtype: set
"""
if self._vocab is None:
self._vocab = []
for question in self.questions:
self._vocab += question.vocab
for answer in self.answers:
self._vocab += answer.vocab
self._vocab = unique_items(self._vocab)
return self._vocab
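# A minimal construction sketch (illustrative only, not part of the module):
# build a one-sentence TextItem and read back its vocabulary. The order of
# the result depends on unique_items().
def _demo_text_item():
    tokens = [Token(u"what"), Token(u"is"), Token(u"qa")]
    sentence = Sentence(u"what is qa", tokens)
    item = TextItem(u"what is qa", [sentence])
    return item.vocab  # e.g. [u"what", u"is", u"qa"]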
|
apache-2.0
| -6,584,500,800,930,892,000
| 21.119048
| 63
| 0.541442
| false
| 3.953191
| false
| false
| false
|
comsaint/legco-watch
|
app/raw/tests/test_agenda.py
|
1
|
7371
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Tests for CouncilAgenda object
from django.test import TestCase
import logging
from raw.docs import agenda
# We use fixtures which are raw HTML versions of the agendas to test the parser
# Each test case works with one source.
logging.disable(logging.CRITICAL)
class Agenda20140709TestCase(TestCase):
def setUp(self):
with open('raw/tests/fixtures/council_agenda-20140709-e.html', 'rb') as f:
self.src = f.read().decode('utf-8')
self.parser = agenda.CouncilAgenda('council_agenda-20140709-e', self.src)
def test_tabled_papers_count(self):
# 8 subsidiary legislation and 29 other papers
self.assertEqual(len(self.parser.tabled_papers), 37)
def test_tabled_papers_class(self):
for p in self.parser.tabled_papers[0:8]:
self.assertTrue(isinstance(p, agenda.TabledLegislation))
for p in self.parser.tabled_papers[8:37]:
self.assertTrue(isinstance(p, agenda.OtherTabledPaper))
def test_spot_check_tabled_papers(self):
foo = self.parser.tabled_papers[3]
self.assertEqual(foo.title, u'Timber Stores (Amendment) Regulation 2014')
self.assertEqual(foo.number, u'106/2014')
foo = self.parser.tabled_papers[9]
self.assertEqual(foo.title, u'No. 120 - Sir Robert Black Trust Fund Report of the Trustee on the Administration of the Fund for the year ended 31 March 2014')
self.assertEqual(foo.presenter, u'Secretary for Home Affairs')
foo = self.parser.tabled_papers[27]
self.assertEqual(foo.title, u'Report of the Panel on Food Safety and Environmental Hygiene 2013-2014')
self.assertEqual(foo.presenter, u'Dr Hon Helena WONG')
def test_questions_count(self):
self.assertEqual(len(self.parser.questions), 22)
def test_spot_check_questions(self):
foo = self.parser.questions[8]
self.assertEqual(foo.asker, u'Hon WONG Yuk-man')
self.assertEqual(foo.replier, u'Secretary for Security')
self.assertEqual(foo.type, agenda.AgendaQuestion.QTYPE_WRITTEN)
def test_bills_count(self):
self.assertEqual(len(self.parser.bills), 9)
def test_spot_check_bills(self):
foo = self.parser.bills[1]
self.assertEqual(foo.reading, agenda.BillReading.FIRST)
self.assertEqual(foo.title, u'Land (Miscellaneous Provisions) (Amendment) Bill 2014')
self.assertEqual(foo.attendees, [])
foo = self.parser.bills[3]
self.assertEqual(foo.reading, agenda.BillReading.SECOND)
self.assertEqual(foo.title, u'Land (Miscellaneous Provisions) (Amendment) Bill 2014')
self.assertEqual(foo.attendees, [u'Secretary for Development'])
foo = self.parser.bills[7]
self.assertEqual(foo.reading, agenda.BillReading.SECOND_THIRD)
self.assertEqual(foo.title, u'Stamp Duty (Amendment) Bill 2013')
self.assertEqual(len(foo.attendees), 2, foo.attendees)
self.assertEqual(set(foo.attendees), {u'Secretary for Financial Services and the Treasury',
u'Under Secretary for Financial Services and the Treasury'})
self.assertEqual(len(foo.amendments), 3)
class Agenda20130508TestCase(TestCase):
def setUp(self):
with open('raw/tests/fixtures/council_agenda-20130508-e.html', 'rb') as f:
self.src = f.read().decode('utf-8')
self.parser = agenda.CouncilAgenda('council_agenda-20130508-e', self.src)
def test_count_tabled_papers(self):
self.assertEqual(len(self.parser.tabled_papers), 9)
def test_tabled_papers_type(self):
for p in self.parser.tabled_papers[0:8]:
self.assertTrue(isinstance(p, agenda.TabledLegislation))
self.assertTrue(isinstance(self.parser.tabled_papers[8], agenda.OtherTabledPaper))
def test_spot_check_tabled_papers(self):
foo = self.parser.tabled_papers[2]
self.assertEqual(foo.title, u'Trade Marks Ordinance (Amendment of Schedule 1) Regulation 2013')
self.assertEqual(foo.number, u'64/2013')
foo = self.parser.tabled_papers[8]
self.assertEqual(foo.title, u'No. 92 - Financial statements for the year ended 31 August 2012')
self.assertEqual(foo.presenter, u'Secretary for Education')
def test_questions_count(self):
self.assertEqual(len(self.parser.questions), 22)
def test_spot_check_questions(self):
foo = self.parser.questions[21]
self.assertEqual(foo.asker, u'Emily LAU')
self.assertEqual(foo.replier, u'Secretary for Financial Services and the Treasury')
self.assertEqual(foo.type, agenda.AgendaQuestion.QTYPE_WRITTEN)
def test_bills_count(self):
self.assertEqual(len(self.parser.bills), 8)
def test_spot_check_bills(self):
foo = self.parser.bills[0]
self.assertEqual(foo.title, u'Hong Kong Arts Development Council (Amendment) Bill 2013')
self.assertEqual(foo.reading, agenda.BillReading.FIRST)
self.assertEqual(foo.amendments, [])
self.assertEqual(foo.attendees, [])
foo = self.parser.bills[6]
self.assertEqual(foo.title, u'Appropriation Bill 2013')
self.assertEqual(foo.reading, agenda.BillReading.SECOND_THIRD)
self.assertEqual(len(foo.amendments), 1)
# Attendees on these appropriations bills are always tricky
foo = self.parser.bills[7]
self.assertEqual(foo.title, u'Pilotage (Amendment) Bill 2013')
self.assertEqual(foo.reading, agenda.BillReading.SECOND_THIRD)
self.assertEqual(foo.attendees, [u'Secretary for Transport and Housing'])
self.assertEqual(foo.amendments, [])
class Agenda20140430TestCase(TestCase):
def setUp(self):
with open('raw/tests/fixtures/council_agenda-20140430-c.html', 'rb') as f:
self.src = f.read().decode('utf-8')
self.parser = agenda.CouncilAgenda('council_agenda-20130430-c', self.src)
def test_count_tabled_papers(self):
self.assertEqual(len(self.parser.tabled_papers), 7)
def test_tabled_papers_type(self):
for p in self.parser.tabled_papers[0:4]:
self.assertTrue(isinstance(p, agenda.TabledLegislation))
for p in self.parser.tabled_papers[4:7]:
self.assertTrue(isinstance(p, agenda.OtherTabledPaper))
def test_spot_check_papers(self):
pass
def test_questions_count(self):
self.assertEqual(len(self.parser.questions), 18)
def test_questions_spot_check(self):
foo = self.parser.questions[7]
self.assertEqual(foo.asker, u'張超雄')
self.assertEqual(foo.replier, u'發展局局長')
self.assertEqual(foo.type, agenda.AgendaQuestion.QTYPE_ORAL)
def test_bills_count(self):
self.assertEqual(len(self.parser.bills), 9)
def test_bills_spot_check(self):
foo = self.parser.bills[2]
self.assertEqual(foo.title, u'《電子健康紀錄互通系統條例草案》')
self.assertEqual(foo.attendees, [])
self.assertEqual(foo.reading, agenda.BillReading.FIRST)
foo = self.parser.bills[8]
self.assertEqual(foo.title, u'《2014年撥款條例草案》')
self.assertEqual(set(foo.attendees), {u'財政司司長'})
self.assertEqual(foo.reading, agenda.BillReading.THIRD)
|
mit
| -6,964,750,088,137,517,000
| 41.16763
| 166
| 0.677039
| false
| 3.19256
| true
| false
| false
|
ajaygarg84/sugar
|
src/jarabe/view/keyhandler.py
|
1
|
7320
|
# Copyright (C) 2006-2007, Red Hat, Inc.
# Copyright (C) 2009 Simon Schampijer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import logging
from gi.repository import Gdk
from gi.repository import SugarExt
from jarabe.model import sound
from jarabe.model import shell
from jarabe.model import session
from jarabe.view.tabbinghandler import TabbingHandler
from jarabe.model.shell import ShellModel
from jarabe import config
from jarabe.journal import journalactivity
_VOLUME_STEP = sound.VOLUME_STEP
_VOLUME_MAX = 100
_TABBING_MODIFIER = Gdk.ModifierType.MOD1_MASK
_actions_table = {
'F1': 'zoom_mesh',
'F2': 'zoom_group',
'F3': 'zoom_home',
'F4': 'zoom_activity',
'F5': 'open_search',
'F6': 'frame',
'XF86AudioMute': 'volume_mute',
'F11': 'volume_down',
'XF86AudioLowerVolume': 'volume_down',
'F12': 'volume_up',
'XF86AudioRaiseVolume': 'volume_up',
'<alt>F11': 'volume_min',
'<alt>F12': 'volume_max',
'XF86MenuKB': 'frame',
'<alt>Tab': 'next_window',
'<alt><shift>Tab': 'previous_window',
'<alt>Escape': 'close_window',
'XF86WebCam': 'open_search',
# the following are intended for emulator users
'<alt><shift>f': 'frame',
'<alt><shift>q': 'quit_emulator',
'XF86Search': 'open_search',
'<alt><shift>o': 'open_search'
}
_instance = None
class KeyHandler(object):
def __init__(self, frame):
self._frame = frame
self._key_pressed = None
self._keycode_pressed = 0
self._keystate_pressed = 0
self._key_grabber = SugarExt.KeyGrabber()
self._key_grabber.connect('key-pressed',
self._key_pressed_cb)
self._key_grabber.connect('key-released',
self._key_released_cb)
self._tabbing_handler = TabbingHandler(self._frame, _TABBING_MODIFIER)
for f in os.listdir(os.path.join(config.ext_path, 'globalkey')):
if f.endswith('.py') and not f.startswith('__'):
module_name = f[:-3]
try:
logging.debug('Loading module %r', module_name)
module = __import__('globalkey.' + module_name, globals(),
locals(), [module_name])
for key in module.BOUND_KEYS:
if key in _actions_table:
raise ValueError('Key %r is already bound' % key)
_actions_table[key] = module
except Exception:
logging.exception('Exception while loading extension:')
self._key_grabber.grab_keys(_actions_table.keys())
def _change_volume(self, step=None, value=None):
if step is not None:
volume = sound.get_volume() + step
elif value is not None:
volume = value
volume = min(max(0, volume), _VOLUME_MAX)
sound.set_volume(volume)
sound.set_muted(volume == 0)
def handle_previous_window(self, event_time):
self._tabbing_handler.previous_activity(event_time)
def handle_next_window(self, event_time):
self._tabbing_handler.next_activity(event_time)
def handle_close_window(self, event_time):
active_activity = shell.get_model().get_active_activity()
if active_activity.is_journal():
return
active_activity.get_window().close()
def handle_zoom_mesh(self, event_time):
shell.get_model().set_zoom_level(ShellModel.ZOOM_MESH, event_time)
def handle_zoom_group(self, event_time):
shell.get_model().set_zoom_level(ShellModel.ZOOM_GROUP, event_time)
def handle_zoom_home(self, event_time):
shell.get_model().set_zoom_level(ShellModel.ZOOM_HOME, event_time)
def handle_zoom_activity(self, event_time):
shell.get_model().set_zoom_level(ShellModel.ZOOM_ACTIVITY, event_time)
def handle_volume_max(self, event_time):
self._change_volume(value=_VOLUME_MAX)
def handle_volume_min(self, event_time):
self._change_volume(value=0)
def handle_volume_mute(self, event_time):
if sound.get_muted() is True:
sound.set_muted(False)
else:
sound.set_muted(True)
def handle_volume_up(self, event_time):
self._change_volume(step=_VOLUME_STEP)
def handle_volume_down(self, event_time):
self._change_volume(step=-_VOLUME_STEP)
def handle_frame(self, event_time):
self._frame.notify_key_press()
def handle_quit_emulator(self, event_time):
session.get_session_manager().shutdown()
def handle_open_search(self, event_time):
journalactivity.get_journal().show_journal()
def _key_pressed_cb(self, grabber, keycode, state, event_time):
key = grabber.get_key(keycode, state)
logging.debug('_key_pressed_cb: %i %i %s', keycode, state, key)
if key is not None:
self._key_pressed = key
self._keycode_pressed = keycode
self._keystate_pressed = state
action = _actions_table[key]
if self._tabbing_handler.is_tabbing():
# Only accept window tabbing events, everything else
# cancels the tabbing operation.
if not action in ['next_window', 'previous_window']:
self._tabbing_handler.stop(event_time)
return True
if hasattr(action, 'handle_key_press'):
action.handle_key_press(key)
elif isinstance(action, basestring):
method = getattr(self, 'handle_' + action)
method(event_time)
else:
raise TypeError('Invalid action %r' % action)
return True
else:
# If this is not a registered key, then cancel tabbing.
if self._tabbing_handler.is_tabbing():
if not grabber.is_modifier(keycode):
self._tabbing_handler.stop(event_time)
return True
return False
def _key_released_cb(self, grabber, keycode, state, event_time):
logging.debug('_key_released_cb: %i %i', keycode, state)
if self._tabbing_handler.is_tabbing():
# We stop tabbing and switch to the new window as soon as the
# modifier key is raised again.
if grabber.is_modifier(keycode, mask=_TABBING_MODIFIER):
self._tabbing_handler.stop(event_time)
return True
return False
def setup(frame):
global _instance
if _instance:
del _instance
_instance = KeyHandler(frame)
|
gpl-2.0
| -8,039,884,357,420,951,000
| 33.046512
| 78
| 0.609426
| false
| 3.765432
| false
| false
| false
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckanext-dcatapit/ckanext/dcatapit/dcat/harvester.py
|
1
|
3892
|
import logging
import ckan.plugins as p
from ckanext.dcat.interfaces import IDCATRDFHarvester
from ckanext.dcatapit.dcat.profiles import LOCALISED_DICT_NAME_BASE, LOCALISED_DICT_NAME_RESOURCES
import ckanext.dcatapit.interfaces as interfaces
log = logging.getLogger(__name__)
class DCATAPITHarvesterPlugin(p.SingletonPlugin):
p.implements(IDCATRDFHarvester)
def before_download(self, url, harvest_job):
return url, []
def after_download(self, content, harvest_job):
return content, []
def before_update(self, harvest_object, dataset_dict, temp_dict):
self._before(dataset_dict, temp_dict)
def after_update(self, harvest_object, dataset_dict, temp_dict):
return self._after(dataset_dict, temp_dict)
def before_create(self, harvest_object, dataset_dict, temp_dict):
self._before(dataset_dict, temp_dict)
def after_create(self, harvest_object, dataset_dict, temp_dict):
return self._after(dataset_dict, temp_dict)
def _before(self, dataset_dict, temp_dict):
loc_dict = dataset_dict.pop(LOCALISED_DICT_NAME_BASE, {})
res_dict = dataset_dict.pop(LOCALISED_DICT_NAME_RESOURCES, {})
if loc_dict or res_dict:
temp_dict['dcatapit'] = {
LOCALISED_DICT_NAME_BASE: loc_dict,
LOCALISED_DICT_NAME_RESOURCES: res_dict
}
def _after(self, dataset_dict, temp_dict):
dcatapit_dict = temp_dict.get('dcatapit')
if not dcatapit_dict:
return None
pkg_id = dataset_dict['id']
base_dict = dcatapit_dict[LOCALISED_DICT_NAME_BASE]
if base_dict:
err = self._save_package_multilang(pkg_id, base_dict)
if err:
return err
resources_dict = dcatapit_dict[LOCALISED_DICT_NAME_RESOURCES]
if resources_dict:
err = self._save_resources_multilang(pkg_id, resources_dict)
if err:
return err
##
# Managing Solr indexes for harvested package dict
##
interfaces.update_solr_package_indexes(dataset_dict)
return None
def _save_package_multilang(self, pkg_id, base_dict):
try:
for field, lang_dict in base_dict.iteritems():
for lang, text in lang_dict.iteritems():
interfaces.upsert_package_multilang(pkg_id, field, 'package', lang, text)
except Exception, e:
return str(e)
return None
def _save_resources_multilang(self, pkg_id, resources_dict):
try:
uri_id_mapping = self._get_resource_uri_id_mapping(pkg_id)
for res_uri, res_dict in resources_dict.iteritems():
res_id = uri_id_mapping.get(res_uri, None)
if not res_id:
log.warn("Could not find resource id for URI %s", res_uri)
continue
for field, lang_dict in res_dict.iteritems():
for lang, text in lang_dict.iteritems():
interfaces.upsert_resource_multilang(res_id, field, lang, text)
except Exception, e:
return str(e)
return None
def _get_resource_uri_id_mapping(self, pkg_id):
ret = {}
# log.info("DATASET DICT: %s", dataset_dict)
dataset = p.toolkit.get_action('package_show')({}, {'id': pkg_id})
# log.info("DATASET ----------- %s", dataset)
for resource in dataset.get('resources', []):
res_id = resource.get('id', None)
res_uri = resource.get('uri', None)
if res_id and res_uri:
log.debug('Mapping resource id %s to URI "%s"', res_id, res_uri)
ret[res_uri] = res_id
else:
log.warn("Can't map URI for resource \"%s\"", resource.get('name', '---'))
return ret
|
gpl-3.0
| 3,424,962,919,449,245,000
| 34.063063
| 98
| 0.5889
| false
| 3.671698
| false
| false
| false
|
nfsli926/stock
|
python/com/nfs/util/dbutil.py
|
1
|
21744
|
# coding=utf-8
# Created on 2015-09-25
__author__ = 'litao'
from sqlalchemy import create_engine
import tushare as ts
import urllib
import urllib2
import re
import sys
import csv
import MySQLdb
import datetime
import time
import DateUtil as dateutil
# Import forward-adjusted (qfq) historical stock data
#code: string, stock code, e.g. 600848
#start: string, start date, format YYYY-MM-DD; defaults to the current date when empty
#end: string, end date, format YYYY-MM-DD; defaults to one year ago today when empty
#autype: string, price-adjustment type: qfq = forward-adjusted, hfq = backward-adjusted, None = unadjusted; default is qfq
#index: Boolean, whether the code is a market index; default is False
#retry_count : int, default 3, number of times to retry on network or similar problems
#pause : int, default 0, seconds to pause between retries, to avoid problems caused by requests that are too close together
#Return values:
#date : trade date (index)
#open : opening price
#high : highest price
#close : closing price
#low : lowest price
#volume : traded volume
#amount : traded amount
def get_qfq_date(code,start,end):
try:
df = ts.get_h_data(code,start,end)
print start+end
if df is None:
print "qfq df is none"
else:
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.insert(0,'code',code)
df.to_sql('stock_qfq_data', engine, if_exists='append')
print code + " qfq success"
except Exception,e:
print e.message
# Import unadjusted (bfq) historical stock data
# Parameters and return values are the same as for the qfq import above.
def get_bfq_data(code,startdate,enddate):
try:
print "sdfsdf"
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
print startdate+enddate
df = ts.get_h_data(code,start=startdate,end=enddate,autype='None')
if df is None :
print " day df is none"
else:
df.insert(0,'code',code)
df.to_sql('stock_bfq_data', engine, if_exists='append')
except Exception,e:
e.message
# Fetch daily K-line data for a stock
def get_day_data(code,startdate,enddate):
try:
df = ts.get_hist_data(code,start=startdate,end=enddate,ktype='D')
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
if df is None:
print " day df is none"
else:
df.insert(0,'code',code)
df.to_sql('stock_day_data', engine, if_exists='append')
except Exception,e:
print e.message
# Fetch weekly K-line data for a stock
def get_week_data(code,startdate,enddate):
try:
df = ts.get_hist_data(code,start=startdate,end=enddate,ktype='W')
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.insert(0,'code',code)
df.to_sql('stock_week_data', engine, if_exists='append')
print code + " week success"
except Exception,e:
print e.message
# Fetch monthly K-line data for a stock
def get_month_data(code,startdate,enddate):
try:
df = ts.get_hist_data(code,start=startdate,end=enddate,ktype='M')
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.insert(0,'code',code)
df.to_sql('stock_month_data', engine, if_exists='append')
print code + " month success"
except Exception,e:
print e.message
# Fetch 5-minute K-line data for a stock
def get_five_data(code,startdate,enddate):
try:
df = ts.get_hist_data(code,start=startdate,end=enddate,ktype='5')
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.insert(0,'code',code)
df.to_sql('stock_five_data', engine, if_exists='append')
print code + " five success"
except Exception,e:
print e.message
# Fetch industry classification
def get_industry_classified():
try:
df = ts.get_industry_classified();
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
# df.insert(0,'code','600848')
df.to_sql('industry_classified', engine, if_exists='append')
except Exception, e:
e.message
#ts.get_hist_data('sh')    # SSE Composite Index k-line data; other parameters are the same as for individual stocks, same below
#ts.get_hist_data('sz')    # Shenzhen Component Index k-line data
#ts.get_hist_data('hs300') # CSI 300 Index k-line data
#ts.get_hist_data('sz50')  # SSE 50 Index k-line data
#ts.get_hist_data('zxb')   # SME Board Index k-line data
#ts.get_hist_data('cyb')   # ChiNext Index k-line data
# Fetch concept classification
def get_concept_classified():
try:
df = ts.get_concept_classified();
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
# df.insert(0,'code','600848')
df.to_sql('concept_classified', engine, if_exists='append')
except Exception, e:
e.message
# Fetch region classification
def get_area_classified():
try:
df = ts.get_area_classified();
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
# df.insert(0,'code','600848')
df.to_sql('area_classified', engine, if_exists='append')
except Exception, e:
e.message
# Fetch SME board classification
# Fetch ChiNext board classification
# Fetch risk-warning (ST) board classification
def get_st_classified():
try:
df = ts.get_st_classified();
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
# df.insert(0,'code','600848')
df.to_sql('st_classified', engine, if_exists='append')
except Exception, e:
e.message
# CSI 300 constituents and weights
def get_hs300s():
try:
df = ts.get_hs300s();
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
# df.insert(0,'code','600848')
df.to_sql('hs300s', engine, if_exists='append')
except Exception, e:
e.message
# SSE 50 constituent stocks
def get_sz50s():
try:
df = ts.get_sz50s();
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
# df.insert(0,'code','600848')
df.to_sql('sz50s', engine, if_exists='append')
except Exception, e:
e.message
# CSI 500 constituent stocks
def get_zz500s():
try:
df = ts.get_zz500s();
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
# df.insert(0,'code','600848')
df.to_sql('zz500s', engine, if_exists='append')
except Exception, e:
e.message
# Stock fundamentals -- performance reports
# e.g. fetch the performance report data for Q3 2014:
# ts.get_report_data(2014,3)
def get_report_data(year, quarter):
try:
df = ts.get_report_data(year, quarter)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('report_data', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Stock fundamentals -- profitability
def get_profit_data(year, quarter):
try:
df = ts.get_profit_data(year, quarter)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('profit_data', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Stock fundamentals -- operating capability
def get_operation_data(year, quarter):
try:
df = ts.get_operation_data(year, quarter)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('operation_data', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Stock fundamentals -- growth capability
def get_growth_data(year, quarter):
try:
df = ts.get_growth_data(year, quarter)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('growth_data', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Stock fundamentals -- solvency
def get_debtpaying_data(year, quarter):
try:
df = ts.get_debtpaying_data(year, quarter)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('debtpaying_data', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Stock fundamentals -- cash flow
def get_cashflow_data(year, quarter):
try:
df = ts.get_cashflow_data(year, quarter)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('cashflow_data', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Macroeconomic data -- deposit rates
def get_deposit_rate():
try:
df = ts.get_deposit_rate()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('deposit_rate', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Macroeconomic data -- loan rates
def get_loan_rate():
try:
df = ts.get_loan_rate()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('loan_rate', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Macroeconomic data -- required reserve ratio
def get_rrr(year, quarter):
try:
df = ts.get_rrr()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('rrr', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Macroeconomic data -- money supply
def get_money_supply():
try:
df = ts.get_money_supply()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('money_supply', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Macroeconomic data -- GDP (annual)
def get_gdp_year():
try:
df = ts.get_gdp_year()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('gdp_year', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Macroeconomic data -- GDP (quarterly)
def get_gdp_quarter():
try:
df = ts.get_gdp_quarter()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('gdp_quarter', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Contribution of the three major demand components to GDP
def get_gdp_for():
try:
df = ts.get_gdp_for()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('gdp_for', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Pull of the three major demand components on GDP
def get_gdp_pull():
try:
df = ts.get_gdp_pull()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('gdp_pull', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Contribution rates of the three major industries
def get_gdp_contrib():
try:
df = ts.get_gdp_contrib()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('gdp_contrib', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Consumer price index (CPI)
def get_cpi():
try:
df = ts.get_cpi()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('cpi', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Producer price index (PPI)
def get_ppi():
try:
df = ts.get_ppi()
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('ppi', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Top list ("dragon-tiger" board) data
def get_top_list(date):
try:
df = ts.top_list(date)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('top_list', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Per-stock top-list statistics
def cap_tops(days,retry_count,pause):
try:
df = ts.cap_tops(days,retry_count,pause)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('cap_tops', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Brokerage branch top-list statistics
def broker_tops(days,retry_count,pause):
try:
df = ts.broker_tops(days,retry_count,pause)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('broker_tops', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Institutional seat tracking
def inst_tops(days,retry_count,pause):
try:
df = ts.inst_tops(days,retry_count,pause)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('inst_tops', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Institutional trade details
def inst_detail(retry_count,pause):
try:
df = ts.inst_detail(retry_count,pause)
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
df.to_sql('inst_detail', engine, if_exists='append')
print "message"
except Exception, e:
e.message
# Get the latest stored date for a given stock in the daily K-line table
def get_day_maxdate(stockno):
try:
sql = "select max(date) maxdate from stock_day_data where code='"+stockno+"'"
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
#df.to_sql('inst_detail', engine, if_exists='append')
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if maxdate=='':
stockDf =ts.get_stock_basics()
sssj = str(stockDf.ix[stockno]['timeToMarket'])  # listing date, YYYYMMDD
return dateutil.convertDate(sssj)
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for a given stock in the weekly K-line table
def get_week_maxdate(stockno):
try:
sql = "select max(date) maxdate from stock_week_data where code='"+stockno+"'"
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
#df.to_sql('inst_detail', engine, if_exists='append')
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if maxdate=='':
stockDf =ts.get_stock_basics()
sssj = str(stockDf.ix[stockno]['timeToMarket'])  # listing date, YYYYMMDD
return dateutil.convertDate(sssj)
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for a given stock in the monthly K-line table
def get_month_maxdate(stockno):
try:
sql = "select max(date) maxdate from stock_month_data where code='"+stockno+"'"
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if maxdate=='':
stockDf =ts.get_stock_basics()
sssj = str(stockDf.ix[stockno]['timeToMarket'])  # listing date, YYYYMMDD
return dateutil.convertDate(sssj)
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for a given stock in the forward-adjusted (qfq) table
def get_qfq_maxdate(stockno):
try:
sql = "select max(date) maxdate from stock_qfq_data where code='"+stockno+"'"
engine = create_engine('mysql://root:123456@127.0.0.1/stock?charset=utf8')
#df.to_sql('inst_detail', engine, if_exists='append')
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0][0:10]
cursor.close()
conn.close()
if maxdate=='':
stockDf =ts.get_stock_basics()
sssj = str(stockDf.ix[stockno]['timeToMarket'])  # listing date, YYYYMMDD
return dateutil.convertDate(sssj)
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for a given stock in the unadjusted stock_bfq_data table
def get_bfq_maxdate(stockno):
try:
sql = "select max(transdate) maxdate from stock_bfq_data where code='"+stockno+"'"
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if len(maxdate)>10:
maxdate=maxdate[0:10]
if maxdate=='':
stockDf =ts.get_stock_basics()
sssj = str(stockDf.ix[stockno]['timeToMarket'])  # listing date, YYYYMMDD
return dateutil.convertDate(sssj)
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
#399001 Shenzhen Component Index
#399006 ChiNext Index
#399005 SME Board Index
#399300 CSI 300 Index
#000001 SSE Composite Index
# Get the latest stored date for the Shenzhen Component Index
def get_szcz_maxdate():
try:
sql = "select max(date) maxdate from stock_index_data where code='399001'"
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if len(maxdate)>10:
maxdate=maxdate[0:10]
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for the ChiNext Index
def get_cybz_maxdate():
try:
sql = "select max(date) maxdate from stock_index_data where code='399006'"
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if len(maxdate)>10:
maxdate=maxdate[0:10]
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for the SME Board Index
def get_zxbz_maxdate():
try:
sql = "select max(date) maxdate from stock_index_data where code='399005'"
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if len(maxdate)>10:
maxdate=maxdate[0:10]
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for the CSI 300 Index
def get_hs300_maxdate():
try:
sql = "select max(date) maxdate from stock_index_data where code='399300'"
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if len(maxdate)>10:
maxdate=maxdate[0:10]
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
# Get the latest stored date for the SSE Composite Index
def get_szzs_maxdate():
try:
sql = "select max(date) maxdate from stock_index_data where code='000001'"
conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='stock')
cursor = conn.cursor()
n = cursor.execute(sql)
maxdate = ''
for r in cursor:
maxdate = r[0]
cursor.close()
conn.close()
if len(maxdate)>10:
maxdate=maxdate[0:10]
return dateutil.get_next_day(maxdate)
except Exception,e:
print e.message
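# Example usage sketch (assumes the local MySQL 'stock' database configured
# above and network access to tushare; '600848' is an arbitrary sample code):
if __name__ == '__main__':
    start = get_day_maxdate('600848')
    get_day_data('600848', start, time.strftime('%Y-%m-%d'))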
|
apache-2.0
| -292,003,996,196,294,460
| 30.665079
| 90
| 0.607129
| false
| 2.684069
| false
| false
| false
|
FrancescSala/Instagram-Challenge
|
solution/shr.py
|
1
|
1485
|
from PIL import Image
from random import shuffle
import sys
if len(sys.argv) != 4: sys.exit("Usage: python shr.py input_image num_shreds output_image")
# load the input image
img = Image.open(sys.argv[1])
# read the desired number of shreds
numShreds = int(sys.argv[2])
if numShreds < 2: sys.exit("Expected number of shreds to be at least 2")
if img.width % numShreds != 0:
print "The number of shreds must be a submultiple of the width of the image: ", img.width
sys.exit()
# prepare the shred of the image
sequence = range(0, numShreds)
shuffle(sequence)
# check the sequence in order to make sure that there are not contiguous shreds in the sequence
# if there are, just swap them
# in other words, make sure all the shreds in the shredded image will be exactly the same width
for i in range(len(sequence)-1):
# if contiguous shreds, swap them
if sequence[i] == sequence[i+1] - 1:
sequence[i] = sequence[i] + 1
sequence[i+1] = sequence[i+1] - 1
# calculate the width of the shreds
shredWidth = img.width / numShreds
# create the shredded image
shredded = Image.new(img.mode, img.size)
for i, shred_index in enumerate(sequence):
shred_x1, shred_y1 = shredWidth * shred_index, 0
shred_x2, shred_y2 = shred_x1 + shredWidth, img.height
shredded.paste(img.crop((shred_x1, shred_y1, shred_x2, shred_y2)), (shredWidth * i, 0))
# finally, save the shredded image
shredded.save(sys.argv[3])
print "Shredded image saved as: ", sys.argv[3]
|
mit
| 5,471,137,774,938,985,000
| 32.75
| 95
| 0.708418
| false
| 3.006073
| false
| false
| false
|
ultrabug/py3status
|
py3status/modules/vpn_status.py
|
1
|
4561
|
"""
Drop-in replacement for i3status run_watch VPN module.
Expands on the i3status module by displaying the name of the connected vpn
using pydbus. Asynchronously updates on dbus signals unless check_pid is True.
Configuration parameters:
cache_timeout: How often to refresh in seconds when check_pid is True.
(default 10)
check_pid: If True, act just like the default i3status module.
(default False)
format: Format of the output.
(default 'VPN: {name}|VPN: no')
pidfile: Same as i3status pidfile, checked when check_pid is True.
(default '/sys/class/net/vpn0/dev_id')
Format placeholders:
{name} The name and/or status of the VPN.
Color options:
color_bad: VPN connected
color_good: VPN down
Requires:
pydbus: Which further requires PyGi. Check your distribution's repositories.
@author Nathan Smith <nathan AT praisetopia.org>
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': u'VPN: yes'}
off
{'color': '#FF0000', 'full_text': u'VPN: no'}
"""
from pydbus import SystemBus
from gi.repository import GObject
from threading import Thread
from time import sleep
from pathlib import Path
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
check_pid = False
format = "VPN: {name}|VPN: no"
pidfile = "/sys/class/net/vpn0/dev_id"
def post_config_hook(self):
self.thread_started = False
self.active = []
def _start_handler_thread(self):
"""Called once to start the event handler thread."""
# Create handler thread
t = Thread(target=self._start_loop)
t.daemon = True
# Start handler thread
t.start()
self.thread_started = True
def _start_loop(self):
"""Starts main event handler loop, run in handler thread t."""
# Create our main loop, get our bus, and add the signal handler
loop = GObject.MainLoop()
bus = SystemBus()
manager = bus.get(".NetworkManager")
manager.onPropertiesChanged = self._vpn_signal_handler
# Loop forever
loop.run()
def _vpn_signal_handler(self, args):
"""Called on NetworkManager PropertiesChanged signal"""
# Args is a dictionary of changed properties
# We only care about changes in ActiveConnections
active = "ActiveConnections"
# Compare current ActiveConnections to last seen ActiveConnections
if active in args and sorted(self.active) != sorted(args[active]):
self.active = args[active]
self.py3.update()
def _get_vpn_status(self):
"""Returns None if no VPN active, Id if active."""
# Sleep for a bit to let any changes in state finish
sleep(0.3)
# Check if any active connections are a VPN
bus = SystemBus()
ids = []
for name in self.active:
conn = bus.get(".NetworkManager", name)
if conn.Vpn:
ids.append(conn.Id)
# Return the collected ids (an empty list means no VPN is active)
return ids
def _check_pid(self):
"""Returns True if pidfile exists, False otherwise."""
return Path(self.pidfile).is_file()
# Method run by py3status
def vpn_status(self):
"""Returns response dict"""
# Start signal handler thread if it should be running
if not self.check_pid and not self.thread_started:
self._start_handler_thread()
# Set color_bad as default output. Replaced if VPN active.
name = None
color = self.py3.COLOR_BAD
# If we are acting like the default i3status module
if self.check_pid:
if self._check_pid():
name = "yes"
color = self.py3.COLOR_GOOD
# Otherwise, find the VPN name, if it is active
else:
vpn = self._get_vpn_status()
if vpn:
name = ", ".join(vpn)
color = self.py3.COLOR_GOOD
# Format and create the response dict
full_text = self.py3.safe_format(self.format, {"name": name})
response = {
"full_text": full_text,
"color": color,
"cached_until": self.py3.CACHE_FOREVER,
}
# Cache forever unless in check_pid mode
if self.check_pid:
response["cached_until"] = self.py3.time_in(self.cache_timeout)
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
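# Example py3status configuration (i3status.conf syntax; parameter names and
# defaults as documented in the module docstring above):
#
#   order += "vpn_status"
#   vpn_status {
#       check_pid = True
#       pidfile = "/sys/class/net/vpn0/dev_id"
#   }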
|
bsd-3-clause
| -3,124,280,615,940,087,000
| 29.205298
| 80
| 0.612146
| false
| 4.007909
| false
| false
| false
|
uncommoncode/uscpy
|
stabilizer.py
|
1
|
1757
|
"""
Image stabilization code.
"""
import argparse
import cv2
import uscpy.sequence
import uscpy.frame
import uscpy.video
parser = argparse.ArgumentParser(description="Perform image stabilization on a video")
parser.add_argument("input", help="input video path")
parser.add_argument("output", help="output video path")
parser.add_argument("--encoder", default="rawrgb", help="output video encoder. supported formats: %s" % (uscpy.video.FORMAT_TABLE.keys()))
args = parser.parse_args()
if args.encoder not in uscpy.video.FORMAT_TABLE:
raise Exception("Encoding format '%s' not supported.")
vc = cv2.VideoCapture(args.input)
if not vc.isOpened():
raise Exception("Error opening video input '%s'" % args.input)
width = int(vc.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(vc.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = vc.get(cv2.cv.CV_CAP_PROP_FPS)
fourcc = int(vc.get(cv2.cv.CV_CAP_PROP_FOURCC))
print("video-input:")
print(" width: %d\n height: %d\n fps: %d" % (width, height, fps))
print("video-output:")
print(" format: %s" % (args.encoder))
encoder_format = uscpy.video.FORMAT_TABLE[args.encoder]
vw = cv2.VideoWriter(args.output, encoder_format, fps, (width, height), True)
if not vw.isOpened():
raise Exception("Error opening video output '%s'" % args.output)
vc_sequence = uscpy.sequence.video_capture(vc)
greyscale_sequence = uscpy.sequence.processor(vc_sequence, uscpy.frame.greyscale)
stable_sequence = uscpy.sequence.phase_stabilize(greyscale_sequence)
frame_count = 0
for frame in stable_sequence:
# save each frame to disk
bgr_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
vw.write(bgr_frame)
if (frame_count % fps) == 0:
print("rendered-frame: %d" % frame_count)
frame_count += 1
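# Example invocation (hypothetical file names; 'rawrgb' is the default encoder):
#   python stabilizer.py shaky.avi stable.avi --encoder rawrgb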
|
mit
| 4,128,069,966,771,620,000
| 33.45098
| 138
| 0.715424
| false
| 2.938127
| false
| false
| false
|
rishig/zulip
|
zerver/management/commands/show_unreads.py
|
1
|
2690
|
from argparse import ArgumentParser
from typing import Any, Dict, List, Set
from django.core.management.base import CommandError
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.topic_mutes import build_topic_mute_checker
from zerver.models import Recipient, Subscription, UserMessage, UserProfile
def get_unread_messages(user_profile: UserProfile) -> List[Dict[str, Any]]:
user_msgs = UserMessage.objects.filter(
user_profile=user_profile,
message__recipient__type=Recipient.STREAM
).extra(
where=[UserMessage.where_unread()]
).values(
'message_id',
'message__subject',
'message__recipient_id',
'message__recipient__type_id',
).order_by("message_id")
result = [
dict(
message_id=row['message_id'],
topic=row['message__subject'],
stream_id=row['message__recipient__type_id'],
recipient_id=row['message__recipient_id'],
)
for row in list(user_msgs)]
return result
def get_muted_streams(user_profile: UserProfile, stream_ids: Set[int]) -> Set[int]:
rows = Subscription.objects.filter(
user_profile=user_profile,
recipient__type_id__in=stream_ids,
is_muted=True,
).values(
'recipient__type_id'
)
muted_stream_ids = {
row['recipient__type_id']
for row in rows}
return muted_stream_ids
def show_all_unread(user_profile: UserProfile) -> None:
unreads = get_unread_messages(user_profile)
stream_ids = {row['stream_id'] for row in unreads}
muted_stream_ids = get_muted_streams(user_profile, stream_ids)
is_topic_muted = build_topic_mute_checker(user_profile)
for row in unreads:
row['stream_muted'] = row['stream_id'] in muted_stream_ids
row['topic_muted'] = is_topic_muted(row['recipient_id'], row['topic'])
row['before'] = row['message_id'] < user_profile.pointer
for row in unreads:
print(row)
class Command(ZulipBaseCommand):
help = """Show unread counts for a particular user."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('email', metavar='<email>', type=str,
help='email address to spelunk')
self.add_realm_args(parser)
def handle(self, *args: Any, **options: str) -> None:
realm = self.get_realm(options)
email = options['email']
try:
user_profile = self.get_user(email, realm)
except CommandError:
print("e-mail %s doesn't exist in the realm %s, skipping" % (email, realm))
return
show_all_unread(user_profile)
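# Example invocation (hypothetical address; run from the Zulip project root):
#   ./manage.py show_unreads hamlet@zulip.com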
|
apache-2.0
| -5,050,300,999,581,726,000
| 31.409639
| 87
| 0.628253
| false
| 3.730929
| false
| false
| false
|
vicnet/weboob
|
weboob/tools/pdf.py
|
1
|
16156
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Oleg Plakhotniuk
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from collections import namedtuple
import logging
import os
import subprocess
from tempfile import mkstemp
from .compat import range
__all__ = ['decompress_pdf', 'get_pdf_rows']
def decompress_pdf(inpdf):
"""
Takes PDF file contents as a string and returns decompressed version
of the file contents, suitable for text parsing.
External dependencies:
MuPDF (http://www.mupdf.com).
"""
inh, inname = mkstemp(suffix='.pdf')
outh, outname = mkstemp(suffix='.pdf')
os.write(inh, inpdf)
os.close(inh)
os.close(outh)
subprocess.call(['mutool', 'clean', '-d', inname, outname])
with open(outname, 'rb') as f:
outpdf = f.read()
os.remove(inname)
os.remove(outname)
return outpdf
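# A minimal usage sketch (not part of weboob's public API): decompress a PDF
# read from disk. `path` is a hypothetical argument; mutool must be installed.
def _decompress_pdf_file(path):
    with open(path, 'rb') as f:
        return decompress_pdf(f.read())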
Rect = namedtuple('Rect', ('x0', 'y0', 'x1', 'y1'))
TextRect = namedtuple('TextRect', ('x0', 'y0', 'x1', 'y1', 'text'))
def almost_eq(a, b):
return abs(a - b) < 2
def lt_to_coords(obj, ltpage):
# in a pdf, 'y' coords are bottom-to-top
# in a pdf, coordinates are very often almost equal but not strictly equal
x0 = (min(obj.x0, obj.x1))
y0 = (min(ltpage.y1 - obj.y0, ltpage.y1 - obj.y1))
x1 = (max(obj.x0, obj.x1))
y1 = (max(ltpage.y1 - obj.y0, ltpage.y1 - obj.y1))
x0 = round(x0)
y0 = round(y0)
x1 = round(x1)
y1 = round(y1)
# in a pdf, straight lines are actually rects, make them as thin as possible
if almost_eq(x1, x0):
x1 = x0
if almost_eq(y1, y0):
y1 = y0
return Rect(x0, y0, x1, y1)
def lttext_to_multilines(obj, ltpage):
# text lines within 'obj' are probably the same height
x0 = (min(obj.x0, obj.x1))
y0 = (min(ltpage.y1 - obj.y0, ltpage.y1 - obj.y1))
x1 = (max(obj.x0, obj.x1))
y1 = (max(ltpage.y1 - obj.y0, ltpage.y1 - obj.y1))
lines = obj.get_text().rstrip('\n').split('\n')
h = (y1 - y0) / len(lines)
for n, line in enumerate(lines):
yield TextRect((x0), (y0 + n * h), (x1), (y0 + n * h + h), line)
# fuzzy floats to smooth comparisons because lines are actually rects
# and seemingly-contiguous lines are actually not contiguous
class ApproxFloat(float):
def __eq__(self, other):
return almost_eq(self, other)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self - other < 0 and self != other
def __le__(self, other):
return self - other <= 0 or self == other
def __gt__(self, other):
return not self <= other
def __ge__(self, other):
return not self < other
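# Illustration (hedged) of the fuzzy comparisons above:
#   ApproxFloat(10.0) == 11.5  -> True   (abs(10.0 - 11.5) < 2)
#   ApproxFloat(10.0) < 13.0   -> True   (negative difference and not ==)
#   ApproxFloat(10.0) < 11.0   -> False  (10.0 and 11.0 compare equal here)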
ANGLE_VERTICAL = 0
ANGLE_HORIZONTAL = 1
ANGLE_OTHER = 2
def angle(r):
if r.x0 == r.x1:
return ANGLE_VERTICAL
elif r.y0 == r.y1:
return ANGLE_HORIZONTAL
return ANGLE_OTHER
class ApproxVecDict(dict):
# since coords are never strictly equal, search coords around
# store vectors and points
def __getitem__(self, coords):
x, y = coords
for i in (0, -1, 1):
for j in (0, -1, 1):
try:
return super(ApproxVecDict, self).__getitem__((x+i, y+j))
except KeyError:
pass
raise KeyError()
def get(self, k, v=None):
try:
return self[k]
except KeyError:
return v
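# Illustration (hedged): lookups tolerate coordinates that are off by one unit.
#   d = ApproxVecDict()
#   d[(10, 20)] = 'cell'
#   d[(11, 19)]  -> 'cell'    (found via the (0, -1, 1) neighbourhood scan)
#   d[(13, 20)]  -> KeyError  (x is more than one unit away)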
class ApproxRectDict(dict):
# like ApproxVecDict, but store rects
def __getitem__(self, coords):
x0, y0, x1, y1 = coords
for i in (0, -1, 1):
for j in (0, -1, 1):
if x0 == x1:
for j2 in (0, -1, 1):
try:
return super(ApproxRectDict, self).__getitem__((x0+i, y0+j, x0+i, y1+j2))
except KeyError:
pass
elif y0 == y1:
for i2 in (0, -1, 1):
try:
return super(ApproxRectDict, self).__getitem__((x0+i, y0+j, x1+i2, y0+j))
except KeyError:
pass
else:
return super(ApproxRectDict, self).__getitem__((x0, y0, x1, y1))
raise KeyError()
def uniq_lines(lines):
new = ApproxRectDict()
for line in lines:
line = tuple(line)
try:
new[line]
except KeyError:
new[line] = None
return [Rect(*k) for k in new.keys()]
def build_rows(lines):
points = ApproxVecDict()
# for each top-left point, build tuple with lines going down and lines going right
for line in lines:
a = angle(line)
if a not in (ANGLE_HORIZONTAL, ANGLE_VERTICAL):
continue
coord = (line.x0, line.y0)
plines = points.get(coord)
if plines is None:
plines = points[coord] = tuple([] for _ in range(2))
plines[a].append(line)
boxes = ApproxVecDict()
for plines in points.values():
if not (plines[ANGLE_HORIZONTAL] and plines[ANGLE_VERTICAL]):
continue
plines[ANGLE_HORIZONTAL].sort(key=lambda l: (l.y0, l.x1))
plines[ANGLE_VERTICAL].sort(key=lambda l: (l.x0, l.y1))
for hline in plines[ANGLE_HORIZONTAL]:
try:
vparallels = points[hline.x1, hline.y0][ANGLE_VERTICAL]
except KeyError:
continue
if not vparallels:
continue
for vline in plines[ANGLE_VERTICAL]:
try:
hparallels = points[vline.x0, vline.y1][ANGLE_HORIZONTAL]
except KeyError:
continue
if not hparallels:
continue
hparallels = [hpar for hpar in hparallels if almost_eq(hpar.x1, hline.x1)]
if not hparallels:
continue
vparallels = [vpar for vpar in vparallels if almost_eq(vpar.y1, vline.y1)]
if not vparallels:
continue
assert len(hparallels) == 1 and len(vparallels) == 1
assert almost_eq(hparallels[0].y0, vparallels[0].y1)
assert almost_eq(vparallels[0].x0, hparallels[0].x1)
box = Rect(hline.x0, hline.y0, hline.x1, vline.y1)
boxes.setdefault((vline.y0, vline.y1), []).append(box)
rows = list(boxes.values())
new_rows = []
for row in rows:
row.sort(key=lambda box: box.x0)
if row:
row = [row[0]] + [c for n, c in enumerate(row[1:], 1) if row[n-1].x0 != c.x0]
new_rows.append(row)
rows = new_rows
rows.sort(key=lambda row: row[0].y0)
return rows
def find_in_table(rows, rect):
for j, row in enumerate(rows):
if ApproxFloat(row[0].y0) > rect.y1:
break
if not (ApproxFloat(row[0].y0) <= rect.y0 and ApproxFloat(row[0].y1) >= rect.y1):
continue
for i, box in enumerate(row):
if ApproxFloat(box.x0) <= rect.x0 and ApproxFloat(box.x1) >= rect.x1:
return i, j
def arrange_texts_in_rows(rows, trects):
table = [[[] for _ in row] for row in rows]
for trect in trects:
pos = find_in_table(rows, trect)
if not pos:
continue
table[pos[1]][pos[0]].append(trect.text)
return table
LOGGER = logging.getLogger('pdf')
DEBUGFILES = logging.DEBUG - 1
def get_pdf_rows(data, miner_layout=True):
"""
    Takes PDF file content as a string and yields table row data for each page.
For each page in the PDF, the function yields a list of rows.
Each row is a list of cells. Each cell is a list of strings present in the cell.
Note that the rows may belong to different tables.
    There are no logical tables in the PDF format, so this parses PDF drawing instructions
and tries to find rectangles and arrange them in rows, then arrange text in
the rectangles.
External dependencies:
PDFMiner (http://www.unixuser.org/~euske/python/pdfminer/index.html).
"""
try:
from pdfminer.pdfparser import PDFParser, PDFSyntaxError
except ImportError:
raise ImportError('Please install python-pdfminer')
try:
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
newapi = True
except ImportError:
from pdfminer.pdfparser import PDFDocument
newapi = False
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams, LTRect, LTTextBox, LTTextLine, LTLine, LTChar, LTCurve
parser = PDFParser(BytesIO(data))
try:
if newapi:
doc = PDFDocument(parser)
else:
doc = PDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
except PDFSyntaxError:
return
rsrcmgr = PDFResourceManager()
if miner_layout:
device = PDFPageAggregator(rsrcmgr, laparams=LAParams())
else:
device = PDFPageAggregator(rsrcmgr)
interpreter = PDFPageInterpreter(rsrcmgr, device)
if newapi:
pages = PDFPage.get_pages(BytesIO(data), check_extractable=True)
else:
doc.initialize()
pages = doc.get_pages()
if LOGGER.isEnabledFor(DEBUGFILES):
import tempfile
import PIL.Image as Image
import PIL.ImageDraw as ImageDraw
import random
path = tempfile.mkdtemp(prefix='pdf')
for npage, page in enumerate(pages):
LOGGER.debug('processing page %s', npage)
interpreter.process_page(page)
page_layout = device.get_result()
texts = sum([list(lttext_to_multilines(obj, page_layout)) for obj in page_layout._objs if isinstance(obj, (LTTextBox, LTTextLine, LTChar))], [])
LOGGER.debug('found %d text objects', len(texts))
if LOGGER.isEnabledFor(DEBUGFILES):
img = Image.new('RGB', (int(page.mediabox[2]), int(page.mediabox[3])), (255, 255, 255))
draw = ImageDraw.Draw(img)
for t in texts:
color = (random.randint(127, 255), random.randint(127, 255), random.randint(127, 255))
draw.rectangle((t.x0, t.y0, t.x1, t.y1), outline=color)
draw.text((t.x0, t.y0), t.text.encode('utf-8'), color)
fpath = '%s/1text-%03d.png' % (path, npage)
img.save(fpath)
LOGGER.log(DEBUGFILES, 'saved %r', fpath)
if not miner_layout:
texts.sort(key=lambda t: (t.y0, t.x0))
# TODO filter ltcurves that are not lines?
# TODO convert rects to 4 lines?
lines = [lt_to_coords(obj, page_layout) for obj in page_layout._objs if isinstance(obj, (LTRect, LTLine, LTCurve))]
LOGGER.debug('found %d lines', len(lines))
if LOGGER.isEnabledFor(DEBUGFILES):
img = Image.new('RGB', (int(page.mediabox[2]), int(page.mediabox[3])), (255, 255, 255))
draw = ImageDraw.Draw(img)
for l in lines:
color = (random.randint(127, 255), random.randint(127, 255), random.randint(127, 255))
draw.rectangle((l.x0, l.y0, l.x1, l.y1), outline=color)
fpath = '%s/2lines-%03d.png' % (path, npage)
img.save(fpath)
LOGGER.log(DEBUGFILES, 'saved %r', fpath)
lines = list(uniq_lines(lines))
LOGGER.debug('found %d unique lines', len(lines))
rows = build_rows(lines)
LOGGER.debug('built %d rows (%d boxes)', len(rows), sum(len(row) for row in rows))
if LOGGER.isEnabledFor(DEBUGFILES):
img = Image.new('RGB', (int(page.mediabox[2]), int(page.mediabox[3])), (255, 255, 255))
draw = ImageDraw.Draw(img)
for r in rows:
for b in r:
color = (random.randint(127, 255), random.randint(127, 255), random.randint(127, 255))
draw.rectangle((b.x0 + 1, b.y0 + 1, b.x1 - 1, b.y1 - 1), outline=color)
fpath = '%s/3rows-%03d.png' % (path, npage)
img.save(fpath)
LOGGER.log(DEBUGFILES, 'saved %r', fpath)
textrows = arrange_texts_in_rows(rows, texts)
LOGGER.debug('assigned %d strings', sum(sum(len(c) for c in r) for r in textrows))
if LOGGER.isEnabledFor(DEBUGFILES):
img = Image.new('RGB', (int(page.mediabox[2]), int(page.mediabox[3])), (255, 255, 255))
draw = ImageDraw.Draw(img)
for row, trow in zip(rows, textrows):
for b, tlines in zip(row, trow):
color = (random.randint(127, 255), random.randint(127, 255), random.randint(127, 255))
draw.rectangle((b.x0 + 1, b.y0 + 1, b.x1 - 1, b.y1 - 1), outline=color)
draw.text((b.x0 + 1, b.y0 + 1), '\n'.join(tlines).encode('utf-8'), color)
fpath = '%s/4cells-%03d.png' % (path, npage)
img.save(fpath)
LOGGER.log(DEBUGFILES, 'saved %r', fpath)
yield textrows
device.close()
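# Example usage (hedged sketch; 'table.pdf' is a placeholder path):
#   with open('table.pdf', 'rb') as f:
#       for page_rows in get_pdf_rows(f.read()):
#           for row in page_rows:
#               print([' '.join(cell) for cell in row])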
# Export part #
def html_to_pdf(browser, url=None, data=None, extra_options=None):
"""
Convert html to PDF.
:param browser: browser instance
    :param url: link to the html resource
:param data: HTML content
:return: the document converted in PDF
:rtype: bytes
"""
try:
import pdfkit # https://pypi.python.org/pypi/pdfkit
except ImportError:
raise ImportError('Please install python-pdfkit')
assert (url or data) and not (url and data), 'Please give only url or data parameter'
callback = pdfkit.from_url if url else pdfkit.from_string
options = {}
try:
cookies = browser.session.cookies
except AttributeError:
pass
else:
options.update({
'cookie': [(cookie, value) for cookie, value in cookies.items() if value], # cookies of browser
})
if extra_options:
options.update(extra_options)
return callback(url or data, False, options=options)
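# Example (hedged sketch; 'browser' stands for any weboob browser instance):
#   pdf_bytes = html_to_pdf(browser, url='https://example.com/invoice')
#   with open('invoice.pdf', 'wb') as f:
#       f.write(pdf_bytes)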
# extract all text from PDF
def extract_text(data):
try:
try:
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
newapi = True
except ImportError:
from pdfminer.pdfparser import PDFDocument
newapi = False
from pdfminer.pdfparser import PDFParser, PDFSyntaxError
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
except ImportError:
raise ImportError('Please install python-pdfminer to parse PDF')
else:
parser = PDFParser(BytesIO(data))
try:
if newapi:
doc = PDFDocument(parser)
else:
doc = PDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
except PDFSyntaxError:
return
rsrcmgr = PDFResourceManager()
out = BytesIO()
device = TextConverter(rsrcmgr, out)
interpreter = PDFPageInterpreter(rsrcmgr, device)
if newapi:
pages = PDFPage.create_pages(doc)
else:
doc.initialize()
pages = doc.get_pages()
for page in pages:
interpreter.process_page(page)
return out.getvalue()
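# Example (hedged): extract the raw text layer from PDF bytes.
#   with open('doc.pdf', 'rb') as f:
#       text = extract_text(f.read())  # bytes, or None on a syntax error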
|
lgpl-3.0
| -273,186,497,165,296,580
| 31.441767
| 152
| 0.581703
| false
| 3.550769
| false
| false
| false
|
istvanzk/RasPiConnectServer
|
ExecuteFiles/ExecuteServerStatus.py
|
1
|
1371
|
#!/usr/local/bin/python3
# Filename: ExecuteServerStatus.py
# Version 2.7 07/29/13 RV MiloCreek
# Version 3.0 04.04.2016 IzK (Python3.4+)
import Config
import subprocess
import xml.etree.ElementTree as ET
import Validate
import BuildResponse
import time
if (Config.i2c_demo()):
from pyblinkm import BlinkM, Scripts
def Execute_Server_Status(root):
# find the interface object type
objectServerID = root.find("./OBJECTSERVERID").text
objectFlags = root.find("./OBJECTFLAGS").text
validate = Validate.checkForValidate(root)
if (Config.debug()):
print("VALIDATE=%s" % validate)
outgoingXMLData = BuildResponse.buildHeader(root)
if (Config.debug()):
print("objectServerID = %s" % objectServerID)
# we have the objectServerID so now we can choose the correct
# program
if (objectServerID == "SS-1"):
#check for validate request
if (validate == "YES"):
outgoingXMLData += Validate.buildValidateResponse("YES")
outgoingXMLData += BuildResponse.buildFooter()
return outgoingXMLData
responseData = "2"
outgoingXMLData += BuildResponse.buildResponse(responseData)
else:
# invalid RaspiConnect Code
outgoingXMLData += Validate.buildValidateResponse("NO")
outgoingXMLData += BuildResponse.buildFooter()
if (Config.debug()):
print(outgoingXMLData)
return outgoingXMLData
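# Illustrative request fragment (hedged; only the element names actually read
# via root.find() above are shown, and the enclosing root tag is a guess):
#   <REQUEST>
#     <OBJECTSERVERID>SS-1</OBJECTSERVERID>
#     <OBJECTFLAGS>0</OBJECTFLAGS>
#   </REQUEST>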
# End of ExecuteServerStatus.py
|
gpl-3.0
| -6,192,896,473,730,953,000
| 17.28
| 62
| 0.736689
| false
| 3.36855
| false
| false
| false
|
linyc74/WinduVision
|
threads/process_thread.py
|
1
|
9447
|
import numpy as np
import cv2, time, sys, threading, json
from constants import *
from abstract_thread import *
from stereo import Stereo as stereo
class ProcessThread(AbstractThread):
def __init__(self, cap_thread_R, cap_thread_L, mediator):
super(ProcessThread, self).__init__()
self.cap_thread_R = cap_thread_R
self.cap_thread_L = cap_thread_L
self.mediator = mediator
self.__init__parms()
self.set_fps(30.0)
self.connect_signals(mediator, ['display_image', 'set_info_text'])
def __init__parms(self):
# Parameters for image processing
self.offset_x, self.offset_y = 0, 0
self.zoom = 1.0
with open('parameters/gui.json', 'r') as fh:
gui_parms = json.loads(fh.read())
w = gui_parms['default_width']
h = gui_parms['default_height']
self.set_display_size(w, h)
self.set_resize_matrix()
# Parameters for stereo depth map
self.ndisparities = 32 # Must be divisible by 16
        self.SADWindowSize = 31 # Must be odd, within 5..255, and not larger than the image width or height
# Parameters for control and timing
self.computingDepth = False
self.t_series = [time.time() for i in range(30)]
def set_display_size(self, width, height):
'''
        Define the dimension of self.img_display, which is the final image to be displayed in the GUI.
'''
self.display_width = width
self.display_height = height
# Define the dimensions of:
# self.imgR_proc --- processed R image to be accessed externally
# self.imgL_proc --- L image
# self.img_display --- display image to be emitted to the GUI object
rows, cols = height, width
self.imgR_proc = np.zeros((rows, cols/2, 3), np.uint8)
self.imgL_proc = np.zeros((rows, cols/2, 3), np.uint8)
self.img_display = np.zeros((rows, cols , 3), np.uint8)
def set_resize_matrix(self):
'''
Define the transformation matrix for the image processing pipeline.
'''
img = self.cap_thread_R.get_image()
img_height, img_width, _ = img.shape
display_height, display_width = self.display_height, self.display_width
# The height-to-width ratio
ratio_img = float(img_height) / img_width
ratio_display = float(display_height) / (display_width / 2)
# The base scale factor is the ratio of display size / image size,
# which scales the image to the size of the display.
if ratio_img > ratio_display:
base_scale = float(display_height) / img_height # Height is the limiting factor
else:
base_scale = float(display_width/2) / img_width # Width is the limiting factor
# The actual scale factor is the product of the base scale factor and the zoom factor.
scale_x = base_scale * self.zoom
scale_y = base_scale * self.zoom
# The translation distance for centering
# = half of the difference between
# the screen size and the zoomed image size
# ( ( display size ) - ( zoomed image size ) ) / 2
tx = ( (display_width / 2) - (img_width * scale_x) ) / 2
ty = ( (display_height ) - (img_height * scale_y) ) / 2
# Putting everything together into a matrix
Sx = scale_x
Sy = scale_y
Off_x = self.offset_x
Off_y = self.offset_y
# For the right image, it's only scaling and centering
self.resize_matrix_R = np.float32([ [Sx, 0 , tx] ,
[0 , Sy, ty] ])
# For the left image, in addition to scaling and centering, the offset is also applied.
self.resize_matrix_L = np.float32([ [Sx, 0 , Sx*Off_x + tx] ,
[0 , Sy, Sy*Off_y + ty] ])
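        # Worked example (hedged): a 640x480 camera image in an 800x600 display.
        # ratio_img = 480/640 = 0.75 and ratio_display = 600/400 = 1.5, so the
        # width is the limiting factor: base_scale = 400/640 = 0.625. With
        # zoom = 1.0 the scaled image is 400x300, giving tx = (400 - 400)/2 = 0
        # and ty = (600 - 300)/2 = 150.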
def main(self):
'''
There are three major steps for the image processing pipeline,
with some additional steps in between.
( ) Check image dimensions.
(1) Eliminate offset of the left image.
(2) Resize and translate to place each image at the center of both sides of the view.
( ) Compute depth map (optional).
(3) Combine images.
'''
# Get the images from self.capture_thread
self.imgR_0 = self.cap_thread_R.get_image() # The suffix '_0' means raw input image
self.imgL_0 = self.cap_thread_L.get_image()
# Quick check on the image dimensions
# If not matching, skip all following steps
if not self.imgR_0.shape == self.imgL_0.shape:
self.mediator.emit_signal( signal_name = 'set_info_text',
arg = 'Image dimensions not identical.' )
time.sleep(0.1)
return
# (1) Eliminate offset of the left image.
# (2) Resize and translate to place each image at the center of both sides of the view.
rows, cols = self.display_height, self.display_width / 2 # Output image dimension
self.imgR_1 = cv2.warpAffine(self.imgR_0, self.resize_matrix_R, (cols, rows))
self.imgL_1 = cv2.warpAffine(self.imgL_0, self.resize_matrix_L, (cols, rows))
# Update processed images for external access
self.imgR_proc[:,:,:] = self.imgR_1[:,:,:]
self.imgL_proc[:,:,:] = self.imgL_1[:,:,:]
# Compute stereo depth map (optional)
if self.computingDepth:
self.imgL_1 = self.compute_depth()
# (3) Combine images.
h, w = self.display_height, self.display_width
self.img_display[:, 0:(w/2), :] = self.imgL_1
self.img_display[:, (w/2):w, :] = self.imgR_1
self.mediator.emit_signal( signal_name = 'display_image',
arg = self.img_display )
self.emit_fps_info()
def compute_depth(self):
imgL = stereo.compute_depth(self.imgR_1, self.imgL_1, self.ndisparities, self.SADWindowSize)
return imgL
def emit_fps_info(self):
'''
Emits real-time frame-rate info to the gui
'''
# Shift time series by one
self.t_series[1:] = self.t_series[:-1]
# Get the current time -> First in the series
self.t_series[0] = time.time()
# Calculate frame rate
rate = len(self.t_series) / (self.t_series[0] - self.t_series[-1])
data = {'line': 3,
'text': 'Active process thread: {} fps'.format(rate)}
self.mediator.emit_signal( signal_name = 'set_info_text',
arg = data )
# Below are public methods for higher-level objects
def set_offset(self, offset_x, offset_y):
x_limit, y_limit = 100, 100
if abs(offset_x) > x_limit or abs(offset_y) > y_limit:
self.offset_x, self.offset_y = 0, 0
else:
self.offset_x, self.offset_y = offset_x, offset_y
self.set_resize_matrix()
def detect_offset(self):
'''
1) Read right and left images from the cameras.
2) Use correlation function to calculate the offset.
'''
imgR = self.cap_thread_R.get_image()
imgL = self.cap_thread_L.get_image()
imgR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
imgL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
if not imgR.shape == imgL.shape:
return
# Define ROI of the left image
row, col = imgL.shape
a = int(row*0.25)
b = int(row*0.75)
c = int(col*0.25)
d = int(col*0.75)
roiL = np.float32( imgL[a:b, c:d] )
mat = cv2.matchTemplate(image = np.float32(imgR) ,
templ = roiL ,
method = cv2.TM_CCORR_NORMED)
# Vertical alignment, should always be done
y_max = cv2.minMaxLoc(mat)[3][1]
offset_y = y_max - row / 4
# Horizontal alignment, for infinitely far objects
x_max = cv2.minMaxLoc(mat)[3][0]
offset_x = x_max - col / 4
return offset_x, offset_y
def zoom_in(self):
if self.zoom * 1.01 < 2.0:
self.zoom = self.zoom * 1.01
self.set_resize_matrix()
def zoom_out(self):
if self.zoom / 1.01 > 0.5:
self.zoom = self.zoom / 1.01
self.set_resize_matrix()
def apply_depth_parameters(self, parameters):
"""
Args:
parameters: a dictionary with
key: str, parameter name
value: int, parameter value
"""
for key, value in parameters.items():
setattr(self, key, value)
def change_display_size(self, width, height):
self.pause()
self.set_display_size(width, height)
self.set_resize_matrix()
self.resume()
def get_processed_images(self):
return self.imgR_proc, self.imgL_proc
def get_display_image(self):
return self.img_display
def set_cap_threads(self, thread_R, thread_L):
self.pause()
self.cap_thread_R = thread_R
self.cap_thread_L = thread_L
# The input image dimension could be different after switching camera
# So reset resize matrix
self.set_resize_matrix()
self.resume()
|
mit
| -2,138,509,404,374,302,500
| 32.147368
| 108
| 0.565471
| false
| 3.641866
| false
| false
| false
|
pibroch/ocfs2-test
|
programs/write_torture/write_torture.py
|
1
|
4399
|
#!/usr/bin/env python
#
#
# Copyright (C) 2006 Oracle. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 021110-1307, USA.
#
# XXX: Future improvements:
#
# Program : write_torture.py
# Description : Interface to run write_torture. Will validate parameters and
# properly configure LAM/MPI and start it before starting
#                 the write_torture program. This program will run on each
# node.
# Author : Marcos E. Matsunaga
#
import os, stat, sys, time, optparse, socket, string, o2tf, pdb, timing, config
import random
#
#pdb.set_trace()
#
#args = sys.argv[1:]
#
MINBLOCKSIZE = 512
MAXBLOCKSIZE = 8192
#
DEBUGON = os.getenv('DEBUG',0)
#
EXECPGM = os.path.join(config.BINDIR,'write_torture')
#
uname = os.uname()
lhostname = str(socket.gethostname())
numnodes = 0
logfile = config.LOGFILE
blocksize = '512,4096'
seconds = 60
#
Usage = '\n %prog [-b|--blocksize] \
[-f | --filename <fullpath filename>] \
[-l | --logfile logfilename] \
[-s | --seconds seconds] \
[-u | --uniquefile] \
[-h|--help]'
#
# FUNCTIONS
#
#
# MAIN
#
if __name__=='__main__':
parser = optparse.OptionParser(usage=Usage)
#
parser.add_option('-b',
'--blocksize',
dest='blocksize',
type='string',
        help='Blocksize interval that will be used during the test. \
Range from 512 to 8192 bytes (Format:xxx,yyy).')
#
parser.add_option('-f',
'--filename',
dest='filename',
type='string',
help='Filename that will be used during test.')
#
parser.add_option('-l',
'--logfile',
dest='logfile',
type='string',
help='Logfile used by the process.')
#
parser.add_option('-s',
'--seconds',
dest='seconds',
type='int',
help='Number of seconds the test will run (def. 60).')
#
parser.add_option('-u',
'--uniquefile',
action="store_true",
dest='uniquefile',
default=False)
#
(options, args) = parser.parse_args()
if len(args) != 0:
o2tf.printlog('args left %s' % len(args), logfile, 0, '')
parser.error('incorrect number of arguments')
#
if options.blocksize:
blocksize = options.blocksize
blockvalues = blocksize.split(',')
if len(blockvalues) != 2:
o2tf.printlog('Blocksize must be specified in format xxx,yyy\n\n',
logfile,
0,
'')
parser.error('Invalid format.')
else:
parser.error('Blocksize parameter needs to be specified.')
if int(blockvalues[0]) < MINBLOCKSIZE or int(blockvalues[1]) > MAXBLOCKSIZE:
o2tf.printlog('Blocksize must be between %s and %s\n\n' % \
(MINBLOCKSIZE, MAXBLOCKSIZE),
logfile,
0,
'')
parser.error('Invalid range.')
if DEBUGON:
o2tf.printlog('Blocksize range from %s to %s\n\n' % \
(str(blockvalues[0]), str(blockvalues[1])),
logfile,
0,
'')
#
if options.filename:
filename = options.filename
else:
parser.error('filename parameter needs to be specified.')
#
if options.logfile:
logfile = options.logfile
#
if options.seconds:
seconds = options.seconds
#
print options.uniquefile
if not options.uniquefile:
filename = options.filename + '_' + lhostname + '_' + str(os.getpid())
#
BLKSZ = random.randint(int(blockvalues[0]), int(blockvalues[1]))
cmd = (EXECPGM + ' -s %s -b %s %s 2>&1 | tee -a %s' %
(seconds, BLKSZ, filename, logfile))
if DEBUGON:
o2tf.printlog('write_torture: main - current directory %s' % os.getcwd(),
logfile,
0,
'')
o2tf.printlog('write_torture: main - filename = %s' % filename,
logfile,
0,
'')
o2tf.printlog('write_torture: main - BLKSZ = %s' %
BLKSZ,
logfile,
0,
'')
t1 = time.time()
if DEBUGON:
o2tf.printlog('write_torture: main - cmd = %s' % cmd,
logfile,
0,
'')
RC = os.system(cmd)
t2 = time.time()
if DEBUGON:
o2tf.printlog('write_torture: elapsed time = %s - RC = %s' %
((t2 - t1), RC),
logfile,
0,
'')
#
sys.exit(RC)
|
gpl-2.0
| 5,714,742,131,041,537,000
| 23.713483
| 79
| 0.661287
| false
| 2.950369
| false
| false
| false
|
dgilman/atom_maker
|
schema.py
|
1
|
2566
|
# How to update the database schema
# 1. Create a new function that takes a sqlite3 db connection as an argument.
# 2. Have the function update schema and increase the user version, preferably in a transaction
# 3. Put your function in the upgrade dict in check(). Its key is the schema version it is upgrading from.
# 4. Increase SCHEMA_VERSION at the top of this file
# 5. Submit a pull request!
SCHEMA_VERSION = 4
def create_initial_format(c):
"""Schema ver 0 to 1
create tables for main cache and bugzilla real name cache"""
c.executescript("""BEGIN TRANSACTION;
create table if not exists cache (qs text primary key, ts timestamp, feed text);
create table if not exists bugzillas (id integer primary key, url text unique);
create table if not exists bugzilla_users (email text, name text, ts integer, bz integer, foreign key(bz) references bugzillas(id));
create index if not exists bugzilla_user_ts_index on bugzilla_users (ts asc);
pragma user_version = 1;
END TRANSACTION;""")
def create_bugzilla_email_index(c):
"""Create an index for the monster cache miss query. Rename bugzilla_user -> bugzilla_users"""
c.executescript("""BEGIN TRANSACTION;
drop index if exists bugzilla_user_ts_index;
create index if not exists bugzilla_users_ts_index on bugzilla_users (ts asc);
create index if not exists bugzilla_users_bz_email_index on bugzilla_users (bz, email);
pragma user_version = 2;
END TRANSACTION;""")
def create_twitter_tokens_table(c):
"""Creates a table to store marshalled twitter tokens"""
c.executescript("""BEGIN TRANSACTION;
create table if not exists twitter_tokens (name text unique not null, key text not null, secret text not null);
pragma user_version = 3;
END TRANSACTION;""")
def cache_text_to_blob(c):
"""Change the cache table to store cached feeds as blob"""
c.executescript("""BEGIN TRANSACTION;
drop table if exists cache;
create table if not exists cache (qs text primary key, ts timestamp, feed blob);
pragma user_version = 4;
END TRANSACTION;""")
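# Hypothetical next migration, following the recipe at the top of this file
# (the table name 'example_table' is illustrative only):
#   def create_example_table(c):
#       """Change the schema from version 4 to 5"""
#       c.executescript("""BEGIN TRANSACTION;
#       create table if not exists example_table (id integer primary key);
#       pragma user_version = 5;
#       END TRANSACTION;""")
# It would then be added to the upgrade dict in check() under key 4, and
# SCHEMA_VERSION above bumped to 5.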
def check(c):
#XXX there is a race condition here
upgrade = {0: create_initial_format,
1: create_bugzilla_email_index,
2: create_twitter_tokens_table,
3: cache_text_to_blob}
ver = lambda: c.execute("pragma user_version").fetchall()[0][0]
while ver() < SCHEMA_VERSION:
upgrade[ver()](c)
def init():
import sqlite3
conn = sqlite3.connect("cache.sqlite3", detect_types=sqlite3.PARSE_DECLTYPES)
c = conn.cursor()
c.execute("pragma foreign_keys = 1")
check(c)
return conn, c
|
gpl-3.0
| -7,401,853,332,173,332,000
| 40.387097
| 132
| 0.721746
| false
| 3.62942
| false
| false
| false
|
droundy/fac
|
tests/getting-started.py
|
1
|
3162
|
#!/usr/bin/python3
import sys, os, re, subprocess
if sys.version_info < (3,5):
print('Please run this script with python 3.5 or newer:', sys.version)
exit(137)
runre = re.compile(r'\[run\]: # \((.+)\)')
shellre = re.compile(r'^ \$ (.+)')
filere = re.compile(r'##### (.+)')
verbre = re.compile(r'^ (.*)')
time_remaining_re = re.compile(r'^Build time remaining: ')
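# Expected input format (hedged illustration of what the regexes above match):
#   ##### hello.c         <- following 4-space-indented lines are written to hello.c
#       $ fac hello       <- run this shell command; append '# fails' if it
#                            is expected to fail
#       some output line  <- expected output; a line of '    ...' matches
#                            arbitrary output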
with open(sys.argv[1]) as f:
for line in f:
isfile = filere.findall(line)
isshell = shellre.findall(line)
if len(isfile) > 0:
with open(isfile[0], 'w') as newf:
for line in f:
isverb = verbre.findall(line)
if len(isverb) == 0:
break
newf.write(isverb[0])
newf.write('\n')
print(isfile[0], ':', isverb[0])
elif len(isshell) > 0:
print('shell :', isshell[0])
tocheck = True
if isshell[0][-len('# fails'):] == '# fails':
tocheck = False
print('SHOULD FAIL!')
isshell[0] = isshell[0][:-len('# fails')]
ret = subprocess.run(isshell, shell=True,
stderr=subprocess.STDOUT,
check=tocheck,
stdout=subprocess.PIPE)
if not tocheck and ret.returncode == 0:
print("DID NOT FAIL!!!")
exit(1)
print('output:', ret.stdout)
output = ret.stdout
for outline in output.decode('utf-8').split('\n'):
# The time_remaining_re bit is needed to skip the
# "Build time remaining:" lines that get printed every
# once in a while. These are irregular, which is why
# we need to do this.
if len(outline)>0 and not time_remaining_re.match(outline):
print('output:', outline)
expectedline = f.readline()
if len(verbre.findall(expectedline)) == 0:
print('unexpected output from:', isshell[0])
print('output is', outline)
exit(1)
if expectedline in [' ...', ' ...\n']:
print('I expected random output.')
break
expected = verbre.findall(expectedline)[0]
expected = expected.replace('.', r'\.')
expected = expected.replace('*', r'\*')
expected = expected.replace(r'\.\.\.', '.*')
expected = expected.replace('[', r'\[')
expected = expected.replace(']', r'\]')
expected = expected.replace('(', r'\(')
expected = expected.replace(')', r'\)')
if not re.compile(expected).match(outline):
print('I expected:', expected)
print('but instead I got:', outline)
exit(1)
else:
print('input', line.strip())
|
gpl-2.0
| 5,235,296,058,472,813,000
| 42.916667
| 75
| 0.448767
| false
| 4.428571
| false
| false
| false
|
wakalixes/sqldataplot
|
plugins/pluginFitConstant.py
|
1
|
1392
|
#--------------------------------------------------
# Revision = $Rev: 13 $
# Date = $Date: 2011-07-31 00:39:24 +0200 (Sun, 31 Jul 2011) $
# Author = $Author: stefan $
#--------------------------------------------------
from pluginInterfaces import PluginFit, Parameter,leastsqFit
import numpy as np
class PluginFitConstant(PluginFit):
def __init__(self):
pass
def fit(self, array, errarray, param, xmin=0, xmax=0, fitAxes=[]):
"""return the data that is needed for plotting the fitting result"""
self.params = [Parameter(v) for v in param]
def f(x): return self.params[0]()
self.simpleFitAllAxes(f, array, errarray, xmin, xmax, fitAxes)
return self.generateDataFromParameters(f,[np.amin(array[0,:]),np.amax(array[0,:])], np.size(fitAxes)+1, xmin, xmax, fitAxes)
def getInitialParameters(self,data):
"""find the best initial values and return them"""
        # nothing to do for a constant fit
return [1,1]
def getParameters(self):
"""return the fit parameters"""
return np.array(["C"])
def getFitModelStr(self):
"""return a string of the implemented fitting model, i.e. 'linear fit (y=A*x +B)'"""
return "Constant, y=C"
def getResultStr(self):
"""return a special result, i.e. 'Frequency = blabla'"""
return "nothing fitted"
|
gpl-2.0
| -5,941,994,962,134,405,000
| 34.692308
| 130
| 0.572557
| false
| 3.721925
| false
| false
| false
|
qedsoftware/commcare-hq
|
corehq/apps/app_manager/models.py
|
1
|
221033
|
# coding=utf-8
"""
Application terminology
For any given application, there are a number of different documents.
The primary application document is an instance of Application. This
document id is what you'll see in the URL on most app manager pages. Primary
application documents should have `copy_of == None` and `is_released ==
False`. When an application is saved, the field `version` is incremented.
When a user makes a build of an application, a copy of the primary
application document is made. These documents are the "versions" you see on
the deploy page. Each build document will have a different id, and the
`copy_of` field will be set to the ID of the primary application document.
Additionally, some attachments such as `profile.xml` and `suite.xml` will be
created and saved to the build doc (see `create_all_files`).
When a build is starred, this is called "releasing" the build. The parameter
`is_released` will be set to True on the build document.
You might also run in to remote applications and applications copied to be
published on the exchange, but those are quite infrequent.
"""
import calendar
from distutils.version import LooseVersion
from itertools import chain
import tempfile
import os
import logging
import hashlib
import random
import json
import types
import re
import datetime
import uuid
from collections import defaultdict, namedtuple
from functools import wraps
from copy import deepcopy
from mimetypes import guess_type
from urllib2 import urlopen
from urlparse import urljoin
from couchdbkit import MultipleResultsFound
import itertools
from lxml import etree
from django.core.cache import cache
from django.utils.translation import override, ugettext as _, ugettext
from couchdbkit.exceptions import BadValueError
from corehq.apps.app_manager.suite_xml.utils import get_select_chain
from corehq.apps.app_manager.suite_xml.generator import SuiteGenerator, MediaSuiteGenerator
from corehq.apps.app_manager.xpath_validator import validate_xpath
from corehq.apps.userreports.exceptions import ReportConfigurationNotFoundError
from corehq.util.timezones.utils import get_timezone_for_domain
from dimagi.ext.couchdbkit import *
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from restkit.errors import ResourceError
from couchdbkit.resource import ResourceNotFound
from corehq import toggles, privileges
from corehq.blobs.mixin import BlobMixin
from corehq.const import USER_DATE_FORMAT, USER_TIME_FORMAT
from corehq.apps.analytics.tasks import track_workflow
from corehq.apps.app_manager.feature_support import CommCareFeatureSupportMixin
from corehq.util.quickcache import quickcache
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.couch import CriticalSection
from django_prbac.exceptions import PermissionDenied
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.commcare_settings import check_condition
from corehq.apps.app_manager.const import *
from corehq.apps.app_manager.xpath import (
dot_interpolate,
interpolate_xpath,
LocationXpath,
)
from corehq.apps.builds import get_default_build_spec
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.undo import DeleteRecord, DELETED_SUFFIX
from dimagi.utils.dates import DateSpan
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.make_uuid import random_hex
from dimagi.utils.web import get_url_base, parse_int
import commcare_translations
from corehq.util import bitly
from corehq.util import view_utils
from corehq.apps.appstore.models import SnapshotMixin
from corehq.apps.builds.models import BuildSpec, BuildRecord
from corehq.apps.hqmedia.models import HQMediaMixin
from corehq.apps.translations.models import TranslationMixin
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import cc_user_domain
from corehq.apps.domain.models import cached_property, Domain
from corehq.apps.app_manager import current_builds, app_strings, remote_app, \
id_strings, commcare_settings
from corehq.apps.app_manager.suite_xml import xml_models as suite_models
from corehq.apps.app_manager.dbaccessors import (
get_app,
get_latest_build_doc,
get_latest_released_app_doc,
domain_has_apps,
)
from corehq.apps.app_manager.util import (
split_path,
save_xform,
ParentCasePropertyBuilder,
is_usercase_in_use,
actions_use_usercase,
update_unique_ids,
app_callout_templates,
xpath_references_case,
xpath_references_user_case,
module_case_hierarchy_has_circular_reference)
from corehq.apps.app_manager.xform import XForm, parse_xml as _parse_xml, \
validate_xform
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from .exceptions import (
AppEditingError,
BlankXFormError,
ConflictingCaseTypeError,
FormNotFoundException,
IncompatibleFormTypeException,
LocationXpathValidationError,
ModuleNotFoundException,
ModuleIdMissingException,
RearrangeError,
SuiteValidationError,
VersioningError,
XFormException,
XFormIdNotUnique,
XFormValidationError,
ScheduleError,
CaseXPathValidationError,
UserCaseXPathValidationError,
)
from corehq.apps.reports.daterange import get_daterange_start_end_dates, get_simple_dateranges
from jsonpath_rw import jsonpath, parse
WORKFLOW_DEFAULT = 'default' # go to the app main screen
WORKFLOW_ROOT = 'root' # go to the module select screen
WORKFLOW_PARENT_MODULE = 'parent_module' # go to the parent module's screen
WORKFLOW_MODULE = 'module' # go to the current module's screen
WORKFLOW_PREVIOUS = 'previous_screen' # go to the previous screen (prior to entering the form)
WORKFLOW_FORM = 'form' # go straight to another form
ALL_WORKFLOWS = [
WORKFLOW_DEFAULT,
WORKFLOW_ROOT,
WORKFLOW_PARENT_MODULE,
WORKFLOW_MODULE,
WORKFLOW_PREVIOUS,
WORKFLOW_FORM,
]
DETAIL_TYPES = ['case_short', 'case_long', 'ref_short', 'ref_long']
FIELD_SEPARATOR = ':'
ATTACHMENT_REGEX = r'[^/]*\.xml'
ANDROID_LOGO_PROPERTY_MAPPING = {
'hq_logo_android_home': 'brand-banner-home',
'hq_logo_android_login': 'brand-banner-login',
}
def jsonpath_update(datum_context, value):
field = datum_context.path.fields[0]
parent = jsonpath.Parent().find(datum_context)[0]
parent.value[field] = value
# store a list of references to form ID's so that
# when an app is copied we can update the references
# with the new values
form_id_references = []
def FormIdProperty(expression, **kwargs):
"""
Create a StringProperty that references a form ID. This is necessary because
form IDs change when apps are copied so we need to make sure we update
any references to the them.
:param expression: jsonpath expression that can be used to find the field
:param kwargs: arguments to be passed to the underlying StringProperty
"""
path_expression = parse(expression)
assert isinstance(path_expression, jsonpath.Child), "only child path expressions are supported"
field = path_expression.right
assert len(field.fields) == 1, 'path expression can only reference a single field'
form_id_references.append(path_expression)
return StringProperty(**kwargs)
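# Example from this module: FormLink.form_id (further down) is declared as
#   form_id = FormIdProperty('modules[*].forms[*].form_links[*].form_id')
# which registers the jsonpath so copied apps can rewrite the referenced IDs.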
def _rename_key(dct, old, new):
if old in dct:
if new in dct and dct[new]:
dct["%s_backup_%s" % (new, hex(random.getrandbits(32))[2:-1])] = dct[new]
dct[new] = dct[old]
del dct[old]
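# e.g. _rename_key({'en': 'Hi', 'fr': 'Salut'}, 'fr', 'en') first stashes the
# old 'en' value under an 'en_backup_<hex>' key, then overwrites 'en' with
# 'Salut' and deletes 'fr'.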
@memoized
def load_app_template(slug):
path = os.path.join(os.path.dirname(__file__), 'static', 'app_manager', 'json', 'template_apps')
with open(os.path.join(path, slug + '.json')) as f:
return json.load(f)
@memoized
def load_case_reserved_words():
with open(os.path.join(os.path.dirname(__file__), 'static', 'app_manager', 'json', 'case-reserved-words.json')) as f:
return json.load(f)
@memoized
def load_form_template(filename):
with open(os.path.join(os.path.dirname(__file__), 'data', filename)) as f:
return f.read()
class IndexedSchema(DocumentSchema):
"""
Abstract class.
Meant for documents that appear in a list within another document
and need to know their own position within that list.
"""
def with_id(self, i, parent):
self._i = i
self._parent = parent
return self
@property
def id(self):
return self._i
def __eq__(self, other):
return other and (self.id == other.id) and (self._parent == other._parent)
class Getter(object):
def __init__(self, attr):
self.attr = attr
def __call__(self, instance):
items = getattr(instance, self.attr)
l = len(items)
for i, item in enumerate(items):
yield item.with_id(i % l, instance)
def __get__(self, instance, owner):
# thanks, http://metapython.blogspot.com/2010/11/python-instance-methods-how-are-they.html
# this makes Getter('foo') act like a bound method
return types.MethodType(self, instance, owner)
class FormActionCondition(DocumentSchema):
"""
The condition under which to open/update/close a case/referral
Either {'type': 'if', 'question': '/xpath/to/node', 'answer': 'value'}
in which case the action takes place if question has answer answer,
or {'type': 'always'} in which case the action always takes place.
"""
type = StringProperty(choices=["if", "always", "never"], default="never")
question = StringProperty()
answer = StringProperty()
operator = StringProperty(choices=['=', 'selected', 'boolean_true'], default='=')
def is_active(self):
return self.type in ('if', 'always')
class FormAction(DocumentSchema):
"""
Corresponds to Case XML
"""
condition = SchemaProperty(FormActionCondition)
def is_active(self):
return self.condition.is_active()
@classmethod
def get_action_paths(cls, action):
if action.condition.type == 'if':
yield action.condition.question
for __, path in cls.get_action_properties(action):
yield path
@classmethod
def get_action_properties(self, action):
action_properties = action.properties()
if 'name_path' in action_properties and action.name_path:
yield 'name', action.name_path
if 'case_name' in action_properties:
yield 'name', action.case_name
if 'external_id' in action_properties and action.external_id:
yield 'external_id', action.external_id
if 'update' in action_properties:
for name, path in action.update.items():
yield name, path
if 'case_properties' in action_properties:
for name, path in action.case_properties.items():
yield name, path
if 'preload' in action_properties:
for path, name in action.preload.items():
yield name, path
class UpdateCaseAction(FormAction):
update = DictProperty()
class PreloadAction(FormAction):
preload = DictProperty()
def is_active(self):
return bool(self.preload)
class UpdateReferralAction(FormAction):
followup_date = StringProperty()
def get_followup_date(self):
if self.followup_date:
return "if(date({followup_date}) >= date(today()), {followup_date}, date(today() + 2))".format(
followup_date=self.followup_date,
)
return self.followup_date or "date(today() + 2)"
class OpenReferralAction(UpdateReferralAction):
name_path = StringProperty()
class OpenCaseAction(FormAction):
name_path = StringProperty()
external_id = StringProperty()
class OpenSubCaseAction(FormAction):
case_type = StringProperty()
case_name = StringProperty()
reference_id = StringProperty()
case_properties = DictProperty()
repeat_context = StringProperty()
# relationship = "child" for index to a parent case (default)
# relationship = "extension" for index to a host case
relationship = StringProperty(choices=['child', 'extension'], default='child')
close_condition = SchemaProperty(FormActionCondition)
class FormActions(DocumentSchema):
open_case = SchemaProperty(OpenCaseAction)
update_case = SchemaProperty(UpdateCaseAction)
close_case = SchemaProperty(FormAction)
open_referral = SchemaProperty(OpenReferralAction)
update_referral = SchemaProperty(UpdateReferralAction)
close_referral = SchemaProperty(FormAction)
case_preload = SchemaProperty(PreloadAction)
referral_preload = SchemaProperty(PreloadAction)
load_from_form = SchemaProperty(PreloadAction) # DEPRECATED
usercase_update = SchemaProperty(UpdateCaseAction)
usercase_preload = SchemaProperty(PreloadAction)
subcases = SchemaListProperty(OpenSubCaseAction)
def all_property_names(self):
names = set()
names.update(self.update_case.update.keys())
names.update(self.case_preload.preload.values())
for subcase in self.subcases:
names.update(subcase.case_properties.keys())
return names
class CaseIndex(DocumentSchema):
tag = StringProperty()
reference_id = StringProperty(default='parent')
relationship = StringProperty(choices=['child', 'extension'], default='child')
class AdvancedAction(IndexedSchema):
case_type = StringProperty()
case_tag = StringProperty()
case_properties = DictProperty()
# case_indices = NotImplemented
close_condition = SchemaProperty(FormActionCondition)
__eq__ = DocumentSchema.__eq__
def get_paths(self):
for path in self.case_properties.values():
yield path
if self.close_condition.type == 'if':
yield self.close_condition.question
def get_property_names(self):
return set(self.case_properties.keys())
@property
def is_subcase(self):
return bool(self.case_indices)
@property
def form_element_name(self):
return "case_{}".format(self.case_tag)
class AutoSelectCase(DocumentSchema):
"""
Configuration for auto-selecting a case.
Attributes:
value_source Reference to the source of the value. For mode = fixture,
this represents the FixtureDataType ID. For mode = case
this represents the 'case_tag' for the case.
The modes 'user' and 'raw' don't require a value_source.
value_key The actual field that contains the case ID. Can be a case
index or a user data key or a fixture field name or the raw
xpath expression.
"""
mode = StringProperty(choices=[AUTO_SELECT_USER,
AUTO_SELECT_FIXTURE,
AUTO_SELECT_CASE,
AUTO_SELECT_USERCASE,
AUTO_SELECT_RAW])
value_source = StringProperty()
value_key = StringProperty(required=True)
class LoadCaseFromFixture(DocumentSchema):
"""
fixture_nodeset: FixtureDataType.tag
fixture_tag: name of the column to display in the list
    fixture_variable: boolean indicating whether display_column actually contains the key for the localized string
case_property: name of the column whose value should be saved when the user selects an item
arbitrary_datum_*: adds an arbitrary datum with function before the action
"""
fixture_nodeset = StringProperty()
fixture_tag = StringProperty()
fixture_variable = StringProperty()
case_property = StringProperty(default='')
auto_select = BooleanProperty(default=False)
arbitrary_datum_id = StringProperty()
arbitrary_datum_function = StringProperty()
class LoadUpdateAction(AdvancedAction):
"""
details_module: Use the case list configuration from this module to show the cases.
preload: Value from the case to load into the form. Keys are question paths,
values are case properties.
auto_select: Configuration for auto-selecting the case
    load_case_from_fixture: Configuration for loading a case using fixture data
show_product_stock: If True list the product stock using the module's Product List
configuration.
product_program: Only show products for this CommCare Supply program.
"""
details_module = StringProperty()
preload = DictProperty()
auto_select = SchemaProperty(AutoSelectCase, default=None)
load_case_from_fixture = SchemaProperty(LoadCaseFromFixture, default=None)
show_product_stock = BooleanProperty(default=False)
product_program = StringProperty()
case_index = SchemaProperty(CaseIndex)
@property
def case_indices(self):
# Allows us to ducktype AdvancedOpenCaseAction
return [self.case_index] if self.case_index.tag else []
@case_indices.setter
def case_indices(self, value):
if len(value) > 1:
raise ValueError('A LoadUpdateAction cannot have more than one case index')
if value:
self.case_index = value[0]
else:
self.case_index = CaseIndex()
@case_indices.deleter
def case_indices(self):
self.case_index = CaseIndex()
def get_paths(self):
for path in super(LoadUpdateAction, self).get_paths():
yield path
for path in self.preload.keys():
yield path
def get_property_names(self):
names = super(LoadUpdateAction, self).get_property_names()
names.update(self.preload.values())
return names
@property
def case_session_var(self):
return 'case_id_{0}'.format(self.case_tag)
@classmethod
def wrap(cls, data):
if 'parent_tag' in data:
if data['parent_tag']:
data['case_index'] = {
'tag': data['parent_tag'],
'reference_id': data.get('parent_reference_id', 'parent'),
'relationship': data.get('relationship', 'child')
}
del data['parent_tag']
data.pop('parent_reference_id', None)
data.pop('relationship', None)
return super(LoadUpdateAction, cls).wrap(data)
class AdvancedOpenCaseAction(AdvancedAction):
name_path = StringProperty()
repeat_context = StringProperty()
case_indices = SchemaListProperty(CaseIndex)
open_condition = SchemaProperty(FormActionCondition)
def get_paths(self):
for path in super(AdvancedOpenCaseAction, self).get_paths():
yield path
yield self.name_path
if self.open_condition.type == 'if':
yield self.open_condition.question
@property
def case_session_var(self):
return 'case_id_new_{}_{}'.format(self.case_type, self.id)
@classmethod
def wrap(cls, data):
if 'parent_tag' in data:
if data['parent_tag']:
index = {
'tag': data['parent_tag'],
'reference_id': data.get('parent_reference_id', 'parent'),
'relationship': data.get('relationship', 'child')
}
if hasattr(data.get('case_indices'), 'append'):
data['case_indices'].append(index)
else:
data['case_indices'] = [index]
del data['parent_tag']
data.pop('parent_reference_id', None)
data.pop('relationship', None)
return super(AdvancedOpenCaseAction, cls).wrap(data)
class AdvancedFormActions(DocumentSchema):
load_update_cases = SchemaListProperty(LoadUpdateAction)
open_cases = SchemaListProperty(AdvancedOpenCaseAction)
get_load_update_actions = IndexedSchema.Getter('load_update_cases')
get_open_actions = IndexedSchema.Getter('open_cases')
def get_all_actions(self):
return itertools.chain(self.get_load_update_actions(), self.get_open_actions())
def get_subcase_actions(self):
return (a for a in self.get_all_actions() if a.case_indices)
def get_open_subcase_actions(self, parent_case_type=None):
for action in self.open_cases:
if action.case_indices:
if not parent_case_type:
yield action
else:
if any(self.actions_meta_by_tag[case_index.tag]['action'].case_type == parent_case_type
for case_index in action.case_indices):
yield action
def get_case_tags(self):
for action in self.get_all_actions():
yield action.case_tag
def get_action_from_tag(self, tag):
return self.actions_meta_by_tag.get(tag, {}).get('action', None)
@property
def actions_meta_by_tag(self):
return self._action_meta()['by_tag']
@property
def actions_meta_by_parent_tag(self):
return self._action_meta()['by_parent_tag']
@property
def auto_select_actions(self):
return self._action_meta()['by_auto_select_mode']
@memoized
def _action_meta(self):
meta = {
'by_tag': {},
'by_parent_tag': {},
'by_auto_select_mode': {
AUTO_SELECT_USER: [],
AUTO_SELECT_CASE: [],
AUTO_SELECT_FIXTURE: [],
AUTO_SELECT_USERCASE: [],
AUTO_SELECT_RAW: [],
}
}
def add_actions(type, action_list):
for action in action_list:
meta['by_tag'][action.case_tag] = {
'type': type,
'action': action
}
for parent in action.case_indices:
meta['by_parent_tag'][parent.tag] = {
'type': type,
'action': action
}
if type == 'load' and action.auto_select and action.auto_select.mode:
meta['by_auto_select_mode'][action.auto_select.mode].append(action)
add_actions('load', self.get_load_update_actions())
add_actions('open', self.get_open_actions())
return meta
class FormSource(object):
def __get__(self, form, form_cls):
if not form:
return self
unique_id = form.get_unique_id()
app = form.get_app()
filename = "%s.xml" % unique_id
# for backwards compatibility of really old apps
try:
old_contents = form['contents']
except AttributeError:
pass
else:
app.lazy_put_attachment(old_contents, filename)
del form['contents']
try:
source = app.lazy_fetch_attachment(filename)
except ResourceNotFound:
source = ''
return source
def __set__(self, form, value):
unique_id = form.get_unique_id()
app = form.get_app()
filename = "%s.xml" % unique_id
app.lazy_put_attachment(value, filename)
form.validation_cache = None
try:
form.xmlns = form.wrapped_xform().data_node.tag_xmlns
except Exception:
form.xmlns = None
class CachedStringProperty(object):
def __init__(self, key):
self.get_key = key
def __get__(self, instance, owner):
return self.get(self.get_key(instance))
def __set__(self, instance, value):
self.set(self.get_key(instance), value)
@classmethod
def get(cls, key):
return cache.get(key)
@classmethod
def set(cls, key, value):
cache.set(key, value, 7*24*60*60) # cache for 7 days
class ScheduleVisit(IndexedSchema):
"""
due: Days after the anchor date that this visit is due
starts: Days before the due date that this visit is valid from
expires: Days after the due date that this visit is valid until (optional)
repeats: Whether this is a repeat visit (one per form allowed)
increment: Days after the last visit that the repeat visit occurs
"""
due = IntegerProperty()
starts = IntegerProperty()
expires = IntegerProperty()
repeats = BooleanProperty(default=False)
increment = IntegerProperty()
@property
def id(self):
"""Visits are 1-based indexed"""
_id = super(ScheduleVisit, self).id
return _id + 1
class FormDatum(DocumentSchema):
name = StringProperty()
xpath = StringProperty()
class FormLink(DocumentSchema):
"""
xpath: xpath condition that must be true in order to open next form
form_id: id of next form to open
"""
xpath = StringProperty()
form_id = FormIdProperty('modules[*].forms[*].form_links[*].form_id')
datums = SchemaListProperty(FormDatum)
class FormSchedule(DocumentSchema):
"""
starts: Days after the anchor date that this schedule starts
expires: Days after the anchor date that this schedule expires (optional)
visits: List of visits in this schedule
allow_unscheduled: Allow unscheduled visits in this schedule
transition_condition: Condition under which we transition to the next phase
termination_condition: Condition under which we terminate the whole schedule
"""
enabled = BooleanProperty(default=True)
starts = IntegerProperty()
expires = IntegerProperty()
allow_unscheduled = BooleanProperty(default=False)
visits = SchemaListProperty(ScheduleVisit)
get_visits = IndexedSchema.Getter('visits')
transition_condition = SchemaProperty(FormActionCondition)
termination_condition = SchemaProperty(FormActionCondition)
class CommentMixin(DocumentSchema):
"""
Documentation comment for app builders and maintainers
"""
comment = StringProperty(default='')
@property
def short_comment(self):
"""
Trim comment to 500 chars (about 100 words)
"""
return self.comment if len(self.comment) <= 500 else self.comment[:497] + '...'
class FormBase(DocumentSchema):
"""
Part of a Managed Application; configuration for a form.
Translates to a second-level menu on the phone
"""
form_type = None
name = DictProperty(unicode)
unique_id = StringProperty()
show_count = BooleanProperty(default=False)
xmlns = StringProperty()
version = IntegerProperty()
source = FormSource()
validation_cache = CachedStringProperty(
lambda self: "cache-%s-%s-validation" % (self.get_app().get_id, self.unique_id)
)
post_form_workflow = StringProperty(
default=WORKFLOW_DEFAULT,
choices=ALL_WORKFLOWS
)
auto_gps_capture = BooleanProperty(default=False)
no_vellum = BooleanProperty(default=False)
form_links = SchemaListProperty(FormLink)
schedule_form_id = StringProperty()
@classmethod
def wrap(cls, data):
data.pop('validation_cache', '')
if cls is FormBase:
doc_type = data['doc_type']
if doc_type == 'Form':
return Form.wrap(data)
elif doc_type == 'AdvancedForm':
return AdvancedForm.wrap(data)
else:
try:
return CareplanForm.wrap(data)
except ValueError:
raise ValueError('Unexpected doc_type for Form', doc_type)
else:
return super(FormBase, cls).wrap(data)
@classmethod
def get_form(cls, form_unique_id, and_app=False):
try:
d = Application.get_db().view(
'app_manager/xforms_index',
key=form_unique_id
).one()
except MultipleResultsFound as e:
raise XFormIdNotUnique(
"xform id '%s' not unique: %s" % (form_unique_id, e)
)
if d:
d = d['value']
else:
raise ResourceNotFound()
# unpack the dict into variables app_id, module_id, form_id
app_id, unique_id = [d[key] for key in ('app_id', 'unique_id')]
app = Application.get(app_id)
form = app.get_form(unique_id)
if and_app:
return form, app
else:
return form
def pre_delete_hook(self):
raise NotImplementedError()
def pre_move_hook(self, from_module, to_module):
""" Called before a form is moved between modules or to a different position """
raise NotImplementedError()
def wrapped_xform(self):
return XForm(self.source)
def validate_form(self):
vc = self.validation_cache
if vc is None:
# formtranslate requires all attributes to be valid xpaths, but
# vellum namespaced attributes aren't
form = self.wrapped_xform()
form.strip_vellum_ns_attributes()
try:
if form.xml is not None:
validate_xform(etree.tostring(form.xml))
except XFormValidationError as e:
validation_dict = {
"fatal_error": e.fatal_error,
"validation_problems": e.validation_problems,
"version": e.version,
}
vc = self.validation_cache = json.dumps(validation_dict)
else:
vc = self.validation_cache = ""
if vc:
try:
raise XFormValidationError(**json.loads(vc))
except ValueError:
self.validation_cache = None
return self.validate_form()
return self
def validate_for_build(self, validate_module=True):
errors = []
try:
module = self.get_module()
except AttributeError:
module = None
meta = {
'form_type': self.form_type,
'module': module.get_module_info() if module else {},
'form': {"id": self.id if hasattr(self, 'id') else None, "name": self.name}
}
xml_valid = False
if self.source == '':
errors.append(dict(type="blank form", **meta))
else:
try:
_parse_xml(self.source)
xml_valid = True
except XFormException as e:
errors.append(dict(
type="invalid xml",
message=unicode(e) if self.source else '',
**meta
))
except ValueError:
logging.error("Failed: _parse_xml(string=%r)" % self.source)
raise
else:
try:
self.validate_form()
except XFormValidationError as e:
error = {'type': 'validation error', 'validation_message': unicode(e)}
error.update(meta)
errors.append(error)
if self.post_form_workflow == WORKFLOW_FORM:
if not self.form_links:
errors.append(dict(type="no form links", **meta))
for form_link in self.form_links:
try:
self.get_app().get_form(form_link.form_id)
except FormNotFoundException:
errors.append(dict(type='bad form link', **meta))
# this isn't great but two of FormBase's subclasses have form_filter
if hasattr(self, 'form_filter') and self.form_filter:
is_valid, message = validate_xpath(self.form_filter, allow_case_hashtags=True)
if not is_valid:
error = {
'type': 'form filter has xpath error',
'xpath_error': message,
}
error.update(meta)
errors.append(error)
errors.extend(self.extended_build_validation(meta, xml_valid, validate_module))
return errors
def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
"""
Override to perform additional validation during build process.
"""
return []
def get_unique_id(self):
"""
Return unique_id if it exists, otherwise initialize it
Does _not_ force a save, so it's the caller's responsibility to save the app
"""
if not self.unique_id:
self.unique_id = random_hex()
return self.unique_id
def get_app(self):
return self._app
def get_version(self):
return self.version if self.version else self.get_app().version
def add_stuff_to_xform(self, xform, build_profile_id=None):
app = self.get_app()
langs = app.get_build_langs(build_profile_id)
xform.exclude_languages(langs)
xform.set_default_language(langs[0])
xform.normalize_itext()
xform.strip_vellum_ns_attributes()
xform.set_version(self.get_version())
def render_xform(self, build_profile_id=None):
xform = XForm(self.source)
self.add_stuff_to_xform(xform, build_profile_id)
return xform.render()
@quickcache(['self.source', 'langs', 'include_triggers', 'include_groups', 'include_translations'])
def get_questions(self, langs, include_triggers=False,
include_groups=False, include_translations=False):
return XForm(self.source).get_questions(
langs=langs,
include_triggers=include_triggers,
include_groups=include_groups,
include_translations=include_translations,
)
@memoized
def get_case_property_name_formatter(self):
"""Get a function that formats case property names
The returned function requires two arguments
`(case_property_name, data_path)` and returns a string.
"""
try:
valid_paths = {question['value']: question['tag']
for question in self.get_questions(langs=[])}
except XFormException as e:
# punt on invalid xml (sorry, no rich attachments)
valid_paths = {}
def format_key(key, path):
if valid_paths.get(path) == "upload":
return u"{}{}".format(ATTACHMENT_PREFIX, key)
return key
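        # Illustrative (hypothetical paths): for an image question at
        # /data/photo, format_key('photo', '/data/photo') returns
        # ATTACHMENT_PREFIX + 'photo'; any other key passes through unchanged.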
return format_key
def export_json(self, dump_json=True):
source = self.to_json()
del source['unique_id']
return json.dumps(source) if dump_json else source
def rename_lang(self, old_lang, new_lang):
_rename_key(self.name, old_lang, new_lang)
try:
self.rename_xform_language(old_lang, new_lang)
except XFormException:
pass
def rename_xform_language(self, old_code, new_code):
source = XForm(self.source)
if source.exists():
source.rename_language(old_code, new_code)
source = source.render()
self.source = source
def default_name(self):
app = self.get_app()
return trans(
self.name,
[app.default_language] + app.langs,
include_lang=False
)
@property
def full_path_name(self):
return "%(app_name)s > %(module_name)s > %(form_name)s" % {
'app_name': self.get_app().name,
'module_name': self.get_module().default_name(),
'form_name': self.default_name()
}
@property
def has_fixtures(self):
return 'src="jr://fixture/item-list:' in self.source
def get_auto_gps_capture(self):
app = self.get_app()
if app.build_version and app.enable_auto_gps:
return self.auto_gps_capture or app.auto_gps_capture
else:
return False
def is_registration_form(self, case_type=None):
"""
Should return True if this form passes the following tests:
* does not require a case
* registers a case of type 'case_type' if supplied
"""
raise NotImplementedError()
def uses_usercase(self):
raise NotImplementedError()
def update_app_case_meta(self, app_case_meta):
pass
@property
@memoized
def case_list_modules(self):
case_list_modules = [
mod for mod in self.get_app().get_modules() if mod.case_list_form.form_id == self.unique_id
]
return case_list_modules
@property
def is_case_list_form(self):
return bool(self.case_list_modules)
class IndexedFormBase(FormBase, IndexedSchema, CommentMixin):
def get_app(self):
return self._parent._parent
def get_module(self):
return self._parent
def get_case_type(self):
return self._parent.case_type
def check_case_properties(self, all_names=None, subcase_names=None, case_tag=None):
all_names = all_names or []
subcase_names = subcase_names or []
errors = []
# reserved_words are hard-coded in three different places!
# Here, case-config-ui-*.js, and module_view.html
reserved_words = load_case_reserved_words()
for key in all_names:
try:
validate_property(key)
except ValueError:
errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})
_, key = split_path(key)
if key in reserved_words:
errors.append({'type': 'update_case uses reserved word', 'word': key, 'case_tag': case_tag})
# no parent properties for subcase
for key in subcase_names:
if not re.match(r'^[a-zA-Z][\w_-]*$', key):
errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})
return errors
def check_paths(self, paths):
errors = []
try:
questions = self.get_questions(langs=[], include_triggers=True, include_groups=True)
valid_paths = {question['value']: question['tag'] for question in questions}
except XFormException as e:
errors.append({'type': 'invalid xml', 'message': unicode(e)})
else:
no_multimedia = not self.get_app().enable_multimedia_case_property
for path in set(paths):
if path not in valid_paths:
errors.append({'type': 'path error', 'path': path})
elif no_multimedia and valid_paths[path] == "upload":
errors.append({'type': 'multimedia case property not supported', 'path': path})
return errors
def add_property_save(self, app_case_meta, case_type, name,
questions, question_path, condition=None):
if question_path in questions:
app_case_meta.add_property_save(
case_type,
name,
self.unique_id,
questions[question_path],
condition
)
else:
app_case_meta.add_property_error(
case_type,
name,
self.unique_id,
"%s is not a valid question" % question_path
)
def add_property_load(self, app_case_meta, case_type, name,
questions, question_path):
if question_path in questions:
app_case_meta.add_property_load(
case_type,
name,
self.unique_id,
questions[question_path]
)
else:
app_case_meta.add_property_error(
case_type,
name,
self.unique_id,
"%s is not a valid question" % question_path
)
class JRResourceProperty(StringProperty):
def validate(self, value, required=True):
super(JRResourceProperty, self).validate(value, required)
if value is not None and not value.startswith('jr://'):
            raise BadValueError("JR Resources must start with 'jr://'")
return value
class NavMenuItemMediaMixin(DocumentSchema):
"""
Language-specific icon and audio.
    Properties are maps of lang-code to filepath.
"""
media_image = SchemaDictProperty(JRResourceProperty)
media_audio = SchemaDictProperty(JRResourceProperty)
@classmethod
def wrap(cls, data):
# ToDo - Remove after migration
for media_attr in ('media_image', 'media_audio'):
old_media = data.get(media_attr, None)
if old_media and isinstance(old_media, basestring):
new_media = {'default': old_media}
data[media_attr] = new_media
return super(NavMenuItemMediaMixin, cls).wrap(data)
def _get_media_by_language(self, media_attr, lang, strict=False):
"""
Return media-path for given language if one exists, else 1st path in the
sorted lang->media-path list
*args:
media_attr: one of 'media_image' or 'media_audio'
lang: language code
**kwargs:
strict: whether to return None if media-path is not set for lang or
to return first path in sorted lang->media-path list
"""
assert media_attr in ('media_image', 'media_audio')
media_dict = getattr(self, media_attr)
if not media_dict:
return None
if media_dict.get(lang, ''):
return media_dict[lang]
if not strict:
# if the queried lang key doesn't exist,
# return the first in the sorted list
            for _lang, item in sorted(media_dict.items()):
                return item
@property
def default_media_image(self):
# For older apps that were migrated
return self.icon_by_language('default')
@property
def default_media_audio(self):
# For older apps that were migrated
return self.audio_by_language('default')
def icon_by_language(self, lang, strict=False):
return self._get_media_by_language('media_image', lang, strict=strict)
def audio_by_language(self, lang, strict=False):
return self._get_media_by_language('media_audio', lang, strict=strict)
def _set_media(self, media_attr, lang, media_path):
"""
Caller's responsibility to save doc.
Currently only called from the view which saves after all Edits
"""
assert media_attr in ('media_image', 'media_audio')
media_dict = getattr(self, media_attr) or {}
media_dict[lang] = media_path or ''
setattr(self, media_attr, media_dict)
def set_icon(self, lang, icon_path):
self._set_media('media_image', lang, icon_path)
def set_audio(self, lang, audio_path):
self._set_media('media_audio', lang, audio_path)
def _all_media_paths(self, media_attr):
assert media_attr in ('media_image', 'media_audio')
media_dict = getattr(self, media_attr) or {}
valid_media_paths = {media for media in media_dict.values() if media}
return list(valid_media_paths)
def all_image_paths(self):
return self._all_media_paths('media_image')
def all_audio_paths(self):
return self._all_media_paths('media_audio')
def icon_app_string(self, lang, for_default=False):
"""
Return lang/app_strings.txt translation for given lang
if a path exists for the lang
**kwargs:
for_default: whether app_string is for default/app_strings.txt
"""
if not for_default and self.icon_by_language(lang, strict=True):
return self.icon_by_language(lang, strict=True)
if for_default:
return self.icon_by_language(lang, strict=False)
def audio_app_string(self, lang, for_default=False):
"""
see note on self.icon_app_string
"""
if not for_default and self.audio_by_language(lang, strict=True):
return self.audio_by_language(lang, strict=True)
if for_default:
return self.audio_by_language(lang, strict=False)
class Form(IndexedFormBase, NavMenuItemMediaMixin):
form_type = 'module_form'
form_filter = StringProperty()
requires = StringProperty(choices=["case", "referral", "none"], default="none")
actions = SchemaProperty(FormActions)
case_references_data = DictProperty()
def add_stuff_to_xform(self, xform, build_profile_id=None):
super(Form, self).add_stuff_to_xform(xform, build_profile_id)
xform.add_case_and_meta(self)
def all_other_forms_require_a_case(self):
m = self.get_module()
return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])
def session_var_for_action(self, action):
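        # Illustrative: in a module with case type 'person',
        # session_var_for_action('open_case') yields 'case_id_new_person_0'.
        # For a subcase action the trailing index counts earlier subcases,
        # shifted by one when the form also opens a parent case.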
module_case_type = self.get_module().case_type
if action == 'open_case':
return 'case_id_new_{}_0'.format(module_case_type)
if isinstance(action, OpenSubCaseAction):
subcase_type = action.case_type
subcase_index = self.actions.subcases.index(action)
opens_case = 'open_case' in self.active_actions()
if opens_case:
subcase_index += 1
return 'case_id_new_{}_{}'.format(subcase_type, subcase_index)
def _get_active_actions(self, types):
actions = {}
for action_type in types:
a = getattr(self.actions, action_type)
if isinstance(a, list):
if a:
actions[action_type] = a
elif a.is_active():
actions[action_type] = a
return actions
@memoized
def get_action_type(self):
if self.actions.close_case.condition.is_active():
return 'close'
elif (self.actions.open_case.condition.is_active() or
self.actions.subcases):
return 'open'
elif self.actions.update_case.condition.is_active():
return 'update'
else:
return 'none'
@memoized
def get_icon_help_text(self):
messages = []
if self.actions.open_case.condition.is_active():
messages.append(_('This form opens a {}').format(self.get_module().case_type))
if self.actions.subcases:
messages.append(_('This form opens a subcase {}').format(', '.join(self.get_subcase_types())))
if self.actions.close_case.condition.is_active():
messages.append(_('This form closes a {}').format(self.get_module().case_type))
elif self.requires_case():
messages.append(_('This form updates a {}').format(self.get_module().case_type))
return '. '.join(messages)
def active_actions(self):
self.get_app().assert_app_v2()
if self.requires == 'none':
action_types = (
'open_case', 'update_case', 'close_case', 'subcases',
'usercase_update', 'usercase_preload',
)
elif self.requires == 'case':
action_types = (
'update_case', 'close_case', 'case_preload', 'subcases',
'usercase_update', 'usercase_preload', 'load_from_form',
)
else:
# this is left around for legacy migrated apps
action_types = (
'open_case', 'update_case', 'close_case',
'case_preload', 'subcases',
'usercase_update', 'usercase_preload',
)
return self._get_active_actions(action_types)
def active_non_preloader_actions(self):
return self._get_active_actions((
'open_case', 'update_case', 'close_case',
'open_referral', 'update_referral', 'close_referral'))
def check_actions(self):
errors = []
subcase_names = set()
for subcase_action in self.actions.subcases:
if not subcase_action.case_type:
errors.append({'type': 'subcase has no case type'})
subcase_names.update(subcase_action.case_properties)
if self.requires == 'none' and self.actions.open_case.is_active() \
and not self.actions.open_case.name_path:
errors.append({'type': 'case_name required'})
errors.extend(self.check_case_properties(
all_names=self.actions.all_property_names(),
subcase_names=subcase_names
))
def generate_paths():
for action in self.active_actions().values():
if isinstance(action, list):
actions = action
else:
actions = [action]
for action in actions:
for path in FormAction.get_action_paths(action):
yield path
errors.extend(self.check_paths(generate_paths()))
return errors
def requires_case(self):
# all referrals also require cases
return self.requires in ("case", "referral")
def requires_case_type(self):
return self.requires_case() or \
bool(self.active_non_preloader_actions())
def requires_referral(self):
return self.requires == "referral"
def uses_parent_case(self):
"""
Returns True if any of the load/update properties references the
parent case; False otherwise
"""
return any([name.startswith('parent/')
for name in self.actions.all_property_names()])
def get_registration_actions(self, case_type):
"""
:return: List of actions that create a case. Subcase actions are included
as long as they are not inside a repeat. If case_type is not None
only return actions that create a case of the specified type.
"""
reg_actions = []
if 'open_case' in self.active_actions() and (not case_type or self.get_module().case_type == case_type):
reg_actions.append('open_case')
subcase_actions = [action for action in self.actions.subcases if not action.repeat_context]
if case_type:
subcase_actions = [a for a in subcase_actions if a.case_type == case_type]
reg_actions.extend(subcase_actions)
return reg_actions
def is_registration_form(self, case_type=None):
reg_actions = self.get_registration_actions(case_type)
return len(reg_actions) == 1
def uses_usercase(self):
return actions_use_usercase(self.active_actions())
def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
errors = []
if xml_valid:
for error in self.check_actions():
error.update(error_meta)
errors.append(error)
if validate_module:
needs_case_type = False
needs_case_detail = False
needs_referral_detail = False
if self.requires_case():
needs_case_detail = True
needs_case_type = True
if self.requires_case_type():
needs_case_type = True
if self.requires_referral():
needs_referral_detail = True
errors.extend(self.get_module().get_case_errors(
needs_case_type=needs_case_type,
needs_case_detail=needs_case_detail,
needs_referral_detail=needs_referral_detail,
))
return errors
def get_case_updates(self, case_type):
# This method is used by both get_all_case_properties and
# get_usercase_properties. In the case of usercase properties, use
# the usercase_update action, and for normal cases, use the
# update_case action
if case_type == self.get_module().case_type or case_type == USERCASE_TYPE:
format_key = self.get_case_property_name_formatter()
action = self.actions.usercase_update if case_type == USERCASE_TYPE else self.actions.update_case
return [format_key(*item) for item in action.update.items()]
return []
@memoized
def get_subcase_types(self):
        '''
        Return the set of case types for which this Form opens a new subcase.
        '''
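        # e.g. a form whose subcase actions open 'child' and 'referral' cases
        # (and never auto-close them) yields {'child', 'referral'}.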
return {subcase.case_type for subcase in self.actions.subcases
if subcase.close_condition.type == "never" and subcase.case_type}
@property
def case_references(self):
refs = self.case_references_data or {}
if "load" not in refs and self.actions.load_from_form.preload:
# for backward compatibility
# preload only has one reference per question path
preload = self.actions.load_from_form.preload
refs["load"] = {key: [value] for key, value in preload.iteritems()}
return refs
@case_references.setter
def case_references(self, refs):
"""Set case references
format: {"load": {"/data/path": ["case_property", ...], ...}}
"""
self.case_references_data = refs
if self.actions.load_from_form.preload:
self.actions.load_from_form = PreloadAction()
@memoized
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
parent_types = set()
case_properties = set()
for subcase in self.actions.subcases:
if subcase.case_type == case_type:
case_properties.update(
subcase.case_properties.keys()
)
if case_type != module_case_type and (
self.actions.open_case.is_active() or
self.actions.update_case.is_active() or
self.actions.close_case.is_active()):
parent_types.add((module_case_type, subcase.reference_id or 'parent'))
return parent_types, case_properties
def update_app_case_meta(self, app_case_meta):
from corehq.apps.reports.formdetails.readable import FormQuestionResponse
questions = {
q['value']: FormQuestionResponse(q)
for q in self.get_questions(self.get_app().langs, include_triggers=True,
include_groups=True, include_translations=True)
}
module_case_type = self.get_module().case_type
type_meta = app_case_meta.get_type(module_case_type)
for type_, action in self.active_actions().items():
if type_ == 'open_case':
type_meta.add_opener(self.unique_id, action.condition)
self.add_property_save(
app_case_meta,
module_case_type,
'name',
questions,
action.name_path
)
if type_ == 'close_case':
type_meta.add_closer(self.unique_id, action.condition)
if type_ == 'update_case' or type_ == 'usercase_update':
for name, question_path in FormAction.get_action_properties(action):
self.add_property_save(
app_case_meta,
USERCASE_TYPE if type_ == 'usercase_update' else module_case_type,
name,
questions,
question_path
)
if type_ == 'case_preload' or type_ == 'load_from_form' or type_ == 'usercase_preload':
for name, question_path in FormAction.get_action_properties(action):
self.add_property_load(
app_case_meta,
USERCASE_TYPE if type_ == 'usercase_preload' else module_case_type,
name,
questions,
question_path
)
if type_ == 'subcases':
for act in action:
if act.is_active():
sub_type_meta = app_case_meta.get_type(act.case_type)
sub_type_meta.add_opener(self.unique_id, act.condition)
if act.close_condition.is_active():
sub_type_meta.add_closer(self.unique_id, act.close_condition)
for name, question_path in FormAction.get_action_properties(act):
self.add_property_save(
app_case_meta,
act.case_type,
name,
questions,
question_path
)
case_loads = self.case_references.get("load", {})
for question_path, case_properties in case_loads.iteritems():
for name in case_properties:
self.add_property_load(
app_case_meta,
module_case_type,
name,
questions,
question_path
)
class MappingItem(DocumentSchema):
key = StringProperty()
# lang => localized string
value = DictProperty()
@property
def treat_as_expression(self):
"""
Returns if whether the key can be treated as a valid expression that can be included in
condition-predicate of an if-clause for e.g. if(<expression>, value, ...)
"""
special_chars = '{}()[]=<>."\'/'
return any(special_char in self.key for special_char in special_chars)
@property
def key_as_variable(self):
"""
Return an xml variable name to represent this key.
If the key contains spaces or a condition-predicate of an if-clause,
return a hash of the key with "h" prepended.
If not, return the key with "k" prepended.
The prepended characters prevent the variable name from starting with a
numeral, which is illegal.
"""
if ' ' in self.key or self.treat_as_expression:
return 'h{hash}'.format(hash=hashlib.md5(self.key).hexdigest()[:8])
else:
return 'k{key}'.format(key=self.key)
def key_as_condition(self, property):
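        # Illustrative: for the plain key 'm', key_as_condition('sex') yields
        # "sex = 'm'"; an expression key is instead interpolated against the
        # property via dot_interpolate().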
if self.treat_as_expression:
condition = dot_interpolate(self.key, property)
return u"{condition}".format(condition=condition)
else:
return u"{property} = '{key}'".format(
property=property,
key=self.key
)
    def ref_to_key_variable(self, index, sort_or_display):
        if sort_or_display == "sort":
            return "{}, ".format(index)
        elif sort_or_display == "display":
            return "${var_name}, ".format(var_name=self.key_as_variable)
        # unrecognized sort_or_display values previously raised
        # UnboundLocalError; fall through to None instead
        return None
class GraphAnnotations(IndexedSchema):
display_text = DictProperty()
x = StringProperty()
y = StringProperty()
class GraphSeries(DocumentSchema):
config = DictProperty()
locale_specific_config = DictProperty()
data_path = StringProperty()
x_function = StringProperty()
y_function = StringProperty()
radius_function = StringProperty()
class GraphConfiguration(DocumentSchema):
config = DictProperty()
locale_specific_config = DictProperty()
annotations = SchemaListProperty(GraphAnnotations)
graph_type = StringProperty()
series = SchemaListProperty(GraphSeries)
class DetailTab(IndexedSchema):
"""
Represents a tab in the case detail screen on the phone.
Each tab is itself a detail, nested inside the app's "main" detail.
"""
header = DictProperty()
# The first index, of all fields in the parent detail, that belongs to this tab
starting_index = IntegerProperty()
# A tab may be associated with a nodeset, resulting in a detail that
# iterates through sub-nodes of an entity rather than a single entity
has_nodeset = BooleanProperty(default=False)
nodeset = StringProperty()
class DetailColumn(IndexedSchema):
"""
Represents a column in case selection screen on the phone. Ex:
{
'header': {'en': 'Sex', 'por': 'Sexo'},
'model': 'case',
'field': 'sex',
'format': 'enum',
'xpath': '.',
'enum': [
            {'key': 'm', 'value': {'en': 'Male', 'por': 'Macho'}},
            {'key': 'f', 'value': {'en': 'Female', 'por': 'Fêmea'}},
],
}
"""
header = DictProperty()
model = StringProperty()
field = StringProperty()
format = StringProperty()
enum = SchemaListProperty(MappingItem)
graph_configuration = SchemaProperty(GraphConfiguration)
case_tile_field = StringProperty()
late_flag = IntegerProperty(default=30)
advanced = StringProperty(default="")
calc_xpath = StringProperty(default=".")
filter_xpath = StringProperty(default="")
time_ago_interval = FloatProperty(default=365.25)
@property
def enum_dict(self):
"""for backwards compatibility with building 1.0 apps"""
import warnings
warnings.warn('You should not use enum_dict. Use enum instead',
DeprecationWarning)
return dict((item.key, item.value) for item in self.enum)
def rename_lang(self, old_lang, new_lang):
for dct in [self.header] + [item.value for item in self.enum]:
_rename_key(dct, old_lang, new_lang)
@property
def field_type(self):
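        # Illustrative (assuming FIELD_SEPARATOR is ':'): a field like
        # 'property:parent/sex' splits into field_type 'property' and
        # field_property 'parent/sex'; a bare field such as 'sex' defaults
        # to field_type 'property'.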
if FIELD_SEPARATOR in self.field:
return self.field.split(FIELD_SEPARATOR, 1)[0]
else:
return 'property' # equivalent to property:parent/case_property
@property
def field_property(self):
if FIELD_SEPARATOR in self.field:
return self.field.split(FIELD_SEPARATOR, 1)[1]
else:
return self.field
class TimeAgoInterval(object):
map = {
'day': 1.0,
'week': 7.0,
'month': 30.4375,
'year': 365.25
}
@classmethod
def get_from_old_format(cls, format):
if format == 'years-ago':
return cls.map['year']
elif format == 'months-ago':
return cls.map['month']
@classmethod
def wrap(cls, data):
if data.get('format') in ('months-ago', 'years-ago'):
data['time_ago_interval'] = cls.TimeAgoInterval.get_from_old_format(data['format'])
data['format'] = 'time-ago'
# Lazy migration: enum used to be a dict, now is a list
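        # e.g. {'m': {'en': 'Male'}} -> [{'key': 'm', 'value': {'en': 'Male'}}]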
if isinstance(data.get('enum'), dict):
data['enum'] = sorted({'key': key, 'value': value}
for key, value in data['enum'].items())
return super(DetailColumn, cls).wrap(data)
@classmethod
def from_json(cls, data):
from corehq.apps.app_manager.views.media_utils import interpolate_media_path
to_ret = cls.wrap(data)
if to_ret.format == 'enum-image':
# interpolate icons-paths
for item in to_ret.enum:
for lang, path in item.value.iteritems():
item.value[lang] = interpolate_media_path(path)
return to_ret
class SortElement(IndexedSchema):
field = StringProperty()
type = StringProperty()
direction = StringProperty()
display = DictProperty()
def has_display_values(self):
return any(s.strip() != '' for s in self.display.values())
class CaseListLookupMixin(DocumentSchema):
"""
Allows for the addition of Android Callouts to do lookups from the CaseList
<lookup action="" image="" name="">
<extra key="" value="" />
<response key="" />
<field>
<header><text><locale id=""/></text></header>
<template><text><xpath function=""/></text></template>
</field>
</lookup>
"""
lookup_enabled = BooleanProperty(default=False)
lookup_autolaunch = BooleanProperty(default=False)
lookup_action = StringProperty()
lookup_name = StringProperty()
lookup_image = JRResourceProperty(required=False)
lookup_extras = SchemaListProperty()
lookup_responses = SchemaListProperty()
lookup_display_results = BooleanProperty(default=False) # Display callout results in case list?
lookup_field_header = DictProperty()
lookup_field_template = StringProperty()
class Detail(IndexedSchema, CaseListLookupMixin):
"""
Full configuration for a case selection screen
"""
display = StringProperty(choices=['short', 'long'])
columns = SchemaListProperty(DetailColumn)
get_columns = IndexedSchema.Getter('columns')
tabs = SchemaListProperty(DetailTab)
get_tabs = IndexedSchema.Getter('tabs')
sort_elements = SchemaListProperty(SortElement)
filter = StringProperty()
# If True, a small tile will display the case name after selection.
persist_case_context = BooleanProperty()
persistent_case_context_xml = StringProperty(default='case_name')
# Custom variables to add into the <variables /> node
custom_variables = StringProperty()
# If True, use case tiles in the case list
use_case_tiles = BooleanProperty()
    # If given, use this string for the case tile markup instead of the default template
custom_xml = StringProperty()
persist_tile_on_forms = BooleanProperty()
# If True, the in form tile can be pulled down to reveal all the case details.
pull_down_tile = BooleanProperty()
def get_tab_spans(self):
        '''
        Return the starting and ending indices into self.columns delimiting
        the columns that should be in each tab.
        '''
tabs = list(self.get_tabs())
ret = []
for tab in tabs:
try:
end = tabs[tab.id + 1].starting_index
except IndexError:
end = len(self.columns)
ret.append((tab.starting_index, end))
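        # Illustrative: two tabs with starting_index 0 and 3 over five
        # columns produce [(0, 3), (3, 5)].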
return ret
@parse_int([1])
def get_column(self, i):
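        # ``i % len(self.columns)`` keeps the id non-negative, so negative
        # indices (e.g. get_column(-1) for the last column) still get a
        # valid 0-based id.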
return self.columns[i].with_id(i % len(self.columns), self)
def rename_lang(self, old_lang, new_lang):
for column in self.columns:
column.rename_lang(old_lang, new_lang)
class CaseList(IndexedSchema, NavMenuItemMediaMixin):
label = DictProperty()
show = BooleanProperty(default=False)
def rename_lang(self, old_lang, new_lang):
_rename_key(self.label, old_lang, new_lang)
class CaseSearchProperty(DocumentSchema):
"""
Case properties available to search on.
"""
name = StringProperty()
label = DictProperty()
class DefaultCaseSearchProperty(DocumentSchema):
"""Case Properties with fixed value to search on"""
property = StringProperty()
default_value = StringProperty()
class CaseSearch(DocumentSchema):
"""
Properties and search command label
"""
command_label = DictProperty(default={'en': 'Search All Cases'})
properties = SchemaListProperty(CaseSearchProperty)
relevant = StringProperty(default=CLAIM_DEFAULT_RELEVANT_CONDITION)
include_closed = BooleanProperty(default=False)
default_properties = SchemaListProperty(DefaultCaseSearchProperty)
class ParentSelect(DocumentSchema):
active = BooleanProperty(default=False)
relationship = StringProperty(default='parent')
module_id = StringProperty()
class FixtureSelect(DocumentSchema):
"""
Configuration for creating a details screen from a fixture which can be used to pre-filter
cases prior to displaying the case list.
fixture_type: FixtureDataType.tag
display_column: name of the column to display in the list
localize: boolean if display_column actually contains the key for the localized string
variable_column: name of the column whose value should be saved when the user selects an item
xpath: xpath expression to use as the case filter
"""
active = BooleanProperty(default=False)
fixture_type = StringProperty()
display_column = StringProperty()
localize = BooleanProperty(default=False)
variable_column = StringProperty()
xpath = StringProperty(default='')
class DetailPair(DocumentSchema):
short = SchemaProperty(Detail)
long = SchemaProperty(Detail)
@classmethod
def wrap(cls, data):
self = super(DetailPair, cls).wrap(data)
self.short.display = 'short'
self.long.display = 'long'
return self
class CaseListForm(NavMenuItemMediaMixin):
form_id = FormIdProperty('modules[*].case_list_form.form_id')
label = DictProperty()
def rename_lang(self, old_lang, new_lang):
_rename_key(self.label, old_lang, new_lang)
class ModuleBase(IndexedSchema, NavMenuItemMediaMixin, CommentMixin):
name = DictProperty(unicode)
unique_id = StringProperty()
case_type = StringProperty()
case_list_form = SchemaProperty(CaseListForm)
module_filter = StringProperty()
root_module_id = StringProperty()
fixture_select = SchemaProperty(FixtureSelect)
auto_select_case = BooleanProperty(default=False)
@property
def is_surveys(self):
return self.case_type == ""
@classmethod
def wrap(cls, data):
if cls is ModuleBase:
doc_type = data['doc_type']
if doc_type == 'Module':
return Module.wrap(data)
elif doc_type == 'CareplanModule':
return CareplanModule.wrap(data)
elif doc_type == 'AdvancedModule':
return AdvancedModule.wrap(data)
elif doc_type == 'ReportModule':
return ReportModule.wrap(data)
elif doc_type == 'ShadowModule':
return ShadowModule.wrap(data)
else:
raise ValueError('Unexpected doc_type for Module', doc_type)
else:
return super(ModuleBase, cls).wrap(data)
def get_or_create_unique_id(self):
"""
It is the caller's responsibility to save the Application
after calling this function.
WARNING: If called on the same doc in different requests without saving,
this function will return a different uuid each time,
likely causing unexpected behavior
"""
if not self.unique_id:
self.unique_id = random_hex()
return self.unique_id
get_forms = IndexedSchema.Getter('forms')
get_suite_forms = IndexedSchema.Getter('forms')
@parse_int([1])
def get_form(self, i):
try:
return self.forms[i].with_id(i % len(self.forms), self)
except IndexError:
raise FormNotFoundException()
def get_child_modules(self):
return [
module for module in self.get_app().get_modules()
if module.unique_id != self.unique_id and getattr(module, 'root_module_id', None) == self.unique_id
]
@property
def root_module(self):
if self.root_module_id:
return self._parent.get_module_by_unique_id(self.root_module_id,
error=_("Could not find parent menu for '{}'").format(self.default_name()))
def requires_case_details(self):
return False
def get_case_types(self):
return set([self.case_type])
def get_module_info(self):
return {
'id': self.id,
'name': self.name,
}
def get_app(self):
return self._parent
def default_name(self):
app = self.get_app()
return trans(
self.name,
[app.default_language] + app.langs,
include_lang=False
)
def rename_lang(self, old_lang, new_lang):
_rename_key(self.name, old_lang, new_lang)
for form in self.get_forms():
form.rename_lang(old_lang, new_lang)
for _, detail, _ in self.get_details():
detail.rename_lang(old_lang, new_lang)
def validate_detail_columns(self, columns):
from corehq.apps.app_manager.suite_xml.const import FIELD_TYPE_LOCATION
from corehq.apps.locations.util import parent_child
hierarchy = None
for column in columns:
if column.field_type == FIELD_TYPE_LOCATION:
hierarchy = hierarchy or parent_child(self.get_app().domain)
try:
LocationXpath('').validate(column.field_property, hierarchy)
                except LocationXpathValidationError as e:
yield {
'type': 'invalid location xpath',
'details': unicode(e),
'module': self.get_module_info(),
'column': column,
}
def get_form_by_unique_id(self, unique_id):
for form in self.get_forms():
if form.get_unique_id() == unique_id:
return form
def validate_for_build(self):
errors = []
needs_case_detail = self.requires_case_details()
needs_case_type = needs_case_detail or len([1 for f in self.get_forms() if f.is_registration_form()])
if needs_case_detail or needs_case_type:
errors.extend(self.get_case_errors(
needs_case_type=needs_case_type,
needs_case_detail=needs_case_detail
))
if self.case_list_form.form_id:
try:
form = self.get_app().get_form(self.case_list_form.form_id)
except FormNotFoundException:
errors.append({
'type': 'case list form missing',
'module': self.get_module_info()
})
else:
if not form.is_registration_form(self.case_type):
errors.append({
'type': 'case list form not registration',
'module': self.get_module_info(),
'form': form,
})
if self.module_filter:
is_valid, message = validate_xpath(self.module_filter)
if not is_valid:
errors.append({
'type': 'module filter has xpath error',
'xpath_error': message,
'module': self.get_module_info(),
})
return errors
@memoized
def get_subcase_types(self):
'''
Return a set of each case type for which this module has a form that
opens a new subcase of that type.
'''
subcase_types = set()
for form in self.get_forms():
if hasattr(form, 'get_subcase_types'):
subcase_types.update(form.get_subcase_types())
return subcase_types
def get_custom_entries(self):
"""
By default, suite entries are configured by forms, but you can also provide custom
entries by overriding this function.
See ReportModule for an example
"""
return []
def uses_media(self):
"""
Whether the module uses media. If this returns false then media will not be generated
for the module.
"""
return True
def uses_usercase(self):
return False
def add_insert_form(self, from_module, form, index=None, with_source=False):
raise IncompatibleFormTypeException()
class ModuleDetailsMixin():
@classmethod
def wrap_details(cls, data):
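        # Legacy migration: older docs stored a flat 'details' list of
        # [case_short, case_long, ref_short, ref_long]; split it into the
        # 'case_details' and 'ref_details' pairs used by the current schema.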
if 'details' in data:
try:
case_short, case_long, ref_short, ref_long = data['details']
except ValueError:
# "need more than 0 values to unpack"
pass
else:
data['case_details'] = {
'short': case_short,
'long': case_long,
}
data['ref_details'] = {
'short': ref_short,
'long': ref_long,
}
finally:
del data['details']
return data
@property
def case_list_filter(self):
try:
return self.case_details.short.filter
except AttributeError:
return None
@property
def detail_sort_elements(self):
try:
return self.case_details.short.sort_elements
except Exception:
return []
def rename_lang(self, old_lang, new_lang):
super(Module, self).rename_lang(old_lang, new_lang)
for case_list in (self.case_list, self.referral_list):
case_list.rename_lang(old_lang, new_lang)
def export_json(self, dump_json=True, keep_unique_id=False):
source = self.to_json()
if not keep_unique_id:
for form in source['forms']:
del form['unique_id']
return json.dumps(source) if dump_json else source
def get_details(self):
return (
('case_short', self.case_details.short, True),
('case_long', self.case_details.long, True),
('ref_short', self.ref_details.short, False),
('ref_long', self.ref_details.long, False),
)
def validate_details_for_build(self):
errors = []
for sort_element in self.detail_sort_elements:
try:
validate_detail_screen_field(sort_element.field)
except ValueError:
errors.append({
'type': 'invalid sort field',
'field': sort_element.field,
'module': self.get_module_info(),
})
if self.case_list_filter:
try:
case_list_filter = interpolate_xpath(self.case_list_filter)
etree.XPath(case_list_filter)
except (etree.XPathSyntaxError, CaseXPathValidationError):
errors.append({
'type': 'invalid filter xpath',
'module': self.get_module_info(),
'filter': self.case_list_filter,
})
for detail in [self.case_details.short, self.case_details.long]:
if detail.use_case_tiles:
if not detail.display == "short":
errors.append({
'type': "invalid tile configuration",
'module': self.get_module_info(),
'reason': _('Case tiles may only be used for the case list (not the case details).')
})
col_by_tile_field = {c.case_tile_field: c for c in detail.columns}
for field in ["header", "top_left", "sex", "bottom_left", "date"]:
if field not in col_by_tile_field:
errors.append({
'type': "invalid tile configuration",
'module': self.get_module_info(),
                            'reason': _('A case property must be assigned to the "{}" tile field.').format(field)
})
return errors
def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
module_info = self.get_module_info()
if needs_case_type and not self.case_type:
yield {
'type': 'no case type',
'module': module_info,
}
if needs_case_detail:
if not self.case_details.short.columns:
yield {
'type': 'no case detail',
'module': module_info,
}
columns = self.case_details.short.columns + self.case_details.long.columns
errors = self.validate_detail_columns(columns)
for error in errors:
yield error
if needs_referral_detail and not self.ref_details.short.columns:
yield {
'type': 'no ref detail',
'module': module_info,
}
class Module(ModuleBase, ModuleDetailsMixin):
"""
A group of related forms, and configuration that applies to them all.
Translates to a top-level menu on the phone.
"""
module_type = 'basic'
case_label = DictProperty()
referral_label = DictProperty()
forms = SchemaListProperty(Form)
case_details = SchemaProperty(DetailPair)
ref_details = SchemaProperty(DetailPair)
put_in_root = BooleanProperty(default=False)
case_list = SchemaProperty(CaseList)
referral_list = SchemaProperty(CaseList)
task_list = SchemaProperty(CaseList)
parent_select = SchemaProperty(ParentSelect)
search_config = SchemaProperty(CaseSearch)
display_style = StringProperty(default='list')
@classmethod
def wrap(cls, data):
data = cls.wrap_details(data)
return super(Module, cls).wrap(data)
@classmethod
def new_module(cls, name, lang):
detail = Detail(
columns=[DetailColumn(
format='plain',
header={(lang or 'en'): ugettext("Name")},
field='name',
model='case',
)]
)
module = Module(
name={(lang or 'en'): name or ugettext("Untitled Module")},
forms=[],
case_type='',
case_details=DetailPair(
short=Detail(detail.to_json()),
long=Detail(detail.to_json()),
),
case_label={(lang or 'en'): 'Cases'},
)
module.get_or_create_unique_id()
return module
def new_form(self, name, lang, attachment=''):
form = Form(
name={lang if lang else "en": name if name else _("Untitled Form")},
)
self.forms.append(form)
form = self.get_form(-1)
form.source = attachment
return form
def add_insert_form(self, from_module, form, index=None, with_source=False):
if isinstance(form, Form):
new_form = form
elif isinstance(form, AdvancedForm) and not form.actions.get_all_actions():
new_form = Form(
name=form.name,
form_filter=form.form_filter,
media_image=form.media_image,
media_audio=form.media_audio
)
new_form._parent = self
form._parent = self
if with_source:
new_form.source = form.source
else:
raise IncompatibleFormTypeException()
if index is not None:
self.forms.insert(index, new_form)
else:
self.forms.append(new_form)
return self.get_form(index or -1)
def validate_for_build(self):
errors = super(Module, self).validate_for_build() + self.validate_details_for_build()
if not self.forms and not self.case_list.show:
errors.append({
'type': 'no forms or case list',
'module': self.get_module_info(),
})
if module_case_hierarchy_has_circular_reference(self):
errors.append({
'type': 'circular case hierarchy',
'module': self.get_module_info(),
})
return errors
def requires(self):
r = set(["none"])
for form in self.get_forms():
r.add(form.requires)
if self.case_list.show:
r.add('case')
if self.referral_list.show:
r.add('referral')
for val in ("referral", "case", "none"):
if val in r:
return val
def requires_case_details(self):
ret = False
if self.case_list.show:
return True
for form in self.get_forms():
if form.requires_case():
ret = True
break
return ret
@memoized
def all_forms_require_a_case(self):
return all([form.requires == 'case' for form in self.get_forms()])
def uses_usercase(self):
"""Return True if this module has any forms that use the usercase.
"""
return any(form.uses_usercase() for form in self.get_forms())
def grid_display_style(self):
return self.display_style == 'grid'
class AdvancedForm(IndexedFormBase, NavMenuItemMediaMixin):
form_type = 'advanced_form'
form_filter = StringProperty()
actions = SchemaProperty(AdvancedFormActions)
schedule = SchemaProperty(FormSchedule, default=None)
@classmethod
def wrap(cls, data):
# lazy migration to swap keys with values in action preload dict.
# http://manage.dimagi.com/default.asp?162213
load_actions = data.get('actions', {}).get('load_update_cases', [])
for action in load_actions:
preload = action['preload']
if preload and preload.values()[0].startswith('/'):
action['preload'] = {v: k for k, v in preload.items()}
return super(AdvancedForm, cls).wrap(data)
def pre_delete_hook(self):
try:
self.disable_schedule()
except (ScheduleError, TypeError, AttributeError) as e:
logging.error("There was a {error} while running the pre_delete_hook on {form_id}. "
"There is probably nothing to worry about, but you could check to make sure "
"that there are no issues with this form.".format(error=e, form_id=self.unique_id))
def pre_move_hook(self, from_module, to_module):
if from_module != to_module:
try:
self.disable_schedule()
except (ScheduleError, TypeError, AttributeError) as e:
logging.error("There was a {error} while running the pre_move_hook on {form_id}. "
"There is probably nothing to worry about, but you could check to make sure "
"that there are no issues with this module.".format(error=e, form_id=self.unique_id))
def add_stuff_to_xform(self, xform, build_profile_id=None):
super(AdvancedForm, self).add_stuff_to_xform(xform, build_profile_id)
xform.add_case_and_meta_advanced(self)
def requires_case(self):
"""Form requires a case that must be selected by the user (excludes autoloaded cases)
"""
return any(not action.auto_select for action in self.actions.load_update_cases)
@property
def requires(self):
return 'case' if self.requires_case() else 'none'
def is_registration_form(self, case_type=None):
"""
        Defined as a form that opens a single case. If the case is a sub-case then
the form is only allowed to load parent cases (and any auto-selected cases).
"""
reg_actions = self.get_registration_actions(case_type)
if len(reg_actions) != 1:
return False
load_actions = [action for action in self.actions.load_update_cases if not action.auto_select]
if not load_actions:
return True
reg_action = reg_actions[0]
if not reg_action.case_indices:
return False
actions_by_tag = deepcopy(self.actions.actions_meta_by_tag)
actions_by_tag.pop(reg_action.case_tag)
def check_parents(tag):
"""Recursively check parent actions to ensure that all actions for this form are
either parents of the registration action or else auto-select actions.
"""
if not tag:
return not actions_by_tag or all(
getattr(a['action'], 'auto_select', False) for a in actions_by_tag.values()
)
try:
parent = actions_by_tag.pop(tag)
except KeyError:
return False
return all(check_parents(p.tag) for p in parent['action'].case_indices)
return all(check_parents(parent.tag) for parent in reg_action.case_indices)
def get_registration_actions(self, case_type=None):
"""
:return: List of actions that create a case. Subcase actions are included
as long as they are not inside a repeat. If case_type is not None
only return actions that create a case of the specified type.
"""
registration_actions = [
action for action in self.actions.get_open_actions()
if not action.is_subcase or not action.repeat_context
]
if case_type:
registration_actions = [a for a in registration_actions if a.case_type == case_type]
return registration_actions
def uses_case_type(self, case_type, invert_match=False):
def match(ct):
matches = ct == case_type
return not matches if invert_match else matches
return any(action for action in self.actions.load_update_cases if match(action.case_type))
def uses_usercase(self):
return self.uses_case_type(USERCASE_TYPE)
def all_other_forms_require_a_case(self):
m = self.get_module()
return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])
def get_module(self):
return self._parent
def get_phase(self):
module = self.get_module()
return next((phase for phase in module.get_schedule_phases()
for form in phase.get_forms()
if form.unique_id == self.unique_id),
None)
def disable_schedule(self):
self.schedule.enabled = False
phase = self.get_phase()
if phase:
phase.remove_form(self)
def check_actions(self):
errors = []
for action in self.actions.get_subcase_actions():
case_tags = self.actions.get_case_tags()
for case_index in action.case_indices:
if case_index.tag not in case_tags:
errors.append({'type': 'missing parent tag', 'case_tag': case_index.tag})
if isinstance(action, AdvancedOpenCaseAction):
if not action.name_path:
errors.append({'type': 'case_name required', 'case_tag': action.case_tag})
for case_index in action.case_indices:
meta = self.actions.actions_meta_by_tag.get(case_index.tag)
if meta and meta['type'] == 'open' and meta['action'].repeat_context:
if (
not action.repeat_context or
not action.repeat_context.startswith(meta['action'].repeat_context)
):
errors.append({'type': 'subcase repeat context',
'case_tag': action.case_tag,
'parent_tag': case_index.tag})
errors.extend(self.check_case_properties(
subcase_names=action.get_property_names(),
case_tag=action.case_tag
))
for action in self.actions.get_all_actions():
if not action.case_type and (not isinstance(action, LoadUpdateAction) or not action.auto_select):
errors.append({'type': "no case type in action", 'case_tag': action.case_tag})
if isinstance(action, LoadUpdateAction) and action.auto_select:
mode = action.auto_select.mode
if not action.auto_select.value_key:
key_names = {
AUTO_SELECT_CASE: _('Case property'),
AUTO_SELECT_FIXTURE: _('Lookup Table field'),
AUTO_SELECT_USER: _('custom user property'),
AUTO_SELECT_RAW: _('custom XPath expression'),
}
if mode in key_names:
errors.append({'type': 'auto select key', 'key_name': key_names[mode]})
if not action.auto_select.value_source:
source_names = {
AUTO_SELECT_CASE: _('Case tag'),
AUTO_SELECT_FIXTURE: _('Lookup Table tag'),
}
if mode in source_names:
errors.append({'type': 'auto select source', 'source_name': source_names[mode]})
elif mode == AUTO_SELECT_CASE:
case_tag = action.auto_select.value_source
if not self.actions.get_action_from_tag(case_tag):
errors.append({'type': 'auto select case ref', 'case_tag': action.case_tag})
errors.extend(self.check_case_properties(
all_names=action.get_property_names(),
case_tag=action.case_tag
))
if self.form_filter:
form_filter_references_case = (
xpath_references_case(self.form_filter) or
xpath_references_user_case(self.form_filter)
)
if form_filter_references_case:
if not any(action for action in self.actions.load_update_cases if not action.auto_select):
errors.append({'type': "filtering without case"})
def generate_paths():
for action in self.actions.get_all_actions():
for path in action.get_paths():
yield path
if self.schedule:
if self.schedule.transition_condition.type == 'if':
yield self.schedule.transition_condition.question
if self.schedule.termination_condition.type == 'if':
yield self.schedule.termination_condition.question
errors.extend(self.check_paths(generate_paths()))
return errors
def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
errors = []
if xml_valid:
for error in self.check_actions():
error.update(error_meta)
errors.append(error)
module = self.get_module()
if validate_module:
errors.extend(module.get_case_errors(
needs_case_type=False,
needs_case_detail=module.requires_case_details(),
needs_referral_detail=False,
))
return errors
def get_case_updates(self, case_type):
updates = set()
format_key = self.get_case_property_name_formatter()
for action in self.actions.get_all_actions():
if action.case_type == case_type:
updates.update(format_key(*item)
for item in action.case_properties.iteritems())
if self.schedule and self.schedule.enabled and self.source:
xform = self.wrapped_xform()
self.add_stuff_to_xform(xform)
scheduler_updates = xform.get_scheduler_case_updates()[case_type]
else:
scheduler_updates = set()
return updates.union(scheduler_updates)
@property
def case_references(self):
return {}
@case_references.setter
def case_references(self, refs):
pass
@memoized
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
parent_types = set()
case_properties = set()
for subcase in self.actions.get_subcase_actions():
if subcase.case_type == case_type:
case_properties.update(
subcase.case_properties.keys()
)
for case_index in subcase.case_indices:
parent = self.actions.get_action_from_tag(case_index.tag)
if parent:
parent_types.add((parent.case_type, case_index.reference_id or 'parent'))
return parent_types, case_properties
def update_app_case_meta(self, app_case_meta):
from corehq.apps.reports.formdetails.readable import FormQuestionResponse
questions = {
q['value']: FormQuestionResponse(q)
for q in self.get_questions(self.get_app().langs, include_translations=True)
}
for action in self.actions.load_update_cases:
for name, question_path in action.case_properties.items():
self.add_property_save(
app_case_meta,
action.case_type,
name,
questions,
question_path
)
for question_path, name in action.preload.items():
self.add_property_load(
app_case_meta,
action.case_type,
name,
questions,
question_path
)
if action.close_condition.is_active():
meta = app_case_meta.get_type(action.case_type)
meta.add_closer(self.unique_id, action.close_condition)
for action in self.actions.open_cases:
self.add_property_save(
app_case_meta,
action.case_type,
'name',
questions,
action.name_path,
action.open_condition
)
for name, question_path in action.case_properties.items():
self.add_property_save(
app_case_meta,
action.case_type,
name,
questions,
question_path,
action.open_condition
)
meta = app_case_meta.get_type(action.case_type)
meta.add_opener(self.unique_id, action.open_condition)
if action.close_condition.is_active():
meta.add_closer(self.unique_id, action.close_condition)
class SchedulePhaseForm(IndexedSchema):
"""
A reference to a form in a schedule phase.
"""
form_id = FormIdProperty("modules[*].schedule_phases[*].forms[*].form_id")
class SchedulePhase(IndexedSchema):
"""
SchedulePhases are attached to a module.
A Schedule Phase is a grouping of forms that occur within a period and share an anchor
A module should not have more than one SchedulePhase with the same anchor
anchor: Case property containing a date after which this phase becomes active
forms: The forms that are to be filled out within this phase
"""
anchor = StringProperty()
forms = SchemaListProperty(SchedulePhaseForm)
@property
def id(self):
""" A Schedule Phase is 1-indexed """
_id = super(SchedulePhase, self).id
return _id + 1
@property
def phase_id(self):
return "{}_{}".format(self.anchor, self.id)
def get_module(self):
return self._parent
_get_forms = IndexedSchema.Getter('forms')
def get_forms(self):
"""Returns the actual form objects related to this phase"""
module = self.get_module()
return (module.get_form_by_unique_id(form.form_id) for form in self._get_forms())
def get_form(self, desired_form):
return next((form for form in self.get_forms() if form.unique_id == desired_form.unique_id), None)
def get_phase_form_index(self, form):
"""
Returns the index of the form with respect to the phase
schedule_phase.forms = [a,b,c]
schedule_phase.get_phase_form_index(b)
=> 1
schedule_phase.get_phase_form_index(c)
=> 2
"""
return next((phase_form.id for phase_form in self._get_forms() if phase_form.form_id == form.unique_id),
None)
def remove_form(self, form):
"""Remove a form from the phase"""
idx = self.get_phase_form_index(form)
if idx is None:
raise ScheduleError("That form doesn't exist in the phase")
self.forms.remove(self.forms[idx])
def add_form(self, form):
"""Adds a form to this phase, removing it from other phases"""
old_phase = form.get_phase()
if old_phase is not None and old_phase.anchor != self.anchor:
old_phase.remove_form(form)
if self.get_form(form) is None:
self.forms.append(SchedulePhaseForm(form_id=form.unique_id))
def change_anchor(self, new_anchor):
if new_anchor is None or new_anchor.strip() == '':
raise ScheduleError(_("You can't create a phase without an anchor property"))
self.anchor = new_anchor
if self.get_module().phase_anchors.count(new_anchor) > 1:
raise ScheduleError(_("You can't have more than one phase with the anchor {}").format(new_anchor))
class AdvancedModule(ModuleBase):
module_type = 'advanced'
case_label = DictProperty()
forms = SchemaListProperty(AdvancedForm)
case_details = SchemaProperty(DetailPair)
product_details = SchemaProperty(DetailPair)
put_in_root = BooleanProperty(default=False)
case_list = SchemaProperty(CaseList)
has_schedule = BooleanProperty()
schedule_phases = SchemaListProperty(SchedulePhase)
get_schedule_phases = IndexedSchema.Getter('schedule_phases')
search_config = SchemaProperty(CaseSearch)
@classmethod
def wrap(cls, data):
# lazy migration to accommodate search_config as empty list
# http://manage.dimagi.com/default.asp?231186
if data.get('search_config') == []:
data['search_config'] = {}
return super(AdvancedModule, cls).wrap(data)
@classmethod
def new_module(cls, name, lang):
detail = Detail(
columns=[DetailColumn(
format='plain',
header={(lang or 'en'): ugettext("Name")},
field='name',
model='case',
)]
)
module = AdvancedModule(
name={(lang or 'en'): name or ugettext("Untitled Module")},
forms=[],
case_type='',
case_details=DetailPair(
short=Detail(detail.to_json()),
long=Detail(detail.to_json()),
),
product_details=DetailPair(
short=Detail(
columns=[
DetailColumn(
format='plain',
header={(lang or 'en'): ugettext("Product")},
field='name',
model='product',
),
],
),
long=Detail(),
),
)
module.get_or_create_unique_id()
return module
def new_form(self, name, lang, attachment=''):
form = AdvancedForm(
name={lang if lang else "en": name if name else _("Untitled Form")},
)
form.schedule = FormSchedule(enabled=False)
self.forms.append(form)
form = self.get_form(-1)
form.source = attachment
return form
def add_insert_form(self, from_module, form, index=None, with_source=False):
if isinstance(form, AdvancedForm):
new_form = form
elif isinstance(form, Form):
new_form = AdvancedForm(
name=form.name,
form_filter=form.form_filter,
media_image=form.media_image,
media_audio=form.media_audio
)
new_form._parent = self
form._parent = self
if with_source:
new_form.source = form.source
actions = form.active_actions()
open = actions.get('open_case', None)
update = actions.get('update_case', None)
close = actions.get('close_case', None)
preload = actions.get('case_preload', None)
subcases = actions.get('subcases', None)
case_type = from_module.case_type
base_action = None
if open:
base_action = AdvancedOpenCaseAction(
case_type=case_type,
case_tag='open_{0}_0'.format(case_type),
name_path=open.name_path,
open_condition=open.condition,
case_properties=update.update if update else {},
)
new_form.actions.open_cases.append(base_action)
elif update or preload or close:
base_action = LoadUpdateAction(
case_type=case_type,
case_tag='load_{0}_0'.format(case_type),
case_properties=update.update if update else {},
preload=preload.preload if preload else {}
)
if from_module.parent_select.active:
app = self.get_app()
select_chain = get_select_chain(app, from_module, include_self=False)
for n, link in enumerate(reversed(list(enumerate(select_chain)))):
i, module = link
new_form.actions.load_update_cases.append(LoadUpdateAction(
case_type=module.case_type,
case_tag='_'.join(['parent'] * (i + 1)),
details_module=module.unique_id,
case_index=CaseIndex(tag='_'.join(['parent'] * (i + 2)) if n > 0 else '')
))
base_action.case_indices = [CaseIndex(tag='parent')]
if close:
base_action.close_condition = close.condition
new_form.actions.load_update_cases.append(base_action)
if subcases:
for i, subcase in enumerate(subcases):
open_subcase_action = AdvancedOpenCaseAction(
case_type=subcase.case_type,
case_tag='open_{0}_{1}'.format(subcase.case_type, i+1),
name_path=subcase.case_name,
open_condition=subcase.condition,
case_properties=subcase.case_properties,
repeat_context=subcase.repeat_context,
case_indices=[CaseIndex(
tag=base_action.case_tag if base_action else '',
reference_id=subcase.reference_id,
)]
)
new_form.actions.open_cases.append(open_subcase_action)
else:
raise IncompatibleFormTypeException()
if index is not None:
self.forms.insert(index, new_form)
else:
self.forms.append(new_form)
return self.get_form(index or -1)
def rename_lang(self, old_lang, new_lang):
super(AdvancedModule, self).rename_lang(old_lang, new_lang)
self.case_list.rename_lang(old_lang, new_lang)
    def requires_case_details(self):
        if self.case_list.show:
            return True
        for form in self.forms:
            if any(action.case_type == self.case_type for action in form.actions.load_update_cases):
                return True
        return False
def all_forms_require_a_case(self):
return all(form.requires_case() for form in self.forms)
def get_details(self):
return (
('case_short', self.case_details.short, True),
('case_long', self.case_details.long, True),
('product_short', self.product_details.short, self.get_app().commtrack_enabled),
('product_long', self.product_details.long, False),
)
def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
module_info = self.get_module_info()
if needs_case_type and not self.case_type:
yield {
'type': 'no case type',
'module': module_info,
}
if needs_case_detail:
if not self.case_details.short.columns:
yield {
'type': 'no case detail',
'module': module_info,
}
if self.get_app().commtrack_enabled and not self.product_details.short.columns:
for form in self.forms:
if self.case_list.show or \
any(action.show_product_stock for action in form.actions.load_update_cases):
yield {
'type': 'no product detail',
'module': module_info,
}
break
columns = self.case_details.short.columns + self.case_details.long.columns
if self.get_app().commtrack_enabled:
columns += self.product_details.short.columns
errors = self.validate_detail_columns(columns)
for error in errors:
yield error
def validate_for_build(self):
errors = super(AdvancedModule, self).validate_for_build()
if not self.forms and not self.case_list.show:
errors.append({
'type': 'no forms or case list',
'module': self.get_module_info(),
})
if self.case_list_form.form_id:
forms = self.forms
case_tag = None
loaded_case_types = None
for form in forms:
info = self.get_module_info()
form_info = {"id": form.id if hasattr(form, 'id') else None, "name": form.name}
non_auto_select_actions = [a for a in form.actions.load_update_cases if not a.auto_select]
this_forms_loaded_case_types = {action.case_type for action in non_auto_select_actions}
if loaded_case_types is None:
loaded_case_types = this_forms_loaded_case_types
elif loaded_case_types != this_forms_loaded_case_types:
errors.append({
'type': 'all forms in case list module must load the same cases',
'module': info,
'form': form_info,
})
if not non_auto_select_actions:
errors.append({
'type': 'case list module form must require case',
'module': info,
'form': form_info,
})
elif len(non_auto_select_actions) != 1:
for index, action in reversed(list(enumerate(non_auto_select_actions))):
if (
index > 0 and
non_auto_select_actions[index - 1].case_tag not in (p.tag for p in action.case_indices)
):
errors.append({
'type': 'case list module form can only load parent cases',
'module': info,
'form': form_info,
})
case_action = non_auto_select_actions[-1] if non_auto_select_actions else None
if case_action and case_action.case_type != self.case_type:
errors.append({
'type': 'case list module form must match module case type',
'module': info,
'form': form_info,
})
# set case_tag if not already set
case_tag = case_action.case_tag if not case_tag and case_action else case_tag
if case_action and case_action.case_tag != case_tag:
errors.append({
'type': 'all forms in case list module must have same case management',
'module': info,
'form': form_info,
'expected_tag': case_tag
})
if case_action and case_action.details_module and case_action.details_module != self.unique_id:
errors.append({
'type': 'forms in case list module must use modules details',
'module': info,
'form': form_info,
})
return errors
def _uses_case_type(self, case_type, invert_match=False):
return any(form.uses_case_type(case_type, invert_match) for form in self.forms)
def uses_usercase(self):
"""Return True if this module has any forms that use the usercase.
"""
return self._uses_case_type(USERCASE_TYPE)
@property
def phase_anchors(self):
return [phase.anchor for phase in self.schedule_phases]
def get_or_create_schedule_phase(self, anchor):
"""Returns a tuple of (phase, new?)"""
if anchor is None or anchor.strip() == '':
raise ScheduleError(_("You can't create a phase without an anchor property"))
phase = next((phase for phase in self.get_schedule_phases() if phase.anchor == anchor), None)
is_new_phase = False
if phase is None:
self.schedule_phases.append(SchedulePhase(anchor=anchor))
# TODO: is there a better way of doing this?
phase = list(self.get_schedule_phases())[-1] # get the phase from the module so we know the _parent
is_new_phase = True
return (phase, is_new_phase)
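# Sketch of how the (phase, new?) tuple above is typically consumed; the
# module and anchor names are invented for illustration:
#
#     phase, created = module.get_or_create_schedule_phase(anchor='edd')
#     if created:
#         assert phase.anchor == 'edd' and not phase.forms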
def _clear_schedule_phases(self):
self.schedule_phases = []
def update_schedule_phases(self, anchors):
""" Take a list of anchors, reorders, deletes and creates phases from it """
old_phases = {phase.anchor: phase for phase in self.get_schedule_phases()}
self._clear_schedule_phases()
for anchor in anchors:
try:
self.schedule_phases.append(old_phases.pop(anchor))
except KeyError:
self.get_or_create_schedule_phase(anchor)
deleted_phases_with_forms = [anchor for anchor, phase in old_phases.iteritems() if len(phase.forms)]
if deleted_phases_with_forms:
raise ScheduleError(_("You can't delete phases with anchors "
"{phase_anchors} because they have forms attached to them").format(
phase_anchors=(", ").join(deleted_phases_with_forms)))
return self.get_schedule_phases()
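# Illustration of the reorder/delete semantics above (anchors are invented):
# given existing phases ['a', 'b', 'c'] where only 'b' has forms,
#
#     module.update_schedule_phases(['b', 'a'])  # reorders 'b' before 'a', drops 'c'
#     module.update_schedule_phases(['a'])       # raises ScheduleError: 'b' has forms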
def update_schedule_phase_anchors(self, new_anchors):
""" takes a list of tuples (id, new_anchor) and updates the phase anchors """
for anchor in new_anchors:
id = anchor[0] - 1
new_anchor = anchor[1]
try:
list(self.get_schedule_phases())[id].change_anchor(new_anchor)
except IndexError:
pass # That phase wasn't found, so we can't change its anchor. Ignore it
class CareplanForm(IndexedFormBase, NavMenuItemMediaMixin):
form_type = 'careplan_form'
mode = StringProperty(required=True, choices=['create', 'update'])
custom_case_updates = DictProperty()
case_preload = DictProperty()
@classmethod
def wrap(cls, data):
if cls is CareplanForm:
doc_type = data['doc_type']
if doc_type == 'CareplanGoalForm':
return CareplanGoalForm.wrap(data)
elif doc_type == 'CareplanTaskForm':
return CareplanTaskForm.wrap(data)
else:
raise ValueError('Unexpected doc_type for CareplanForm', doc_type)
else:
return super(CareplanForm, cls).wrap(data)
def add_stuff_to_xform(self, xform, build_profile_id=None):
super(CareplanForm, self).add_stuff_to_xform(xform, build_profile_id)
xform.add_care_plan(self)
def get_case_updates(self, case_type):
if case_type == self.case_type:
format_key = self.get_case_property_name_formatter()
return [format_key(*item) for item in self.case_updates().iteritems()]
else:
return []
def get_case_type(self):
return self.case_type
def get_parent_case_type(self):
return self._parent.case_type
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
parent_types = set()
case_properties = set()
if case_type == self.case_type:
if case_type == CAREPLAN_GOAL:
parent_types.add((module_case_type, 'parent'))
elif case_type == CAREPLAN_TASK:
parent_types.add((CAREPLAN_GOAL, 'goal'))
case_properties.update(self.case_updates().keys())
return parent_types, case_properties
def is_registration_form(self, case_type=None):
return self.mode == 'create' and (not case_type or self.case_type == case_type)
def update_app_case_meta(self, app_case_meta):
from corehq.apps.reports.formdetails.readable import FormQuestionResponse
questions = {
q['value']: FormQuestionResponse(q)
for q in self.get_questions(self.get_app().langs, include_translations=True)
}
meta = app_case_meta.get_type(self.case_type)
for name, question_path in self.case_updates().items():
self.add_property_save(
app_case_meta,
self.case_type,
name,
questions,
question_path
)
for name, question_path in self.case_preload.items():
self.add_property_load(
app_case_meta,
self.case_type,
name,
questions,
question_path
)
meta.add_opener(self.unique_id, FormActionCondition(
type='always',
))
meta.add_closer(self.unique_id, FormActionCondition(
type='if',
question=self.close_path,
answer='yes',
))
class CareplanGoalForm(CareplanForm):
case_type = CAREPLAN_GOAL
name_path = StringProperty(required=True, default='/data/name')
date_followup_path = StringProperty(required=True, default='/data/date_followup')
description_path = StringProperty(required=True, default='/data/description')
close_path = StringProperty(required=True, default='/data/close_goal')
@classmethod
def new_form(cls, lang, name, mode):
action = 'Update' if mode == 'update' else 'New'
form = CareplanGoalForm(mode=mode)
name = name or '%s Careplan %s' % (action, CAREPLAN_CASE_NAMES[form.case_type])
form.name = {lang: name}
if mode == 'update':
form.description_path = '/data/description_group/description'
source = load_form_template('%s_%s.xml' % (form.case_type, mode))
return form, source
def case_updates(self):
changes = self.custom_case_updates.copy()
changes.update({
'date_followup': self.date_followup_path,
'description': self.description_path,
})
return changes
def get_fixed_questions(self):
def q(name, case_key, label):
return {
'name': name,
'key': case_key,
'label': label,
'path': self[name]
}
questions = [
q('description_path', 'description', _('Description')),
q('date_followup_path', 'date_followup', _('Followup date')),
]
if self.mode == 'create':
return [q('name_path', 'name', _('Name'))] + questions
else:
return questions + [q('close_path', 'close', _('Close if'))]
class CareplanTaskForm(CareplanForm):
case_type = CAREPLAN_TASK
name_path = StringProperty(required=True, default='/data/task_repeat/name')
date_followup_path = StringProperty(required=True, default='/data/date_followup')
description_path = StringProperty(required=True, default='/data/description')
latest_report_path = StringProperty(required=True, default='/data/progress_group/progress_update')
close_path = StringProperty(required=True, default='/data/task_complete')
@classmethod
def new_form(cls, lang, name, mode):
action = 'Update' if mode == 'update' else 'New'
form = CareplanTaskForm(mode=mode)
name = name or '%s Careplan %s' % (action, CAREPLAN_CASE_NAMES[form.case_type])
form.name = {lang: name}
if mode == 'create':
form.date_followup_path = '/data/task_repeat/date_followup'
form.description_path = '/data/task_repeat/description'
source = load_form_template('%s_%s.xml' % (form.case_type, mode))
return form, source
def case_updates(self):
changes = self.custom_case_updates.copy()
changes.update({
'date_followup': self.date_followup_path,
})
if self.mode == 'create':
changes['description'] = self.description_path
else:
changes['latest_report'] = self.latest_report_path
return changes
def get_fixed_questions(self):
def q(name, case_key, label):
return {
'name': name,
'key': case_key,
'label': label,
'path': self[name]
}
questions = [
q('date_followup_path', 'date_followup', _('Followup date')),
]
if self.mode == 'create':
return [
q('name_path', 'name', _('Name')),
q('description_path', 'description', _('Description')),
] + questions
else:
return questions + [
q('latest_report_path', 'latest_report', _('Latest report')),
q('close_path', 'close', _('Close if')),
]
class CareplanModule(ModuleBase):
"""
A set of forms and configuration for managing the Care Plan workflow.
"""
module_type = 'careplan'
parent_select = SchemaProperty(ParentSelect)
display_separately = BooleanProperty(default=False)
forms = SchemaListProperty(CareplanForm)
goal_details = SchemaProperty(DetailPair)
task_details = SchemaProperty(DetailPair)
@classmethod
def new_module(cls, name, lang, target_module_id, target_case_type):
lang = lang or 'en'
module = CareplanModule(
name={lang: name or ugettext("Care Plan")},
parent_select=ParentSelect(
active=True,
relationship='parent',
module_id=target_module_id
),
case_type=target_case_type,
goal_details=DetailPair(
short=cls._get_detail(lang, 'goal_short'),
long=cls._get_detail(lang, 'goal_long'),
),
task_details=DetailPair(
short=cls._get_detail(lang, 'task_short'),
long=cls._get_detail(lang, 'task_long'),
)
)
module.get_or_create_unique_id()
return module
@classmethod
def _get_detail(cls, lang, detail_type):
header = ugettext('Goal') if detail_type.startswith('goal') else ugettext('Task')
columns = [
DetailColumn(
format='plain',
header={lang: header},
field='name',
model='case'),
DetailColumn(
format='date',
header={lang: ugettext("Followup")},
field='date_followup',
model='case')]
if detail_type.endswith('long'):
columns.append(DetailColumn(
format='plain',
header={lang: ugettext("Description")},
field='description',
model='case'))
if detail_type == 'tasks_long':
columns.append(DetailColumn(
format='plain',
header={lang: ugettext("Last update")},
field='latest_report',
model='case'))
return Detail(type=detail_type, columns=columns)
def add_insert_form(self, from_module, form, index=None, with_source=False):
if isinstance(form, CareplanForm):
if index is not None:
self.forms.insert(index, form)
else:
self.forms.append(form)
return self.get_form(index if index is not None else -1)
else:
raise IncompatibleFormTypeException()
def requires_case_details(self):
return True
def get_case_types(self):
return set([self.case_type]) | set(f.case_type for f in self.forms)
def get_form_by_type(self, case_type, mode):
for form in self.get_forms():
if form.case_type == case_type and form.mode == mode:
return form
def get_details(self):
return (
('%s_short' % CAREPLAN_GOAL, self.goal_details.short, True),
('%s_long' % CAREPLAN_GOAL, self.goal_details.long, True),
('%s_short' % CAREPLAN_TASK, self.task_details.short, True),
('%s_long' % CAREPLAN_TASK, self.task_details.long, True),
)
def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
module_info = self.get_module_info()
if needs_case_type and not self.case_type:
yield {
'type': 'no case type',
'module': module_info,
}
if needs_case_detail:
if not self.goal_details.short.columns:
yield {
'type': 'no case detail for goals',
'module': module_info,
}
if not self.task_details.short.columns:
yield {
'type': 'no case detail for tasks',
'module': module_info,
}
columns = self.goal_details.short.columns + self.goal_details.long.columns
columns += self.task_details.short.columns + self.task_details.long.columns
errors = self.validate_detail_columns(columns)
for error in errors:
yield error
def validate_for_build(self):
errors = super(CareplanModule, self).validate_for_build()
if not self.forms:
errors.append({
'type': 'no forms',
'module': self.get_module_info(),
})
return errors
class ReportGraphConfig(DocumentSchema):
graph_type = StringProperty(
choices=[
'bar',
'time',
'xy',
],
default='bar',
required=True,
)
series_configs = DictProperty(DictProperty)
config = DictProperty()
class ReportAppFilter(DocumentSchema):
@classmethod
def wrap(cls, data):
if cls is ReportAppFilter:
doc_type = data['doc_type']
doc_type_to_filter_class = {
'AutoFilter': AutoFilter,
'CustomDataAutoFilter': CustomDataAutoFilter,
'StaticChoiceFilter': StaticChoiceFilter,
'StaticChoiceListFilter': StaticChoiceListFilter,
'StaticDatespanFilter': StaticDatespanFilter,
'CustomDatespanFilter': CustomDatespanFilter,
'CustomMonthFilter': CustomMonthFilter,
'MobileSelectFilter': MobileSelectFilter,
'AncestorLocationTypeFilter': AncestorLocationTypeFilter,
'NumericFilter': NumericFilter,
}
try:
klass = doc_type_to_filter_class[doc_type]
except KeyError:
raise ValueError('Unexpected doc_type for ReportAppFilter', doc_type)
else:
return klass.wrap(data)
else:
return super(ReportAppFilter, cls).wrap(data)
def get_filter_value(self, user, ui_filter):
raise NotImplementedError
def _filter_by_case_sharing_group_id(user, ui_filter):
from corehq.apps.reports_core.filters import Choice
return [
Choice(value=group._id, display=None)
for group in user.get_case_sharing_groups()
]
def _filter_by_location_id(user, ui_filter):
return ui_filter.value(**{ui_filter.name: user.location_id})
def _filter_by_username(user, ui_filter):
from corehq.apps.reports_core.filters import Choice
return Choice(value=user.raw_username, display=None)
def _filter_by_user_id(user, ui_filter):
from corehq.apps.reports_core.filters import Choice
return Choice(value=user._id, display=None)
def _filter_by_parent_location_id(user, ui_filter):
location = user.sql_location
location_parent = location.parent.location_id if location and location.parent else None
return ui_filter.value(**{ui_filter.name: location_parent})
_filter_type_to_func = {
'case_sharing_group': _filter_by_case_sharing_group_id,
'location_id': _filter_by_location_id,
'parent_location_id': _filter_by_parent_location_id,
'username': _filter_by_username,
'user_id': _filter_by_user_id,
}
class AutoFilter(ReportAppFilter):
filter_type = StringProperty(choices=_filter_type_to_func.keys())
def get_filter_value(self, user, ui_filter):
return _filter_type_to_func[self.filter_type](user, ui_filter)
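# Hypothetical illustration of the dispatch table above: an AutoFilter with
# filter_type 'username' resolves through _filter_by_username at restore time.
#
#     f = AutoFilter(filter_type='username')
#     f.get_filter_value(restore_user, ui_filter)  # -> Choice(value=<raw username>, display=None)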
class CustomDataAutoFilter(ReportAppFilter):
custom_data_property = StringProperty()
def get_filter_value(self, user, ui_filter):
from corehq.apps.reports_core.filters import Choice
return Choice(value=user.user_data[self.custom_data_property], display=None)
class StaticChoiceFilter(ReportAppFilter):
select_value = StringProperty()
def get_filter_value(self, user, ui_filter):
from corehq.apps.reports_core.filters import Choice
return [Choice(value=self.select_value, display=None)]
class StaticChoiceListFilter(ReportAppFilter):
value = StringListProperty()
def get_filter_value(self, user, ui_filter):
from corehq.apps.reports_core.filters import Choice
return [Choice(value=string_value, display=None) for string_value in self.value]
class StaticDatespanFilter(ReportAppFilter):
date_range = StringProperty(
choices=[choice.slug for choice in get_simple_dateranges()],
required=True,
)
def get_filter_value(self, user, ui_filter):
start_date, end_date = get_daterange_start_end_dates(self.date_range)
return DateSpan(startdate=start_date, enddate=end_date)
class CustomDatespanFilter(ReportAppFilter):
operator = StringProperty(
choices=[
'=',
'<=',
'>=',
'>',
'<',
'between'
],
required=True,
)
date_number = StringProperty(required=True)
date_number2 = StringProperty()
def get_filter_value(self, user, ui_filter):
assert user is not None, (
"CustomDatespanFilter.get_filter_value must be called "
"with an OTARestoreUser object, not None")
timezone = get_timezone_for_domain(user.domain)
today = ServerTime(datetime.datetime.utcnow()).user_time(timezone).done().date()
start_date = end_date = None
days = int(self.date_number)
if self.operator == 'between':
days2 = int(self.date_number2)
# allows user to have specified the two numbers in either order
if days > days2:
end = days2
start = days
else:
start = days2
end = days
start_date = today - datetime.timedelta(days=start)
end_date = today - datetime.timedelta(days=end)
elif self.operator == '=':
start_date = end_date = today - datetime.timedelta(days=days)
elif self.operator == '>=':
start_date = None
end_date = today - datetime.timedelta(days=days)
elif self.operator == '<=':
start_date = today - datetime.timedelta(days=days)
end_date = None
elif self.operator == '<':
start_date = today - datetime.timedelta(days=days - 1)
end_date = None
elif self.operator == '>':
start_date = None
end_date = today - datetime.timedelta(days=days + 1)
return DateSpan(startdate=start_date, enddate=end_date)
def is_lte(integer):
def validate(x):
if not x <= integer:
raise BadValueError('Value must be less than or equal to {}'.format(integer))
return validate
def is_gte(integer):
def validate(x):
if not x >= integer:
raise BadValueError('Value must be greater than or equal to {}'.format(integer))
return validate
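# Sketch of the validator factories above (values are arbitrary):
#
#     check = is_lte(28)
#     check(28)   # passes silently
#     check(29)   # raises BadValueError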
class CustomMonthFilter(ReportAppFilter):
"""
Filter by months that start on a day number other than 1
See [FB 215656](http://manage.dimagi.com/default.asp?215656)
"""
# Values for start_of_month < 1 specify the number of days back from the end of the month.
# Values are capped at 28 (the length of February).
start_of_month = IntegerProperty(
required=True,
validators=(is_gte(-27), is_lte(28))
)
# Length of the DateSpan to return, expressed as the number of months to go back
period = IntegerProperty(
default=DEFAULT_MONTH_FILTER_PERIOD_LENGTH,
validators=(is_gte(0),)
)
@classmethod
def wrap(cls, doc):
doc['start_of_month'] = int(doc['start_of_month'])
if 'period' in doc:
doc['period'] = int(doc['period'] or DEFAULT_MONTH_FILTER_PERIOD_LENGTH)
return super(CustomMonthFilter, cls).wrap(doc)
def get_filter_value(self, user, ui_filter):
def get_last_month(this_month):
return datetime.date(this_month.year, this_month.month, 1) - datetime.timedelta(days=1)
def get_last_day(date):
_, last_day = calendar.monthrange(date.year, date.month)
return last_day
start_of_month = int(self.start_of_month)
today = datetime.date.today()
if start_of_month > 0:
start_day = start_of_month
else:
# start_of_month is zero or negative. Work backwards from the end of the month
start_day = get_last_day(today) + start_of_month
# Loop over months backwards for period > 0
month = today if today.day >= start_day else get_last_month(today)
for i in range(int(self.period)):
month = get_last_month(month)
if start_of_month > 0:
start_date = datetime.date(month.year, month.month, start_day)
days = get_last_day(start_date) - 1
end_date = start_date + datetime.timedelta(days=days)
else:
start_day = get_last_day(month) + start_of_month
start_date = datetime.date(month.year, month.month, start_day)
next_month = datetime.date(month.year, month.month, get_last_day(month)) + datetime.timedelta(days=1)
end_day = get_last_day(next_month) + start_of_month - 1
end_date = datetime.date(next_month.year, next_month.month, end_day)
return DateSpan(startdate=start_date, enddate=end_date)
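# Worked example for the filter above (dates are illustrative): with
# start_of_month=15 and period=1, on 2016-03-20 the current custom "month"
# began on 2016-03-15, so going back one period yields the span
# 2016-02-15 .. 2016-03-14.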
class MobileSelectFilter(ReportAppFilter):
def get_filter_value(self, user, ui_filter):
return None
class AncestorLocationTypeFilter(ReportAppFilter):
ancestor_location_type_name = StringProperty()
def get_filter_value(self, user, ui_filter):
from corehq.apps.locations.models import SQLLocation
try:
ancestor = user.sql_location.get_ancestors(include_self=True).\
get(location_type__name=self.ancestor_location_type_name)
except (AttributeError, SQLLocation.DoesNotExist):
# user.sql_location is None, or location does not have an ancestor of that type
return None
return ancestor.location_id
class NumericFilter(ReportAppFilter):
operator = StringProperty(choices=['=', '!=', '<', '<=', '>', '>='])
operand = FloatProperty()
@classmethod
def wrap(cls, doc):
doc['operand'] = float(doc['operand'])
return super(NumericFilter, cls).wrap(doc)
def get_filter_value(self, user, ui_filter):
return {
'operator': self.operator,
'operand': self.operand,
}
class ReportAppConfig(DocumentSchema):
"""
Class for configuring how a user configurable report shows up in an app
"""
report_id = StringProperty(required=True)
header = DictProperty()
localized_description = DictProperty()
xpath_description = StringProperty()
use_xpath_description = BooleanProperty(default=False)
graph_configs = DictProperty(ReportGraphConfig)
filters = SchemaDictProperty(ReportAppFilter)
uuid = StringProperty(required=True)
_report = None
def __init__(self, *args, **kwargs):
super(ReportAppConfig, self).__init__(*args, **kwargs)
if not self.uuid:
self.uuid = random_hex()
@classmethod
def wrap(cls, doc):
# for backwards compatibility with apps that have localized or xpath descriptions
old_description = doc.get('description')
if old_description:
if isinstance(old_description, basestring) and not doc.get('xpath_description'):
doc['xpath_description'] = old_description
elif isinstance(old_description, dict) and not doc.get('localized_description'):
doc['localized_description'] = old_description
if not doc.get('xpath_description'):
doc['xpath_description'] = '""'
return super(ReportAppConfig, cls).wrap(doc)
def report(self, domain):
if self._report is None:
from corehq.apps.userreports.models import get_report_config
self._report = get_report_config(self.report_id, domain)[0]
return self._report
class ReportModule(ModuleBase):
"""
Module for user configurable reports
"""
module_type = 'report'
report_configs = SchemaListProperty(ReportAppConfig)
forms = []
_loaded = False
@property
@memoized
def reports(self):
from corehq.apps.userreports.models import get_report_configs
return get_report_configs([r.report_id for r in self.report_configs], self.get_app().domain)
@classmethod
def new_module(cls, name, lang):
module = ReportModule(
name={(lang or 'en'): name or ugettext("Reports")},
case_type='',
)
module.get_or_create_unique_id()
return module
def get_details(self):
from .suite_xml.features.mobile_ucr import ReportModuleSuiteHelper
return ReportModuleSuiteHelper(self).get_details()
def get_custom_entries(self):
from .suite_xml.features.mobile_ucr import ReportModuleSuiteHelper
return ReportModuleSuiteHelper(self).get_custom_entries()
def get_menus(self, supports_module_filter=False):
kwargs = {}
if supports_module_filter:
kwargs['relevant'] = interpolate_xpath(self.module_filter)
menu = suite_models.LocalizedMenu(
id=id_strings.menu_id(self),
menu_locale_id=id_strings.module_locale(self),
media_image=bool(len(self.all_image_paths())),
media_audio=bool(len(self.all_audio_paths())),
image_locale_id=id_strings.module_icon_locale(self),
audio_locale_id=id_strings.module_audio_locale(self),
**kwargs
)
menu.commands.extend([
suite_models.Command(id=id_strings.report_command(config.uuid))
for config in self.report_configs
])
yield menu
def check_report_validity(self):
"""
returns is_valid, valid_report_configs
If any report doesn't exist, is_valid is False, otherwise True
valid_report_configs is a list of all report configs that refer to existing reports
"""
try:
all_report_ids = [report._id for report in self.reports]
valid_report_configs = [report_config for report_config in self.report_configs
if report_config.report_id in all_report_ids]
is_valid = (len(valid_report_configs) == len(self.report_configs))
except ReportConfigurationNotFoundError:
valid_report_configs = [] # assuming that if one report is in a different domain, they all are
is_valid = False
return namedtuple('ReportConfigValidity', 'is_valid valid_report_configs')(
is_valid=is_valid,
valid_report_configs=valid_report_configs
)
def validate_for_build(self):
errors = super(ReportModule, self).validate_for_build()
if not self.check_report_validity().is_valid:
errors.append({
'type': 'report config ref invalid',
'module': self.get_module_info()
})
return errors
class ShadowModule(ModuleBase, ModuleDetailsMixin):
"""
A module that acts as a shortcut to another module. This module has its own
settings (name, icon/audio, filter, etc.) and its own case list/detail, but
inherits case type and forms from its source module.
"""
module_type = 'shadow'
source_module_id = StringProperty()
forms = []
excluded_form_ids = SchemaListProperty()
case_details = SchemaProperty(DetailPair)
ref_details = SchemaProperty(DetailPair)
put_in_root = BooleanProperty(default=False)
case_list = SchemaProperty(CaseList)
referral_list = SchemaProperty(CaseList)
task_list = SchemaProperty(CaseList)
parent_select = SchemaProperty(ParentSelect)
get_forms = IndexedSchema.Getter('forms')
@classmethod
def wrap(cls, data):
data = cls.wrap_details(data)
return super(ShadowModule, cls).wrap(data)
@property
def source_module(self):
if self.source_module_id:
try:
return self._parent.get_module_by_unique_id(self.source_module_id,
error=_("Could not find source module for '{}'.").format(self.default_name()))
except ModuleNotFoundException:
pass
return None
@property
def case_type(self):
if not self.source_module:
return None
return self.source_module.case_type
@property
def requires(self):
if not self.source_module:
return 'none'
return self.source_module.requires
@property
def root_module_id(self):
if not self.source_module:
return None
return self.source_module.root_module_id
def get_suite_forms(self):
if not self.source_module:
return []
return [f for f in self.source_module.get_forms() if f.unique_id not in self.excluded_form_ids]
@parse_int([1])
def get_form(self, i):
return None
def requires_case_details(self):
if not self.source_module:
return False
return self.source_module.requires_case_details()
def get_case_types(self):
if not self.source_module:
return []
return self.source_module.get_case_types()
@memoized
def get_subcase_types(self):
if not self.source_module:
return []
return self.source_module.get_subcase_types()
@memoized
def all_forms_require_a_case(self):
if not self.source_module:
return []
return self.source_module.all_forms_require_a_case()
@classmethod
def new_module(cls, name, lang):
lang = lang or 'en'
detail = Detail(
columns=[DetailColumn(
format='plain',
header={(lang or 'en'): ugettext("Name")},
field='name',
model='case',
)]
)
module = ShadowModule(
name={(lang or 'en'): name or ugettext("Untitled Module")},
case_details=DetailPair(
short=Detail(detail.to_json()),
long=Detail(detail.to_json()),
),
)
module.get_or_create_unique_id()
return module
def validate_for_build(self):
errors = super(ShadowModule, self).validate_for_build()
errors += self.validate_details_for_build()
if not self.source_module:
errors.append({
'type': 'no source module id',
'module': self.get_module_info()
})
return errors
class LazyBlobDoc(BlobMixin):
"""LazyAttachmentDoc for blob db
Cache blobs in local memory (for this request)
and in django cache (for the next few requests)
and commit to couchdb.
See also `dimagi.utils.couch.lazy_attachment_doc.LazyAttachmentDoc`
Cache strategy:
- on fetch, check in local memory, then cache
- if both are a miss, fetch from couchdb and store in both
- after an attachment is committed to the blob db and the
save has succeeded, save the attachment in the cache
"""
def __init__(self, *args, **kwargs):
super(LazyBlobDoc, self).__init__(*args, **kwargs)
self._LAZY_ATTACHMENTS = {}
# to cache fetched attachments
# these we do *not* send back down upon save
self._LAZY_ATTACHMENTS_CACHE = {}
@classmethod
def wrap(cls, data):
if "_attachments" in data:
data = data.copy()
attachments = data.pop("_attachments").copy()
if cls.migrating_blobs_from_couch:
# preserve stubs so couch attachments don't get deleted on save
stubs = {}
for name, value in list(attachments.items()):
if isinstance(value, dict) and "stub" in value:
stubs[name] = attachments.pop(name)
if stubs:
data["_attachments"] = stubs
else:
attachments = None
self = super(LazyBlobDoc, cls).wrap(data)
if attachments:
for name, attachment in attachments.items():
if isinstance(attachment, basestring):
info = {"content": attachment}
else:
raise ValueError("Unknown attachment format: {!r}"
.format(attachment))
self.lazy_put_attachment(name=name, **info)
return self
def __attachment_cache_key(self, name):
return u'lazy_attachment/{id}/{name}'.format(id=self.get_id, name=name)
def __set_cached_attachment(self, name, content, timeout=60*60*24):
cache.set(self.__attachment_cache_key(name), content, timeout=timeout)
self._LAZY_ATTACHMENTS_CACHE[name] = content
def __get_cached_attachment(self, name):
try:
# it has been fetched already during this request
content = self._LAZY_ATTACHMENTS_CACHE[name]
except KeyError:
content = cache.get(self.__attachment_cache_key(name))
if content is not None:
self._LAZY_ATTACHMENTS_CACHE[name] = content
return content
def put_attachment(self, content, name=None, *args, **kw):
cache.delete(self.__attachment_cache_key(name))
self._LAZY_ATTACHMENTS_CACHE.pop(name, None)
return super(LazyBlobDoc, self).put_attachment(content, name, *args, **kw)
def lazy_put_attachment(self, content, name=None, content_type=None,
content_length=None):
"""
Ensure the attachment is available through lazy_fetch_attachment
and that upon self.save(), the attachments are put to the doc as well
"""
self._LAZY_ATTACHMENTS[name] = {
'content': content,
'content_type': content_type,
'content_length': content_length,
}
def lazy_fetch_attachment(self, name):
# it has been put/lazy-put already during this request
if name in self._LAZY_ATTACHMENTS:
content = self._LAZY_ATTACHMENTS[name]['content']
else:
content = self.__get_cached_attachment(name)
if content is None:
try:
content = self.fetch_attachment(name)
except ResourceNotFound as e:
# django cache will pickle this exception for you
# but e.response isn't picklable
if hasattr(e, 'response'):
del e.response
content = e
self.__set_cached_attachment(name, content, timeout=60*5)
raise
else:
self.__set_cached_attachment(name, content)
if isinstance(content, ResourceNotFound):
raise content
return content
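# Sketch of the fetch path above (attachment name and content are made up):
#
#     doc.lazy_put_attachment('<xml/>', name='form.xml')
#     doc.lazy_fetch_attachment('form.xml')   # served from _LAZY_ATTACHMENTS
#     doc.save()                              # commits blobs, warms the cache
#     doc.lazy_fetch_attachment('form.xml')   # request-local / django cache hit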
def lazy_list_attachments(self):
keys = set()
keys.update(getattr(self, '_LAZY_ATTACHMENTS', None) or {})
keys.update(self.blobs or {})
return keys
def save(self, **params):
def super_save():
super(LazyBlobDoc, self).save(**params)
if self._LAZY_ATTACHMENTS:
with self.atomic_blobs(super_save):
for name, info in self._LAZY_ATTACHMENTS.items():
if not info['content_type']:
info['content_type'] = ';'.join(filter(None, guess_type(name)))
super(LazyBlobDoc, self).put_attachment(name=name, **info)
# super_save() has succeeded by now
for name, info in self._LAZY_ATTACHMENTS.items():
self.__set_cached_attachment(name, info['content'])
self._LAZY_ATTACHMENTS.clear()
else:
super_save()
class VersionedDoc(LazyBlobDoc):
"""
A document that keeps an auto-incrementing version number, knows how to make copies of itself,
delete a copy of itself, and revert back to an earlier copy of itself.
"""
domain = StringProperty()
copy_of = StringProperty()
version = IntegerProperty()
short_url = StringProperty()
short_odk_url = StringProperty()
short_odk_media_url = StringProperty()
_meta_fields = ['_id', '_rev', 'domain', 'copy_of', 'version', 'short_url', 'short_odk_url', 'short_odk_media_url']
@property
def id(self):
return self._id
def save(self, response_json=None, increment_version=None, **params):
if increment_version is None:
increment_version = not self.copy_of
if increment_version:
self.version = self.version + 1 if self.version else 1
super(VersionedDoc, self).save(**params)
if response_json is not None:
if 'update' not in response_json:
response_json['update'] = {}
response_json['update']['app-version'] = self.version
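# Versioning sketch (assumes a fresh doc that is not a copy of anything):
#
#     doc = VersionedDoc(domain='demo')       # version is None
#     doc.save()                              # version -> 1
#     doc.save()                              # version -> 2
#     build = doc.make_build(); build.save()  # copy_of is set, so version is unchanged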
def make_build(self):
assert self.get_id
assert self.copy_of is None
cls = self.__class__
copies = cls.view('app_manager/applications', key=[self.domain, self._id, self.version], include_docs=True, limit=1).all()
if copies:
copy = copies[0]
else:
copy = deepcopy(self.to_json())
bad_keys = ('_id', '_rev', '_attachments', 'external_blobs',
'short_url', 'short_odk_url', 'short_odk_media_url', 'recipients')
for bad_key in bad_keys:
if bad_key in copy:
del copy[bad_key]
copy = cls.wrap(copy)
copy['copy_of'] = self._id
copy.copy_attachments(self)
return copy
def copy_attachments(self, other, regexp=ATTACHMENT_REGEX):
for name in other.lazy_list_attachments() or {}:
if regexp is None or re.match(regexp, name):
self.lazy_put_attachment(other.lazy_fetch_attachment(name), name)
def make_reversion_to_copy(self, copy):
"""
Replaces couch doc with a copy of the backup ("copy").
Returns another Application/RemoteApp referring to this
updated couch doc. The returned doc should be used in place of
the original doc, i.e. should be called as follows:
app = app.make_reversion_to_copy(copy)
app.save()
"""
if copy.copy_of != self._id:
raise VersioningError("%s is not a copy of %s" % (copy, self))
app = deepcopy(copy.to_json())
app['_rev'] = self._rev
app['_id'] = self._id
app['version'] = self.version
app['copy_of'] = None
app.pop('_attachments', None)
app.pop('external_blobs', None)
cls = self.__class__
app = cls.wrap(app)
app.copy_attachments(copy)
return app
def delete_copy(self, copy):
if copy.copy_of != self._id:
raise VersioningError("%s is not a copy of %s" % (copy, self))
copy.delete_app()
copy.save(increment_version=False)
def scrub_source(self, source):
"""
To be overridden.
Use this to scrub out anything
that should not be exposed in the
application source, such as ids, etc.
"""
return source
def export_json(self, dump_json=True):
source = deepcopy(self.to_json())
for field in self._meta_fields:
if field in source:
del source[field]
_attachments = {}
for name in self.lazy_list_attachments():
if re.match(ATTACHMENT_REGEX, name):
# FIXME loss of metadata (content type, etc.)
_attachments[name] = self.lazy_fetch_attachment(name)
# the '_attachments' value is a dict of `name: blob_content`
# pairs, and is part of the exported (serialized) app interface
source['_attachments'] = _attachments
source.pop("external_blobs", None)
source = self.scrub_source(source)
return json.dumps(source) if dump_json else source
@classmethod
def from_source(cls, source, domain):
for field in cls._meta_fields:
if field in source:
del source[field]
source['domain'] = domain
app = cls.wrap(source)
return app
def is_deleted(self):
return self.doc_type.endswith(DELETED_SUFFIX)
def unretire(self):
self.doc_type = self.get_doc_type()
self.save()
def get_doc_type(self):
if self.doc_type.endswith(DELETED_SUFFIX):
return self.doc_type[:-len(DELETED_SUFFIX)]
else:
return self.doc_type
def absolute_url_property(method):
"""
Helper for the various fully qualified application URLs
Turns a method returning an unqualified URL
into a property returning a fully qualified URL
(e.g., '/my_url/' => 'https://www.commcarehq.org/my_url/')
Expects `self.url_base` to be fully qualified url base
"""
@wraps(method)
def _inner(self):
return "%s%s" % (self.url_base, method(self))
return property(_inner)
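# Minimal sketch of the decorator above (class and path are invented):
#
#     class Thing(object):
#         url_base = 'https://www.commcarehq.org'
#         @absolute_url_property
#         def info_url(self):
#             return '/info/'
#
#     Thing().info_url  # -> 'https://www.commcarehq.org/info/'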
class BuildProfile(DocumentSchema):
name = StringProperty()
langs = StringListProperty()
def __eq__(self, other):
return self.langs == other.langs
def __ne__(self, other):
return not self.__eq__(other)
class MediaList(DocumentSchema):
media_refs = StringListProperty()
class ApplicationBase(VersionedDoc, SnapshotMixin,
CommCareFeatureSupportMixin,
CommentMixin):
"""
Abstract base class for Application and RemoteApp.
Contains methods for generating the various files and zipping them into CommCare.jar
See note at top of file for high-level overview.
"""
recipients = StringProperty(default="")
# this is the supported way of specifying which commcare build to use
build_spec = SchemaProperty(BuildSpec)
platform = StringProperty(
choices=["nokia/s40", "nokia/s60", "winmo", "generic"],
default="nokia/s40"
)
text_input = StringProperty(
choices=['roman', 'native', 'custom-keys', 'qwerty'],
default="roman"
)
# The following properties should only appear on saved builds
# built_with stores a record of CommCare build used in a saved app
built_with = SchemaProperty(BuildRecord)
build_signed = BooleanProperty(default=True)
built_on = DateTimeProperty(required=False)
build_comment = StringProperty()
comment_from = StringProperty()
build_broken = BooleanProperty(default=False)
# not used yet, but nice for tagging/debugging
# currently only canonical value is 'incomplete-build',
# for when build resources aren't found where they should be
build_broken_reason = StringProperty()
# watch out for a past bug:
# when reverting to a build that happened to be released,
# the released flag got copied into the new app doc, and when new releases
# were made, they were automatically starred.
# AFAIK this is fixed in code, but it may rear its ugly head in an
# as-yet-not-understood way for apps that already had this problem. Just keep an eye out.
is_released = BooleanProperty(default=False)
# django-style salted hash of the admin password
admin_password = StringProperty()
# a=Alphanumeric, n=Numeric, x=Neither (not allowed)
admin_password_charset = StringProperty(choices=['a', 'n', 'x'], default='n')
langs = StringListProperty()
secure_submissions = BooleanProperty(default=False)
# metadata for data platform
amplifies_workers = StringProperty(
choices=[AMPLIFIES_YES, AMPLIFIES_NO, AMPLIFIES_NOT_SET],
default=AMPLIFIES_NOT_SET
)
amplifies_project = StringProperty(
choices=[AMPLIFIES_YES, AMPLIFIES_NO, AMPLIFIES_NOT_SET],
default=AMPLIFIES_NOT_SET
)
minimum_use_threshold = StringProperty(
default='15'
)
experienced_threshold = StringProperty(
default='3'
)
# exchange properties
cached_properties = DictProperty()
description = StringProperty()
deployment_date = DateTimeProperty()
phone_model = StringProperty()
user_type = StringProperty()
attribution_notes = StringProperty()
# always false for RemoteApp
case_sharing = BooleanProperty(default=False)
vellum_case_management = BooleanProperty(default=False)
build_profiles = SchemaDictProperty(BuildProfile)
# each language is a key and the value is a list of multimedia referenced in that language
media_language_map = SchemaDictProperty(MediaList)
use_j2me_endpoint = BooleanProperty(default=False)
# Whether or not the Application has had any forms submitted against it
has_submissions = BooleanProperty(default=False)
@classmethod
def wrap(cls, data):
should_save = False
# scrape for old conventions and get rid of them
if 'commcare_build' in data:
version, build_number = data['commcare_build'].split('/')
data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
del data['commcare_build']
if 'commcare_tag' in data:
version, build_number = current_builds.TAG_MAP[data['commcare_tag']]
data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
del data['commcare_tag']
if data.has_key("built_with") and isinstance(data['built_with'], basestring):
data['built_with'] = BuildSpec.from_string(data['built_with']).to_json()
if 'native_input' in data:
if 'text_input' not in data:
data['text_input'] = 'native' if data['native_input'] else 'roman'
del data['native_input']
if 'build_langs' in data:
if data['build_langs'] != data['langs'] and 'build_profiles' not in data:
data['build_profiles'] = {
uuid.uuid4().hex: dict(
name=', '.join(data['build_langs']),
langs=data['build_langs']
)
}
should_save = True
del data['build_langs']
if 'original_doc' in data:
data['copy_history'] = [data.pop('original_doc')]
should_save = True
data["description"] = data.get('description') or data.get('short_description')
self = super(ApplicationBase, cls).wrap(data)
if not self.build_spec or self.build_spec.is_null():
self.build_spec = get_default_build_spec()
if should_save:
self.save()
return self
def rename_lang(self, old_lang, new_lang):
validate_lang(new_lang)
def is_remote_app(self):
return False
def get_latest_app(self, released_only=True):
if released_only:
return get_app(self.domain, self.get_id, latest=True)
else:
return self.view('app_manager/applications',
startkey=[self.domain, self.get_id, {}],
endkey=[self.domain, self.get_id],
include_docs=True,
limit=1,
descending=True,
).first()
@memoized
def get_latest_saved(self):
"""
This looks really similar to get_latest_app; not sure why tim added it.
"""
doc = (get_latest_released_app_doc(self.domain, self._id) or
get_latest_build_doc(self.domain, self._id))
return self.__class__.wrap(doc) if doc else None
def set_admin_password(self, raw_password):
salt = os.urandom(5).encode('hex')
self.admin_password = make_password(raw_password, salt=salt)
if raw_password.isnumeric():
self.admin_password_charset = 'n'
elif raw_password.isalnum():
self.admin_password_charset = 'a'
else:
self.admin_password_charset = 'x'
def check_password_charset(self):
errors = []
if hasattr(self, 'profile'):
password_format = self.profile.get('properties', {}).get('password_format', 'n')
message = ('Your app requires {0} passwords '
'but the admin password is not {0}')
if password_format == 'n' and self.admin_password_charset in 'ax':
errors.append({'type': 'password_format',
'message': message.format('numeric')})
if password_format == 'a' and self.admin_password_charset in 'x':
errors.append({'type': 'password_format',
'message': message.format('alphanumeric')})
return errors
def get_build(self):
return self.build_spec.get_build()
@property
def build_version(self):
# `LooseVersion`s are smart!
# LooseVersion('2.12.0') > '2.2'
# (even though '2.12.0' < '2.2')
if self.build_spec.version:
return LooseVersion(self.build_spec.version)
@property
def commcare_minor_release(self):
"""This is mostly just for views"""
return '%d.%d' % self.build_spec.minor_release()
@property
def short_name(self):
return self.name if len(self.name) <= 12 else '%s..' % self.name[:10]
@property
def has_careplan_module(self):
return False
@property
def url_base(self):
custom_base_url = getattr(self, 'custom_base_url', None)
return custom_base_url or get_url_base()
@absolute_url_property
def post_url(self):
if self.secure_submissions:
url_name = 'receiver_secure_post_with_app_id'
else:
url_name = 'receiver_post_with_app_id'
return reverse(url_name, args=[self.domain, self.get_id])
@absolute_url_property
def key_server_url(self):
return reverse('key_server_url', args=[self.domain])
@absolute_url_property
def ota_restore_url(self):
return reverse('app_aware_restore', args=[self.domain, self._id])
@absolute_url_property
def form_record_url(self):
return '/a/%s/api/custom/pact_formdata/v1/' % self.domain
@absolute_url_property
def hq_profile_url(self):
# RemoteApp already has a property called "profile_url",
# Application.profile_url just points here to stop the conflict
# http://manage.dimagi.com/default.asp?227088#1149422
return "%s?latest=true" % (
reverse('download_profile', args=[self.domain, self._id])
)
@absolute_url_property
def media_profile_url(self):
return "%s?latest=true" % (
reverse('download_media_profile', args=[self.domain, self._id])
)
@property
def profile_loc(self):
return "jr://resource/profile.xml"
@absolute_url_property
def jar_url(self):
return reverse('download_jar', args=[self.domain, self._id])
def get_jar_path(self):
spec = {
'nokia/s40': 'Nokia/S40',
'nokia/s60': 'Nokia/S60',
'generic': 'Generic/Default',
'winmo': 'Native/WinMo'
}[self.platform]
if self.platform in ('nokia/s40', 'nokia/s60'):
spec += {
('native',): '-native-input',
('roman',): '-generic',
('custom-keys',): '-custom-keys',
('qwerty',): '-qwerty'
}[(self.text_input,)]
return spec
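# Example of the mapping above: platform 'nokia/s40' with text_input 'native'
# yields the jar path 'Nokia/S40-native-input'.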
def get_jadjar(self):
return self.get_build().get_jadjar(self.get_jar_path(), self.use_j2me_endpoint)
def validate_fixtures(self):
if not domain_has_privilege(self.domain, privileges.LOOKUP_TABLES):
# remote apps don't support get_forms yet.
# for now they can circumvent the fixture limitation. sneaky bastards.
if hasattr(self, 'get_forms'):
for form in self.get_forms():
if form.has_fixtures:
raise PermissionDenied(_(
"Usage of lookup tables is not supported by your "
"current subscription. Please upgrade your "
"subscription before using this feature."
))
def validate_intents(self):
if domain_has_privilege(self.domain, privileges.CUSTOM_INTENTS):
return
if hasattr(self, 'get_forms'):
for form in self.get_forms():
intents = form.wrapped_xform().odk_intents
if intents:
if not domain_has_privilege(self.domain, privileges.TEMPLATED_INTENTS):
raise PermissionDenied(_(
"Usage of integrations is not supported by your "
"current subscription. Please upgrade your "
"subscription before using this feature."
))
else:
templates = next(app_callout_templates)
if len(set(intents) - set(t['id'] for t in templates)):
raise PermissionDenied(_(
"Usage of external integration is not supported by your "
"current subscription. Please upgrade your "
"subscription before using this feature."
))
def validate_jar_path(self):
build = self.get_build()
setting = commcare_settings.get_commcare_settings_lookup()['hq']['text_input']
value = self.text_input
setting_version = setting['since'].get(value)
if setting_version:
setting_version = tuple(map(int, setting_version.split('.')))
my_version = build.minor_release()
if my_version < setting_version:
i = setting['values'].index(value)
assert i != -1
name = _(setting['value_names'][i])
raise AppEditingError((
'%s Text Input is not supported '
'in CommCare versions before %s.%s. '
'(You are using %s.%s)'
) % ((name,) + setting_version + my_version))
@property
def advanced_app_builder(self):
properties = (self.profile or {}).get('properties', {})
return properties.get('advanced_app_builder', 'false') == 'true'
@property
def jad_settings(self):
settings = {
'JavaRosa-Admin-Password': self.admin_password,
'Profile': self.profile_loc,
'MIDlet-Jar-URL': self.jar_url,
#'MIDlet-Name': self.name,
# e.g. 2011-Apr-11 20:45
'CommCare-Release': "true",
}
if self.build_version < '2.8':
settings['Build-Number'] = self.version
return settings
def create_build_files(self, save=False, build_profile_id=None):
built_on = datetime.datetime.utcnow()
all_files = self.create_all_files(build_profile_id)
if save:
self.date_created = built_on
self.built_on = built_on
self.built_with = BuildRecord(
version=self.build_spec.version,
build_number=self.version,
datetime=built_on,
)
for filepath in all_files:
self.lazy_put_attachment(all_files[filepath],
'files/%s' % filepath)
def create_jadjar_from_build_files(self, save=False):
self.validate_jar_path()
with CriticalSection(['create_jadjar_' + self._id]):
try:
return (
self.lazy_fetch_attachment('CommCare.jad'),
self.lazy_fetch_attachment('CommCare.jar'),
)
except (ResourceError, KeyError):
all_files = {
filename[len('files/'):]: self.lazy_fetch_attachment(filename)
for filename in self.blobs if filename.startswith('files/')
}
all_files = {
name: (contents if isinstance(contents, str) else contents.encode('utf-8'))
for name, contents in all_files.items()
}
release_date = self.built_with.datetime or datetime.datetime.utcnow()
jad_settings = {
'Released-on': release_date.strftime("%Y-%b-%d %H:%M"),
}
jad_settings.update(self.jad_settings)
jadjar = self.get_jadjar().pack(all_files, jad_settings)
if save:
self.lazy_put_attachment(jadjar.jad, 'CommCare.jad')
self.lazy_put_attachment(jadjar.jar, 'CommCare.jar')
self.built_with.signed = jadjar.signed
return jadjar.jad, jadjar.jar
def validate_app(self):
errors = []
errors.extend(self.check_password_charset())
try:
self.validate_fixtures()
self.validate_intents()
self.create_all_files()
except CaseXPathValidationError as cve:
errors.append({
'type': 'invalid case xpath reference',
'module': cve.module,
'form': cve.form,
})
except UserCaseXPathValidationError as ucve:
errors.append({
'type': 'invalid user case xpath reference',
'module': ucve.module,
'form': ucve.form,
})
except (AppEditingError, XFormValidationError, XFormException,
PermissionDenied, SuiteValidationError) as e:
errors.append({'type': 'error', 'message': unicode(e)})
except Exception as e:
if settings.DEBUG:
raise
# this is much less useful/actionable without a URL
# so make sure to include the request
logging.error('Unexpected error building app', exc_info=True,
extra={'request': view_utils.get_request()})
errors.append({'type': 'error', 'message': 'unexpected error: %s' % e})
return errors
@absolute_url_property
def odk_profile_url(self):
return reverse('download_odk_profile', args=[self.domain, self._id])
@absolute_url_property
def odk_media_profile_url(self):
return reverse('download_odk_media_profile', args=[self.domain, self._id])
@property
def odk_profile_display_url(self):
return self.short_odk_url or self.odk_profile_url
@property
def odk_media_profile_display_url(self):
return self.short_odk_media_url or self.odk_media_profile_url
def get_odk_qr_code(self, with_media=False, build_profile_id=None):
"""Returns a QR code, as a PNG to install on CC-ODK"""
try:
return self.lazy_fetch_attachment("qrcode.png")
except ResourceNotFound:
from pygooglechart import QRChart
HEIGHT = WIDTH = 250
code = QRChart(HEIGHT, WIDTH)
url = self.odk_profile_url if not with_media else self.odk_media_profile_url
if build_profile_id is not None:
url += '?profile={profile_id}'.format(profile_id=build_profile_id)
code.add_data(url)
# "Level L" error correction with a 0 pixel margin
code.set_ec('L', 0)
f, fname = tempfile.mkstemp()
code.download(fname)
os.close(f)
with open(fname, "rb") as f:
png_data = f.read()
self.lazy_put_attachment(png_data, "qrcode.png",
content_type="image/png")
return png_data
def generate_shortened_url(self, view_name, build_profile_id=None):
try:
if settings.BITLY_LOGIN:
if build_profile_id is not None:
long_url = "{}{}?profile={}".format(
self.url_base, reverse(view_name, args=[self.domain, self._id]), build_profile_id
)
else:
long_url = "{}{}".format(self.url_base, reverse(view_name, args=[self.domain, self._id]))
shortened_url = bitly.shorten(long_url)
else:
shortened_url = None
except Exception:
logging.exception("Problem creating bitly url for app %s. Do you have network?" % self.get_id)
else:
return shortened_url
def get_short_url(self, build_profile_id=None):
if not build_profile_id:
if not self.short_url:
self.short_url = self.generate_shortened_url('download_jad')
self.save()
return self.short_url
else:
return self.generate_shortened_url('download_jad', build_profile_id)
def get_short_odk_url(self, with_media=False, build_profile_id=None):
if not build_profile_id:
if with_media:
if not self.short_odk_media_url:
self.short_odk_media_url = self.generate_shortened_url('download_odk_media_profile')
self.save()
return self.short_odk_media_url
else:
if not self.short_odk_url:
self.short_odk_url = self.generate_shortened_url('download_odk_profile')
self.save()
return self.short_odk_url
else:
if with_media:
return self.generate_shortened_url('download_odk_media_profile', build_profile_id)
else:
return self.generate_shortened_url('download_odk_profile', build_profile_id)
def fetch_jar(self):
return self.get_jadjar().fetch_jar()
def make_build(self, comment=None, user_id=None, previous_version=None):
copy = super(ApplicationBase, self).make_build()
if not copy._id:
# I expect this always to be the case
# but check explicitly so as not to change the _id if it exists
copy._id = copy.get_db().server.next_uuid()
force_new_forms = False
if previous_version and self.build_profiles != previous_version.build_profiles:
force_new_forms = True
copy.set_form_versions(previous_version, force_new_forms)
copy.set_media_versions(previous_version)
copy.create_build_files(save=True)
# since this is hard to put in a test,
# I'm putting this assert here: if copy._id is ever None,
# tests will error
assert copy._id
copy.build_comment = comment
copy.comment_from = user_id
if user_id:
user = CouchUser.get(user_id)
if not user.has_built_app:
user.has_built_app = True
user.save()
copy.is_released = False
if not copy.is_remote_app():
copy.update_mm_map()
return copy
def delete_app(self):
domain_has_apps.clear(self.domain)
self.doc_type += '-Deleted'
record = DeleteApplicationRecord(
domain=self.domain,
app_id=self.id,
datetime=datetime.datetime.utcnow()
)
record.save()
return record
def save(self, response_json=None, increment_version=None, **params):
if not self._rev and not domain_has_apps(self.domain):
domain_has_apps.clear(self.domain)
user = getattr(view_utils.get_request(), 'couch_user', None)
if user and user.days_since_created == 0:
track_workflow(user.get_email(), 'Saved the App Builder within first 24 hours')
super(ApplicationBase, self).save(
response_json=response_json, increment_version=increment_version, **params)
def set_form_versions(self, previous_version, force_new_version=False):
# by default doing nothing here is fine.
pass
def set_media_versions(self, previous_version):
pass
def update_mm_map(self):
if self.build_profiles and domain_has_privilege(self.domain, privileges.BUILD_PROFILES):
for lang in self.langs:
self.media_language_map[lang] = MediaList()
for form in self.get_forms():
xml = form.wrapped_xform()
for lang in self.langs:
media = []
for path in xml.all_media_references(lang):
if path is not None:
media.append(path)
map_item = self.multimedia_map.get(path)
# don't break if multimedia is missing
if map_item:
map_item.form_media = True
self.media_language_map[lang].media_refs.extend(media)
else:
self.media_language_map = {}
def get_build_langs(self, build_profile_id=None):
if build_profile_id is not None:
return self.build_profiles[build_profile_id].langs
else:
return self.langs
def validate_lang(lang):
if not re.match(r'^[a-z]{2,3}(-[a-z]*)?$', lang):
raise ValueError("Invalid Language")
def validate_property(property):
"""
Validate a case property name
>>> validate_property('parent/maternal-grandmother_fullName')
>>> validate_property('foo+bar')
Traceback (most recent call last):
...
ValueError: Invalid Property
"""
# this regex is also copied in propertyList.ejs
if not re.match(r'^[a-zA-Z][\w_-]*(/[a-zA-Z][\w_-]*)*$', property):
raise ValueError("Invalid Property")
def validate_detail_screen_field(field):
# If you change here, also change here:
# corehq/apps/app_manager/static/app_manager/js/detail-screen-config.js
field_re = r'^([a-zA-Z][\w_-]*:)*([a-zA-Z][\w_-]*/)*#?[a-zA-Z][\w_-]*$'
if not re.match(field_re, field):
raise ValueError("Invalid Sort Field")
class SavedAppBuild(ApplicationBase):
def to_saved_build_json(self, timezone):
data = super(SavedAppBuild, self).to_json().copy()
for key in ('modules', 'user_registration', 'external_blobs',
'_attachments', 'profile', 'translations',
'description', 'short_description'):
data.pop(key, None)
built_on_user_time = ServerTime(self.built_on).user_time(timezone)
data.update({
'id': self.id,
'built_on_date': built_on_user_time.ui_string(USER_DATE_FORMAT),
'built_on_time': built_on_user_time.ui_string(USER_TIME_FORMAT),
'menu_item_label': self.built_with.get_menu_item_label(),
'jar_path': self.get_jar_path(),
'short_name': self.short_name,
'enable_offline_install': self.enable_offline_install,
})
comment_from = data['comment_from']
if comment_from:
try:
comment_user = CouchUser.get(comment_from)
except ResourceNotFound:
data['comment_user_name'] = comment_from
else:
data['comment_user_name'] = comment_user.full_name
return data
class Application(ApplicationBase, TranslationMixin, HQMediaMixin):
"""
An Application that can be created entirely through the online interface
"""
modules = SchemaListProperty(ModuleBase)
name = StringProperty()
# profile's schema is {'features': {}, 'properties': {}, 'custom_properties': {}}
# ended up not using a schema because properties is a reserved word
profile = DictProperty()
use_custom_suite = BooleanProperty(default=False)
custom_base_url = StringProperty()
cloudcare_enabled = BooleanProperty(default=False)
translation_strategy = StringProperty(default='select-known',
choices=app_strings.CHOICES.keys())
commtrack_requisition_mode = StringProperty(choices=CT_REQUISITION_MODES)
auto_gps_capture = BooleanProperty(default=False)
date_created = DateTimeProperty()
created_from_template = StringProperty()
use_grid_menus = BooleanProperty(default=False)
grid_form_menus = StringProperty(default='none',
choices=['none', 'all', 'some'])
# legacy property; kept around to be able to identify (deprecated) v1 apps
application_version = StringProperty(default=APP_V2, choices=[APP_V1, APP_V2], required=False)
def assert_app_v2(self):
assert self.application_version == APP_V2
@property
@memoized
def commtrack_enabled(self):
if settings.UNIT_TESTING:
return False # override with .tests.util.commtrack_enabled
domain_obj = Domain.get_by_name(self.domain) if self.domain else None
return domain_obj.commtrack_enabled if domain_obj else False
@classmethod
def wrap(cls, data):
for module in data.get('modules', []):
for attr in ('case_label', 'referral_label'):
                if attr not in module:
module[attr] = {}
for lang in data['langs']:
if not module['case_label'].get(lang):
module['case_label'][lang] = commcare_translations.load_translations(lang).get('cchq.case', 'Cases')
if not module['referral_label'].get(lang):
module['referral_label'][lang] = commcare_translations.load_translations(lang).get('cchq.referral', 'Referrals')
data.pop('commtrack_enabled', None) # Remove me after migrating apps
self = super(Application, cls).wrap(data)
# make sure all form versions are None on working copies
if not self.copy_of:
for form in self.get_forms():
form.version = None
# weird edge case where multimedia_map gets set to null and causes issues
if self.multimedia_map is None:
self.multimedia_map = {}
return self
def save(self, *args, **kwargs):
super(Application, self).save(*args, **kwargs)
# Import loop if this is imported at the top
# TODO: revamp so signal_connections <- models <- signals
from corehq.apps.app_manager import signals
signals.app_post_save.send(Application, application=self)
def make_reversion_to_copy(self, copy):
app = super(Application, self).make_reversion_to_copy(copy)
for form in app.get_forms():
# reset the form's validation cache, since the form content is
# likely to have changed in the revert!
form.validation_cache = None
form.version = None
app.build_broken = False
return app
@property
def profile_url(self):
return self.hq_profile_url
@absolute_url_property
def suite_url(self):
return reverse('download_suite', args=[self.domain, self.get_id])
@property
def suite_loc(self):
if self.enable_relative_suite_path:
return './suite.xml'
else:
return "jr://resource/suite.xml"
@absolute_url_property
def media_suite_url(self):
return reverse('download_media_suite', args=[self.domain, self.get_id])
@property
def media_suite_loc(self):
if self.enable_relative_suite_path:
return "./media_suite.xml"
else:
return "jr://resource/media_suite.xml"
@property
def default_language(self):
return self.langs[0] if len(self.langs) > 0 else "en"
def fetch_xform(self, module_id=None, form_id=None, form=None, build_profile_id=None):
if not form:
form = self.get_module(module_id).get_form(form_id)
return form.validate_form().render_xform(build_profile_id).encode('utf-8')
def set_form_versions(self, previous_version, force_new_version=False):
"""
        Set the 'version' property on each form: to the current app version if the
        form is new or has changed since the last build, otherwise to the version
        from the last build.
"""
def _hash(val):
return hashlib.md5(val).hexdigest()
if previous_version:
for form_stuff in self.get_forms(bare=False):
filename = 'files/%s' % self.get_form_filename(**form_stuff)
form = form_stuff["form"]
if not force_new_version:
form_version = None
try:
previous_form = previous_version.get_form(form.unique_id)
# take the previous version's compiled form as-is
# (generation code may have changed since last build)
previous_source = previous_version.fetch_attachment(filename)
except (ResourceNotFound, FormNotFoundException):
pass
else:
previous_hash = _hash(previous_source)
                        # hack - temporarily set this form's version to the
                        # previous version so the version bump itself is not
                        # treated as a difference
previous_form_version = previous_form.get_version()
form.version = previous_form_version
my_hash = _hash(self.fetch_xform(form=form))
if previous_hash == my_hash:
form_version = previous_form_version
if form_version is None:
form.version = None
else:
form.version = form_version
else:
form.version = None
def set_media_versions(self, previous_version):
"""
Set the media version numbers for all media in the app to the current app version
if the media is new or has changed since the last build. Otherwise set it to the
version from the last build.
"""
# access to .multimedia_map is slow
prev_multimedia_map = previous_version.multimedia_map if previous_version else {}
for path, map_item in self.multimedia_map.iteritems():
prev_map_item = prev_multimedia_map.get(path, None)
if prev_map_item and prev_map_item.unique_id:
# Re-use the id so CommCare knows it's the same resource
map_item.unique_id = prev_map_item.unique_id
if (prev_map_item and prev_map_item.version
and prev_map_item.multimedia_id == map_item.multimedia_id):
map_item.version = prev_map_item.version
else:
map_item.version = self.version
def ensure_module_unique_ids(self, should_save=False):
"""
Creates unique_ids for modules that don't have unique_id attributes
should_save: the doc will be saved only if should_save is set to True
WARNING: If called on the same doc in different requests without saving,
this function will set different uuid each time,
likely causing unexpected behavior
"""
if any(not mod.unique_id for mod in self.modules):
for mod in self.modules:
mod.get_or_create_unique_id()
if should_save:
self.save()
def create_app_strings(self, lang, build_profile_id=None):
gen = app_strings.CHOICES[self.translation_strategy]
if lang == 'default':
return gen.create_default_app_strings(self, build_profile_id)
else:
return gen.create_app_strings(self, lang)
@property
def skip_validation(self):
properties = (self.profile or {}).get('properties', {})
return properties.get('cc-content-valid', 'yes')
@property
def jad_settings(self):
s = super(Application, self).jad_settings
s.update({
'Skip-Validation': self.skip_validation,
})
return s
def create_profile(self, is_odk=False, with_media=False,
template='app_manager/profile.xml', build_profile_id=None):
self__profile = self.profile
app_profile = defaultdict(dict)
for setting in commcare_settings.get_custom_commcare_settings():
setting_type = setting['type']
setting_id = setting['id']
if setting_type not in ('properties', 'features'):
setting_value = None
elif setting_id not in self__profile.get(setting_type, {}):
if 'commcare_default' in setting and setting['commcare_default'] != setting['default']:
setting_value = setting['default']
else:
setting_value = None
else:
setting_value = self__profile[setting_type][setting_id]
if setting_value:
app_profile[setting_type][setting_id] = {
'value': setting_value,
'force': setting.get('force', False)
}
# assert that it gets explicitly set once per loop
del setting_value
if self.case_sharing:
app_profile['properties']['server-tether'] = {
'force': True,
'value': 'sync',
}
logo_refs = [logo_name for logo_name in self.logo_refs if logo_name in ANDROID_LOGO_PROPERTY_MAPPING]
if logo_refs and domain_has_privilege(self.domain, privileges.COMMCARE_LOGO_UPLOADER):
for logo_name in logo_refs:
app_profile['properties'][ANDROID_LOGO_PROPERTY_MAPPING[logo_name]] = {
'value': self.logo_refs[logo_name]['path'],
}
if with_media:
profile_url = self.media_profile_url if not is_odk else (self.odk_media_profile_url + '?latest=true')
else:
profile_url = self.profile_url if not is_odk else (self.odk_profile_url + '?latest=true')
if toggles.CUSTOM_PROPERTIES.enabled(self.domain) and "custom_properties" in self__profile:
app_profile['custom_properties'].update(self__profile['custom_properties'])
locale = self.get_build_langs(build_profile_id)[0]
return render_to_string(template, {
'is_odk': is_odk,
'app': self,
'profile_url': profile_url,
'app_profile': app_profile,
'cc_user_domain': cc_user_domain(self.domain),
'include_media_suite': with_media,
'uniqueid': self.copy_of or self.id,
'name': self.name,
'descriptor': u"Profile File",
'build_profile_id': build_profile_id,
'locale': locale
}).encode('utf-8')
@property
def custom_suite(self):
try:
return self.lazy_fetch_attachment('custom_suite.xml')
except ResourceNotFound:
return ""
def set_custom_suite(self, value):
self.put_attachment(value, 'custom_suite.xml')
def create_suite(self, build_profile_id=None):
self.assert_app_v2()
return SuiteGenerator(self, build_profile_id).generate_suite()
def create_media_suite(self, build_profile_id=None):
return MediaSuiteGenerator(self, build_profile_id).generate_suite()
@classmethod
def get_form_filename(cls, type=None, form=None, module=None):
return 'modules-%s/forms-%s.xml' % (module.id, form.id)
def create_all_files(self, build_profile_id=None):
prefix = '' if not build_profile_id else build_profile_id + '/'
files = {
'{}profile.xml'.format(prefix): self.create_profile(is_odk=False, build_profile_id=build_profile_id),
'{}profile.ccpr'.format(prefix): self.create_profile(is_odk=True, build_profile_id=build_profile_id),
'{}media_profile.xml'.format(prefix):
self.create_profile(is_odk=False, with_media=True, build_profile_id=build_profile_id),
'{}media_profile.ccpr'.format(prefix):
self.create_profile(is_odk=True, with_media=True, build_profile_id=build_profile_id),
'{}suite.xml'.format(prefix): self.create_suite(build_profile_id),
'{}media_suite.xml'.format(prefix): self.create_media_suite(build_profile_id),
}
langs_for_build = self.get_build_langs(build_profile_id)
for lang in ['default'] + langs_for_build:
files["{prefix}{lang}/app_strings.txt".format(
prefix=prefix, lang=lang)] = self.create_app_strings(lang, build_profile_id)
for form_stuff in self.get_forms(bare=False):
filename = prefix + self.get_form_filename(**form_stuff)
form = form_stuff['form']
try:
files[filename] = self.fetch_xform(form=form, build_profile_id=build_profile_id)
except XFormException as e:
raise XFormException(_('Error in form "{}": {}').format(trans(form.name), unicode(e)))
return files
get_modules = IndexedSchema.Getter('modules')
@parse_int([1])
def get_module(self, i):
try:
return self.modules[i].with_id(i % len(self.modules), self)
except IndexError:
raise ModuleNotFoundException()
def get_module_by_unique_id(self, unique_id, error=''):
def matches(module):
return module.get_or_create_unique_id() == unique_id
for obj in self.get_modules():
if matches(obj):
return obj
if not error:
error = _("Could not find '{unique_id}' in app '{app_id}'.").format(
app_id=self.id, unique_id=unique_id)
raise ModuleNotFoundException(error)
def get_forms(self, bare=True):
for module in self.get_modules():
for form in module.get_forms():
yield form if bare else {
'type': 'module_form',
'module': module,
'form': form
}
def get_form(self, unique_form_id, bare=True):
def matches(form):
return form.get_unique_id() == unique_form_id
for obj in self.get_forms(bare):
if matches(obj if bare else obj['form']):
return obj
raise FormNotFoundException(
("Form in app '%s' with unique id '%s' not found"
% (self.id, unique_form_id)))
def get_form_location(self, unique_form_id):
for m_index, module in enumerate(self.get_modules()):
for f_index, form in enumerate(module.get_forms()):
if unique_form_id == form.unique_id:
return m_index, f_index
raise KeyError("Form in app '%s' with unique id '%s' not found" % (self.id, unique_form_id))
@classmethod
def new_app(cls, domain, name, lang="en"):
app = cls(domain=domain, modules=[], name=name, langs=[lang],
date_created=datetime.datetime.utcnow(), vellum_case_management=True)
return app
def add_module(self, module):
self.modules.append(module)
return self.get_module(-1)
def delete_module(self, module_unique_id):
try:
module = self.get_module_by_unique_id(module_unique_id)
except ModuleNotFoundException:
return None
record = DeleteModuleRecord(
domain=self.domain,
app_id=self.id,
module_id=module.id,
module=module,
datetime=datetime.datetime.utcnow()
)
del self.modules[module.id]
record.save()
return record
def new_form(self, module_id, name, lang, attachment=""):
module = self.get_module(module_id)
return module.new_form(name, lang, attachment)
def delete_form(self, module_unique_id, form_unique_id):
try:
module = self.get_module_by_unique_id(module_unique_id)
form = self.get_form(form_unique_id)
except (ModuleNotFoundException, FormNotFoundException):
return None
record = DeleteFormRecord(
domain=self.domain,
app_id=self.id,
module_unique_id=module_unique_id,
form_id=form.id,
form=form,
datetime=datetime.datetime.utcnow(),
)
record.save()
try:
form.pre_delete_hook()
except NotImplementedError:
pass
del module['forms'][form.id]
return record
def rename_lang(self, old_lang, new_lang):
validate_lang(new_lang)
if old_lang == new_lang:
return
if new_lang in self.langs:
raise AppEditingError("Language %s already exists!" % new_lang)
for i,lang in enumerate(self.langs):
if lang == old_lang:
self.langs[i] = new_lang
for profile in self.build_profiles:
for i, lang in enumerate(profile.langs):
if lang == old_lang:
profile.langs[i] = new_lang
for module in self.get_modules():
module.rename_lang(old_lang, new_lang)
_rename_key(self.translations, old_lang, new_lang)
def rearrange_modules(self, i, j):
modules = self.modules
try:
modules.insert(i, modules.pop(j))
except IndexError:
raise RearrangeError()
self.modules = modules
def rearrange_forms(self, to_module_id, from_module_id, i, j):
"""
        If the case types of the two modules conflict, ConflictingCaseTypeError
        is raised, but the rearrangement (confusingly) goes through anyway.
This is intentional.
"""
to_module = self.get_module(to_module_id)
from_module = self.get_module(from_module_id)
try:
from_module.forms[j].pre_move_hook(from_module, to_module)
except NotImplementedError:
pass
try:
form = from_module.forms.pop(j)
if toggles.APP_MANAGER_V2.enabled(self.domain):
if not to_module.is_surveys and i == 0:
# first form is the reg form
i = 1
if from_module.is_surveys != to_module.is_surveys:
if from_module.is_surveys:
form.requires = "case"
form.actions.update_case = UpdateCaseAction(
condition=FormActionCondition(type='always'))
else:
form.requires = "none"
form.actions.update_case = UpdateCaseAction(
condition=FormActionCondition(type='never'))
to_module.add_insert_form(from_module, form, index=i, with_source=True)
except IndexError:
raise RearrangeError()
if to_module.case_type != from_module.case_type \
and not toggles.APP_MANAGER_V2.enabled(self.domain):
raise ConflictingCaseTypeError()
def scrub_source(self, source):
return update_unique_ids(source)
def copy_form(self, module_id, form_id, to_module_id):
"""
        If the case types of the two modules conflict, ConflictingCaseTypeError
        is raised, but the copying (confusingly) goes through anyway.
This is intentional.
"""
from_module = self.get_module(module_id)
form = from_module.get_form(form_id)
to_module = self.get_module(to_module_id)
self._copy_form(from_module, form, to_module, rename=True)
def _copy_form(self, from_module, form, to_module, *args, **kwargs):
if not form.source:
raise BlankXFormError()
if from_module['case_type'] != to_module['case_type']:
raise ConflictingCaseTypeError()
copy_source = deepcopy(form.to_json())
if 'unique_id' in copy_source:
del copy_source['unique_id']
if 'rename' in kwargs and kwargs['rename']:
for lang, name in copy_source['name'].iteritems():
with override(lang):
copy_source['name'][lang] = _('Copy of {name}').format(name=name)
copy_form = to_module.add_insert_form(from_module, FormBase.wrap(copy_source))
save_xform(self, copy_form, form.source)
@cached_property
def has_case_management(self):
for module in self.get_modules():
for form in module.get_forms():
if len(form.active_actions()) > 0:
return True
return False
@memoized
def case_type_exists(self, case_type):
return case_type in self.get_case_types()
@memoized
def get_case_types(self):
extra_types = set()
if is_usercase_in_use(self.domain):
extra_types.add(USERCASE_TYPE)
return set(chain(*[m.get_case_types() for m in self.get_modules()])) | extra_types
def has_media(self):
return len(self.multimedia_map) > 0
@memoized
def get_xmlns_map(self):
xmlns_map = defaultdict(list)
for form in self.get_forms():
xmlns_map[form.xmlns].append(form)
return xmlns_map
def get_form_by_xmlns(self, xmlns, log_missing=True):
if xmlns == "http://code.javarosa.org/devicereport":
return None
forms = self.get_xmlns_map()[xmlns]
if len(forms) != 1:
if log_missing or len(forms) > 1:
logging.error('App %s in domain %s has %s forms with xmlns %s' % (
self.get_id,
self.domain,
len(forms),
xmlns,
))
return None
else:
form, = forms
return form
def get_questions(self, xmlns):
form = self.get_form_by_xmlns(xmlns)
if not form:
return []
return form.get_questions(self.langs)
def check_subscription(self):
def app_uses_usercase(app):
return any(m.uses_usercase() for m in app.get_modules())
errors = []
if app_uses_usercase(self) and not domain_has_privilege(self.domain, privileges.USER_CASE):
errors.append({
'type': 'subscription',
'message': _('Your application is using User Case functionality. You can remove User Case '
'functionality by opening the User Case Management tab in a form that uses it, and '
'clicking "Remove User Case Properties".')
})
return errors
def validate_app(self):
xmlns_count = defaultdict(int)
errors = []
for lang in self.langs:
if not lang:
errors.append({'type': 'empty lang'})
if not self.modules:
errors.append({'type': "no modules"})
for module in self.get_modules():
errors.extend(module.validate_for_build())
for form in self.get_forms():
errors.extend(form.validate_for_build(validate_module=False))
# make sure that there aren't duplicate xmlns's
xmlns_count[form.xmlns] += 1
for xmlns in xmlns_count:
if xmlns_count[xmlns] > 1:
errors.append({'type': "duplicate xmlns", "xmlns": xmlns})
if any(not module.unique_id for module in self.get_modules()):
raise ModuleIdMissingException
modules_dict = {m.unique_id: m for m in self.get_modules()}
def _parent_select_fn(module):
if hasattr(module, 'parent_select') and module.parent_select.active:
return module.parent_select.module_id
if self._has_dependency_cycle(modules_dict, _parent_select_fn):
errors.append({'type': 'parent cycle'})
errors.extend(self._child_module_errors(modules_dict))
errors.extend(self.check_subscription())
if not errors:
errors = super(Application, self).validate_app()
return errors
def _has_dependency_cycle(self, modules, neighbour_id_fn):
"""
Detect dependency cycles given modules and the neighbour_id_fn
:param modules: A mapping of module unique_ids to Module objects
        :param neighbour_id_fn: function to get the neighbour module's unique_id
:return: True if there is a cycle in the module relationship graph
"""
visited = set()
completed = set()
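        # Depth-first traversal with two marks: 'visited' means a module has
        # been entered, 'completed' means its ancestor chain was fully explored.
        # Re-entering a module that is visited but not yet completed means a cycle.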
def cycle_helper(m):
if m.id in visited:
if m.id in completed:
return False
return True
visited.add(m.id)
parent = modules.get(neighbour_id_fn(m), None)
if parent is not None and cycle_helper(parent):
return True
completed.add(m.id)
return False
for module in modules.values():
if cycle_helper(module):
return True
return False
def _child_module_errors(self, modules_dict):
module_errors = []
def _root_module_fn(module):
if hasattr(module, 'root_module_id'):
return module.root_module_id
if self._has_dependency_cycle(modules_dict, _root_module_fn):
module_errors.append({'type': 'root cycle'})
module_ids = set([m.unique_id for m in self.get_modules()])
root_ids = set([_root_module_fn(m) for m in self.get_modules() if _root_module_fn(m) is not None])
if not root_ids.issubset(module_ids):
module_errors.append({'type': 'unknown root'})
return module_errors
def get_profile_setting(self, s_type, s_id):
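        # Resolution order: explicit profile value, then the last matching
        # contingent default from the settings YAML, then the disabled default
        # when the build predates the setting's 'since' version, then the
        # plain default.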
setting = self.profile.get(s_type, {}).get(s_id)
if setting is not None:
return setting
yaml_setting = commcare_settings.get_commcare_settings_lookup()[s_type][s_id]
for contingent in yaml_setting.get("contingent_default", []):
if check_condition(self, contingent["condition"]):
setting = contingent["value"]
if setting is not None:
return setting
if self.build_version < yaml_setting.get("since", "0"):
setting = yaml_setting.get("disabled_default", None)
if setting is not None:
return setting
return yaml_setting.get("default")
@property
def has_careplan_module(self):
return any((module for module in self.modules if isinstance(module, CareplanModule)))
@quickcache(['self.version'])
def get_case_metadata(self):
from corehq.apps.reports.formdetails.readable import AppCaseMetadata
builder = ParentCasePropertyBuilder(self)
case_relationships = builder.get_parent_type_map(self.get_case_types())
meta = AppCaseMetadata()
for case_type, relationships in case_relationships.items():
type_meta = meta.get_type(case_type)
type_meta.relationships = relationships
for module in self.get_modules():
for form in module.get_forms():
form.update_app_case_meta(meta)
seen_types = []
def get_children(case_type):
seen_types.append(case_type)
return [type_.name for type_ in meta.case_types if type_.relationships.get('parent') == case_type]
def get_hierarchy(case_type):
return {child: get_hierarchy(child) for child in get_children(case_type)}
roots = [type_ for type_ in meta.case_types if not type_.relationships]
for type_ in roots:
meta.type_hierarchy[type_.name] = get_hierarchy(type_.name)
for type_ in meta.case_types:
if type_.name not in seen_types:
meta.type_hierarchy[type_.name] = {}
type_.error = _("Error in case type hierarchy")
return meta
def get_subcase_types(self, case_type):
"""
Return the subcase types defined across an app for the given case type
"""
return {t for m in self.get_modules()
if m.case_type == case_type
for t in m.get_subcase_types()}
@memoized
def grid_display_for_some_modules(self):
return self.grid_menu_toggle_enabled() and self.grid_form_menus == 'some'
@memoized
def grid_display_for_all_modules(self):
return self.grid_menu_toggle_enabled() and self.grid_form_menus == 'all'
def grid_menu_toggle_enabled(self):
return toggles.GRID_MENUS.enabled(self.domain)
class RemoteApp(ApplicationBase):
"""
A wrapper for a url pointing to a suite or profile file. This allows you to
write all the files for an app by hand, and then give the url to app_manager
and let it package everything together for you.
"""
profile_url = StringProperty(default="http://")
name = StringProperty()
manage_urls = BooleanProperty(default=False)
questions_map = DictProperty(required=False)
def is_remote_app(self):
return True
@classmethod
def new_app(cls, domain, name, lang='en'):
app = cls(domain=domain, name=name, langs=[lang])
return app
def create_profile(self, is_odk=False, langs=None):
# we don't do odk for now anyway
return remote_app.make_remote_profile(self, langs)
def strip_location(self, location):
return remote_app.strip_location(self.profile_url, location)
def fetch_file(self, location):
location = self.strip_location(location)
url = urljoin(self.profile_url, location)
try:
content = urlopen(url).read()
except Exception:
raise AppEditingError('Unable to access resource url: "%s"' % url)
return location, content
def get_build_langs(self):
if self.build_profiles:
if len(self.build_profiles.keys()) > 1:
raise AppEditingError('More than one app profile for a remote app')
else:
# return first profile, generated as part of lazy migration
return self.build_profiles[self.build_profiles.keys()[0]].langs
else:
return self.langs
@classmethod
def get_locations(cls, suite):
for resource in suite.findall('*/resource'):
try:
loc = resource.findtext('location[@authority="local"]')
except Exception:
loc = resource.findtext('location[@authority="remote"]')
yield resource.getparent().tag, loc
@property
def SUITE_XPATH(self):
return 'suite/resource/location[@authority="local"]'
def create_all_files(self, build_profile_id=None):
langs_for_build = self.get_build_langs()
files = {
'profile.xml': self.create_profile(langs=langs_for_build),
}
tree = _parse_xml(files['profile.xml'])
def add_file_from_path(path, strict=False, transform=None):
added_files = []
# must find at least one
try:
tree.find(path).text
except (TypeError, AttributeError):
if strict:
raise AppEditingError("problem with file path reference!")
else:
return
for loc_node in tree.findall(path):
loc, file = self.fetch_file(loc_node.text)
if transform:
file = transform(file)
files[loc] = file
added_files.append(file)
return added_files
add_file_from_path('features/users/logo')
try:
suites = add_file_from_path(
self.SUITE_XPATH,
strict=True,
transform=(lambda suite:
remote_app.make_remote_suite(self, suite))
)
except AppEditingError:
raise AppEditingError(ugettext('Problem loading suite file from profile file. Is your profile file correct?'))
for suite in suites:
suite_xml = _parse_xml(suite)
for tag, location in self.get_locations(suite_xml):
location, data = self.fetch_file(location)
if tag == 'xform' and langs_for_build:
try:
xform = XForm(data)
except XFormException as e:
raise XFormException('In file %s: %s' % (location, e))
xform.exclude_languages(whitelist=langs_for_build)
data = xform.render()
files.update({location: data})
return files
def make_questions_map(self):
langs_for_build = self.get_build_langs()
if self.copy_of:
xmlns_map = {}
def fetch(location):
filepath = self.strip_location(location)
return self.fetch_attachment('files/%s' % filepath)
profile_xml = _parse_xml(fetch('profile.xml'))
suite_location = profile_xml.find(self.SUITE_XPATH).text
suite_xml = _parse_xml(fetch(suite_location))
for tag, location in self.get_locations(suite_xml):
if tag == 'xform':
xform = XForm(fetch(location))
xmlns = xform.data_node.tag_xmlns
questions = xform.get_questions(langs_for_build)
xmlns_map[xmlns] = questions
return xmlns_map
else:
return None
def get_questions(self, xmlns):
if not self.questions_map:
self.questions_map = self.make_questions_map()
if not self.questions_map:
return []
self.save()
questions = self.questions_map.get(xmlns, [])
return questions
str_to_cls = {
"Application": Application,
"Application-Deleted": Application,
"RemoteApp": RemoteApp,
"RemoteApp-Deleted": RemoteApp,
}
def import_app(app_id_or_source, domain, source_properties=None, validate_source_domain=None):
if isinstance(app_id_or_source, basestring):
app_id = app_id_or_source
source = get_app(None, app_id)
src_dom = source['domain']
if validate_source_domain:
validate_source_domain(src_dom)
source = source.export_json()
source = json.loads(source)
else:
cls = str_to_cls[app_id_or_source['doc_type']]
# Don't modify original app source
app = cls.wrap(deepcopy(app_id_or_source))
source = app.export_json(dump_json=False)
try:
attachments = source['_attachments']
except KeyError:
attachments = {}
finally:
source['_attachments'] = {}
if source_properties is not None:
for key, value in source_properties.iteritems():
source[key] = value
cls = str_to_cls[source['doc_type']]
# Allow the wrapper to update to the current default build_spec
if 'build_spec' in source:
del source['build_spec']
app = cls.from_source(source, domain)
app.date_created = datetime.datetime.utcnow()
app.cloudcare_enabled = domain_has_privilege(domain, privileges.CLOUDCARE)
with app.atomic_blobs():
for name, attachment in attachments.items():
if re.match(ATTACHMENT_REGEX, name):
app.put_attachment(attachment, name)
if not app.is_remote_app():
for _, m in app.get_media_objects():
if domain not in m.valid_domains:
m.valid_domains.append(domain)
m.save()
if not app.is_remote_app() and any(module.uses_usercase() for module in app.get_modules()):
from corehq.apps.app_manager.util import enable_usercase
enable_usercase(domain)
return app
class DeleteApplicationRecord(DeleteRecord):
app_id = StringProperty()
def undo(self):
app = ApplicationBase.get(self.app_id)
app.doc_type = app.get_doc_type()
app.save(increment_version=False)
class DeleteModuleRecord(DeleteRecord):
app_id = StringProperty()
module_id = IntegerProperty()
module = SchemaProperty(ModuleBase)
def undo(self):
app = Application.get(self.app_id)
modules = app.modules
modules.insert(self.module_id, self.module)
app.modules = modules
app.save()
class DeleteFormRecord(DeleteRecord):
app_id = StringProperty()
module_id = IntegerProperty()
module_unique_id = StringProperty()
form_id = IntegerProperty()
form = SchemaProperty(FormBase)
def undo(self):
app = Application.get(self.app_id)
if self.module_unique_id is not None:
name = trans(self.form.name, app.default_language, include_lang=False)
module = app.get_module_by_unique_id(
self.module_unique_id,
error=_("Could not find form '{}'").format(name)
)
else:
module = app.modules[self.module_id]
forms = module.forms
forms.insert(self.form_id, self.form)
module.forms = forms
app.save()
class CareplanAppProperties(DocumentSchema):
name = StringProperty()
latest_release = StringProperty()
case_type = StringProperty()
goal_conf = DictProperty()
task_conf = DictProperty()
class CareplanConfig(Document):
domain = StringProperty()
app_configs = SchemaDictProperty(CareplanAppProperties)
@classmethod
def for_domain(cls, domain):
res = cache_core.cached_view(
cls.get_db(),
"by_domain_doc_type_date/view",
key=[domain, 'CareplanConfig', None],
reduce=False,
include_docs=True,
wrapper=cls.wrap)
if len(res) > 0:
result = res[0]
else:
result = None
return result
# backwards compatibility with suite-1.0.xml
FormBase.get_command_id = lambda self: id_strings.form_command(self)
FormBase.get_locale_id = lambda self: id_strings.form_locale(self)
ModuleBase.get_locale_id = lambda self: id_strings.module_locale(self)
ModuleBase.get_case_list_command_id = lambda self: id_strings.case_list_command(self)
ModuleBase.get_case_list_locale_id = lambda self: id_strings.case_list_locale(self)
Module.get_referral_list_command_id = lambda self: id_strings.referral_list_command(self)
Module.get_referral_list_locale_id = lambda self: id_strings.referral_list_locale(self)
|
bsd-3-clause
| -5,053,952,302,909,447,000
| 35.65539
| 132
| 0.582232
| false
| 4.138556
| false
| false
| false
|
alphagov/notifications-api
|
app/utils.py
|
1
|
5728
|
from datetime import datetime, timedelta
import pytz
from flask import url_for
from notifications_utils.template import (
BroadcastMessageTemplate,
HTMLEmailTemplate,
LetterPrintTemplate,
SMSMessageTemplate,
)
from notifications_utils.timezones import convert_utc_to_bst
from sqlalchemy import func
DATETIME_FORMAT_NO_TIMEZONE = "%Y-%m-%d %H:%M:%S.%f"
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
DATE_FORMAT = "%Y-%m-%d"
local_timezone = pytz.timezone("Europe/London")
def pagination_links(pagination, endpoint, **kwargs):
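    """
    Build prev/next/last page links for a pagination object. An illustrative
    result shape (actual URLs come from url_for; 'prev' appears only when a
    previous page exists, 'next' and 'last' only when a next page exists):
    {'prev': '/things?page=1', 'next': '/things?page=3', 'last': '/things?page=9'}
    """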
    kwargs.pop('page', None)
links = {}
if pagination.has_prev:
links['prev'] = url_for(endpoint, page=pagination.prev_num, **kwargs)
if pagination.has_next:
links['next'] = url_for(endpoint, page=pagination.next_num, **kwargs)
links['last'] = url_for(endpoint, page=pagination.pages, **kwargs)
return links
def url_with_token(data, url, config, base_url=None):
from notifications_utils.url_safe_token import generate_token
token = generate_token(data, config['SECRET_KEY'], config['DANGEROUS_SALT'])
base_url = (base_url or config['ADMIN_BASE_URL']) + url
return base_url + token
def get_template_instance(template, values):
from app.models import BROADCAST_TYPE, EMAIL_TYPE, LETTER_TYPE, SMS_TYPE
return {
SMS_TYPE: SMSMessageTemplate,
EMAIL_TYPE: HTMLEmailTemplate,
LETTER_TYPE: LetterPrintTemplate,
BROADCAST_TYPE: BroadcastMessageTemplate,
}[template['template_type']](template, values)
def get_london_midnight_in_utc(date):
"""
    This function converts a date to midnight local London time (BST, British
    Summer Time, or GMT in winter) expressed in UTC; the tzinfo is then removed
    from the datetime because the database stores the timestamps without timezone.
:param date: the day to calculate the London midnight in UTC for
:return: the datetime of London midnight in UTC, for example 2016-06-17 = 2016-06-16 23:00:00
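    An illustrative doctest (Europe/London is UTC+1 on this date):
    >>> get_london_midnight_in_utc(datetime(2016, 6, 17))
    datetime.datetime(2016, 6, 16, 23, 0)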
"""
return local_timezone.localize(datetime.combine(date, datetime.min.time())).astimezone(
pytz.UTC).replace(
tzinfo=None)
def get_midnight_for_day_before(date):
day_before = date - timedelta(1)
return get_london_midnight_in_utc(day_before)
def get_london_month_from_utc_column(column):
"""
Where queries need to count notifications by month it needs to be
the month in BST (British Summer Time).
The database stores all timestamps as UTC without the timezone.
- First set the timezone on created_at to UTC
- then convert the timezone to BST (or Europe/London)
- lastly truncate the datetime to month with which we can group
queries
"""
return func.date_trunc(
"month",
func.timezone("Europe/London", func.timezone("UTC", column))
)
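# A minimal usage sketch (illustrative; `Notification` and `db.session` are
# assumed names for a model with a UTC created_at column and an open session):
#
#     month = get_london_month_from_utc_column(Notification.created_at)
#     db.session.query(month, func.count()).group_by(month).all()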
def get_public_notify_type_text(notify_type, plural=False):
from app.models import (
BROADCAST_TYPE,
PRECOMPILED_LETTER,
SMS_TYPE,
UPLOAD_DOCUMENT,
)
notify_type_text = notify_type
if notify_type == SMS_TYPE:
notify_type_text = 'text message'
elif notify_type == UPLOAD_DOCUMENT:
notify_type_text = 'document'
elif notify_type == PRECOMPILED_LETTER:
notify_type_text = 'precompiled letter'
elif notify_type == BROADCAST_TYPE:
notify_type_text = 'broadcast message'
return '{}{}'.format(notify_type_text, 's' if plural else '')
def midnight_n_days_ago(number_of_days):
"""
Returns midnight a number of days ago. Takes care of daylight savings etc.
"""
return get_london_midnight_in_utc(datetime.utcnow() - timedelta(days=number_of_days))
def escape_special_characters(string):
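    r"""
    Backslash-escape the characters \, _, % and / in a string, e.g.:
    >>> escape_special_characters('100%_real')
    '100\\%\\_real'
    """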
for special_character in ('\\', '_', '%', '/'):
string = string.replace(
special_character,
r'\{}'.format(special_character)
)
return string
def email_address_is_nhs(email_address):
return email_address.lower().endswith((
'@nhs.uk', '@nhs.net', '.nhs.uk', '.nhs.net',
))
def get_notification_table_to_use(service, notification_type, process_day, has_delete_task_run):
"""
Work out what table will contain notification data for a service by looking up their data retention.
Make sure that when you run this you think about whether the delete task has run for that day! If it's run, the
notifications from that day will have moved to NotificationHistory. The delete tasks run between 4 and 5am every
morning.
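    For example (illustrative): with the default 7 days of retention, a
    process_day 8 days ago still reads from Notification if the delete task
    has not yet run, but from NotificationHistory once it has.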
"""
from app.models import Notification, NotificationHistory
data_retention = service.data_retention.get(notification_type)
days_of_retention = data_retention.days_of_retention if data_retention else 7
todays_bst_date = convert_utc_to_bst(datetime.utcnow()).date()
days_ago = todays_bst_date - process_day
if not has_delete_task_run:
# if the task hasn't run yet, we've got an extra day of data in the notification table so can go back an extra
# day before looking at NotificationHistory
days_of_retention += 1
return Notification if days_ago <= timedelta(days=days_of_retention) else NotificationHistory
def get_archived_db_column_value(column):
date = datetime.utcnow().strftime("%Y-%m-%d")
return f'_archived_{date}_{column}'
def get_dt_string_or_none(val):
return val.strftime(DATETIME_FORMAT) if val else None
def get_uuid_string_or_none(val):
return str(val) if val else None
def format_sequential_number(sequential_number):
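    """Format a number as zero-padded 8-character lowercase hex, e.g. 255 -> '000000ff'."""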
return format(sequential_number, "x").zfill(8)
def get_reference_from_personalisation(personalisation):
if personalisation:
return personalisation.get("reference")
return None
|
mit
| -5,519,629,083,492,794,000
| 33.095238
| 118
| 0.68523
| false
| 3.669443
| false
| false
| false
|
gnulinooks/sympy
|
sympy/geometry/line.py
|
1
|
19520
|
from sympy.core.basic import Basic, S, C
from sympy.simplify import simplify
from sympy.geometry.exceptions import GeometryError
from entity import GeometryEntity
from point import Point
class LinearEntity(GeometryEntity):
"""
A linear entity (line, ray, segment, etc) in space.
This is an abstract class and is not meant to be instantiated.
Subclasses should implement the following methods:
__eq__
__contains__
"""
def __new__(cls, p1, p2, **kwargs):
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError("%s.__new__ requires Point instances" % cls.__name__)
if p1 == p2:
raise RuntimeError("%s.__new__ requires two distinct points" % cls.__name__)
return GeometryEntity.__new__(cls, p1, p2, **kwargs)
@property
def p1(self):
"""One of the defining points of a linear entity."""
return self.__getitem__(0)
@property
def p2(self):
"""One of the defining points of a linear entity."""
return self.__getitem__(1)
@property
def coefficients(self):
"""The coefficients (a,b,c) for equation ax+by+c=0"""
return (self.p1[1]-self.p2[1],
self.p2[0]-self.p1[0],
self.p1[0]*self.p2[1] - self.p1[1]*self.p2[0])
def is_concurrent(*lines):
"""
Returns True if the set of linear entities are concurrent, False
otherwise. Two or more linear entities are concurrent if they all
intersect at a single point.
Description of Method Used:
===========================
Simply take the first two lines and find their intersection.
If there is no intersection, then the first two lines were
        parallel and had no intersection so concurrency is impossible
amongst the whole set. Otherwise, check to see if the
intersection point of the first two lines is a member on
the rest of the lines. If so, the lines are concurrent.
"""
_lines = lines
lines = GeometryEntity.extract_entities(lines)
# Concurrency requires intersection at a single point; One linear
# entity cannot be concurrent.
if len(lines) <= 1:
return False
try:
            # Get the intersection of the first two lines (empty if parallel)
p = GeometryEntity.do_intersection(lines[0], lines[1])
if len(p) == 0: return False
# Make sure the intersection is on every linear entity
for line in lines[2:]:
if p[0] not in line:
return False
return True
except AttributeError:
return False
def is_parallel(l1, l2):
"""Returns True if l1 and l2 are parallel, False otherwise"""
try:
a1,b1,c1 = l1.coefficients
a2,b2,c2 = l2.coefficients
return bool(simplify(a1*b2 - b1*a2) == 0)
except AttributeError:
return False
def is_perpendicular(l1, l2):
"""Returns True if l1 and l2 are perpendicular, False otherwise"""
try:
a1,b1,c1 = l1.coefficients
a2,b2,c2 = l2.coefficients
return bool(simplify(a1*a2 + b1*b2) == 0)
except AttributeError:
return False
def angle_between(l1, l2):
"""
Returns an angle formed between the two linear entities.
Description of Method Used:
===========================
From the dot product of vectors v1 and v2 it is known that:
dot(v1, v2) = |v1|*|v2|*cos(A)
where A is the angle formed between the two vectors. We can
get the directional vectors of the two lines and readily
find the angle between the two using the above formula.
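        For example (illustrative): for the x-axis and the line y = x the
        direction vectors give dot = 1, |v1| = 1, |v2| = sqrt(2), so the
        angle is acos(1/sqrt(2)) = pi/4.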
"""
v1 = l1.p2 - l1.p1
v2 = l2.p2 - l2.p1
return C.acos( (v1[0]*v2[0]+v1[1]*v2[1]) / (abs(v1)*abs(v2)) )
def parallel_line(self, p):
"""
Returns a new Line which is parallel to this linear entity and passes
through the specified point.
"""
d = self.p1 - self.p2
return Line(p, p + d)
def perpendicular_line(self, p):
"""
Returns a new Line which is perpendicular to this linear entity and
passes through the specified point.
"""
d1,d2 = self.p1 - self.p2
        if d2 == 0: # If a horizontal line
if p[1] == self.p1[1]: # if p is on this linear entity
p2 = Point(p[0], p[1] + 1)
return Line(p, p2)
else:
p2 = Point(p[0], self.p1[1])
return Line(p, p2)
else:
p2 = Point(p[0] - d2, p[1] + d1)
return Line(p, p2)
def perpendicular_segment(self, p):
"""
Returns a new Segment which connects p to a point on this linear
entity and is also perpendicular to this line. Returns p itself
if p is on this linear entity.
"""
if p in self:
return p
pl = self.perpendicular_line(p)
p2 = GeometryEntity.do_intersection(self, pl)[0]
return Segment(p, p2)
@property
def slope(self):
"""
The slope of this linear entity, or infinity if vertical.
"""
d1,d2 = self.p1 - self.p2
if d1 == 0:
return S.Infinity
return simplify(d2/d1)
@property
def points(self):
"""The two points used to define this linear entity."""
return (self.p1, self.p2)
def projection(self, o):
"""
Project a point, line, ray, or segment onto this linear entity.
If projection cannot be performed then a GeometryError is raised.
Notes:
======
- A projection involves taking the two points that define
the linear entity and projecting those points onto a
Line and then reforming the linear entity using these
projections.
- A point P is projected onto a line L by finding the point
on L that is closest to P. This is done by creating a
perpendicular line through P and L and finding its
intersection with L.
"""
tline = Line(self.p1, self.p2)
def project(p):
"""Project a point onto the line representing self."""
if p in tline: return p
l1 = tline.perpendicular_line(p)
return tline.intersection(l1)[0]
projected = None
if isinstance(o, Point):
return project(o)
elif isinstance(o, LinearEntity):
n_p1 = project(o.p1)
n_p2 = project(o.p2)
if n_p1 == n_p2:
projected = n_p1
else:
projected = o.__class__(n_p1, n_p2)
# Didn't know how to project so raise an error
if projected is None:
n1 = self.__class__.__name__
n2 = o.__class__.__name__
raise GeometryError("Do not know how to project %s onto %s" % (n2, n1))
return GeometryEntity.do_intersection(self, projected)[0]
def intersection(self, o):
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
elif isinstance(o, LinearEntity):
a1,b1,c1 = self.coefficients
a2,b2,c2 = o.coefficients
t = simplify(a1*b2 - a2*b1)
if t == 0: # are parallel?
if isinstance(self, Line):
if o.p1 in self:
return [o]
return []
elif isinstance(o, Line):
if self.p1 in o:
return [self]
return []
elif isinstance(self, Ray):
if isinstance(o, Ray):
# case 1, rays in the same direction
if self.xdirection == o.xdirection:
if self.source[0] < o.source[0]:
return [o]
return [self]
# case 2, rays in the opposite directions
else:
if o.source in self:
if self.source == o.source:
return [self.source]
return [Segment(o.source, self.source)]
return []
elif isinstance(o, Segment):
if o.p1 in self:
if o.p2 in self:
return [o]
return [Segment(o.p1, self.source)]
elif o.p2 in self:
return [Segment(o.p2, self.source)]
return []
elif isinstance(self, Segment):
if isinstance(o, Ray):
return o.intersection(self)
elif isinstance(o, Segment):
# A reminder that the points of Segments are ordered
# in such a way that the following works. See
# Segment.__new__ for details on the ordering.
if self.p1 not in o:
if self.p2 not in o:
# Neither of the endpoints are in o so either
# o is contained in this segment or it isn't
if o in self:
return [self]
return []
else:
# p1 not in o but p2 is. Either there is a
# segment as an intersection, or they only
# intersect at an endpoint
if self.p2 == o.p1:
return [o.p1]
return [Segment(o.p1, self.p2)]
elif self.p2 not in o:
# p2 not in o but p1 is. Either there is a
# segment as an intersection, or they only
# intersect at an endpoint
if self.p1 == o.p2:
return [o.p2]
return [Segment(o.p2, self.p1)]
# Both points of self in o so the whole segment
# is in o
return [self]
# Unknown linear entity
return []
# Not parallel, so find the point of intersection
px = simplify((b1*c2 - c1*b2) / t)
py = simplify((a2*c1 - a1*c2) / t)
inter = Point(px, py)
if inter in self and inter in o:
return [inter]
return []
raise NotImplementedError()
def random_point(self):
"""Returns a random point on this Ray."""
from random import randint
from sys import maxint
        # The lower and upper bounds default to the full machine-int range
        lower, upper = -maxint-1, maxint
if self.slope is S.Infinity:
if isinstance(self, Ray):
if self.ydirection is S.Infinity:
lower = self.p1[1]
else:
upper = self.p1[1]
elif isinstance(self, Segment):
lower = self.p1[1]
upper = self.p2[1]
x = self.p1[0]
y = randint(lower, upper)
else:
if isinstance(self, Ray):
if self.xdirection is S.Infinity:
lower = self.p1[0]
else:
upper = self.p1[0]
elif isinstance(self, Segment):
lower = self.p1[0]
upper = self.p2[0]
a,b,c = self.coefficients
x = randint(lower, upper)
y = simplify( (-c - a*x) / b )
return Point(x, y)
def __eq__(self, other):
raise NotImplementedError()
def __contains__(self, other):
raise NotImplementedError()
class Line(LinearEntity):
"""A line in space."""
def arbitrary_point(self, parameter_name='t'):
"""Returns a symbolic point that is on this line."""
t = C.Symbol(parameter_name, real=True)
x = simplify(self.p1[0] + t*(self.p2[0] - self.p1[0]))
y = simplify(self.p1[1] + t*(self.p2[1] - self.p1[1]))
return Point(x, y)
def plot_interval(self, parameter_name='t'):
"""Returns the plot interval for the default geometric plot of line"""
t = C.Symbol(parameter_name, real=True)
return [t, -5, 5]
def equation(self, xaxis_name='x', yaxis_name='y'):
"""
Returns the equation for this line. Optional parameters xaxis_name
and yaxis_name can be used to specify the names of the symbols used
for the equation.
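        For example (illustrative), the line through (0, 0) and (1, 1) has
        coefficients (-1, 1, 0) and therefore yields the expression -x + y
        (equivalently, y - x).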
"""
x = C.Symbol(xaxis_name, real=True)
y = C.Symbol(yaxis_name, real=True)
a,b,c = self.coefficients
return simplify(a*x + b*y + c)
def __contains__(self, o):
"""Return True if o is on this Line, or False otherwise."""
if isinstance(o, Line):
return self.__eq__(o)
elif isinstance(o, Point):
x = C.Symbol('x', real=True)
y = C.Symbol('y', real=True)
r = self.equation().subs({x: o[0], y: o[1]})
x = simplify(r)
return simplify(x) == 0
else:
return False
def __eq__(self, other):
"""Return True if other is equal to this Line, or False otherwise."""
if not isinstance(other, Line): return False
return Point.is_collinear(self.p1, self.p2, other.p1, other.p2)
class Ray(LinearEntity):
"""A ray in space."""
@property
def source(self):
"""The point from which the ray eminates."""
return self.p1
@property
def xdirection(self):
"""
The x direction of the ray. Positive infinity if the ray points in
the positive x direction, negative infinity if the ray points
in the negative x direction, or 0 if the ray is vertical.
"""
if self.p1[0] < self.p2[0]:
return S.Infinity
elif self.p1[0] == self.p2[0]:
return S.Zero
else:
return S.NegativeInfinity
@property
def ydirection(self):
"""
The y direction of the ray. Positive infinity if the ray points in
the positive y direction, negative infinity if the ray points
in the negative y direction, or 0 if the ray is horizontal.
"""
if self.p1[1] < self.p2[1]:
return S.Infinity
elif self.p1[1] == self.p2[1]:
return S.Zero
else:
return S.NegativeInfinity
def __eq__(self, other):
"""Return True if other is equal to this Ray, or False otherwise."""
if not isinstance(other, Ray):
return False
return ((self.source == other.source) and (other.p2 in self))
def __contains__(self, o):
"""Return True if o is on this Ray, or False otherwise."""
if isinstance(o, Ray):
d = o.p2 - o.p1
return Point.is_collinear(self.p1, self.p2, o.p1, o.p2) \
and (self.xdirection == o.xdirection) \
and (self.ydirection == o.ydirection)
elif isinstance(o, Segment):
return ((o.p1 in self) and (o.p2 in self))
elif isinstance(o, Point):
if Point.is_collinear(self.p1, self.p2, o):
if (not self.p1[0].atoms(C.Symbol)) and (not self.p1[1].atoms(C.Symbol)) \
and (not self.p2[0].atoms(C.Symbol)) and (not self.p2[1].atoms(C.Symbol)):
if self.xdirection is S.Infinity:
return o[0] >= self.source[0]
elif self.xdirection is S.NegativeInfinity:
return o[0] <= self.source[0]
elif self.ydirection is S.Infinity:
return o[1] >= self.source[1]
return o[1] <= self.source[1]
else:
# There are symbols lying around, so assume that o
# is contained in this ray (for now)
return True
else:
                # The points are not collinear, so o cannot lie on this ray
                # and hence it is impossible for self to contain o
return False
# No other known entity can be contained in a Ray
return False
class Segment(LinearEntity):
"""An undirected line segment in space."""
def __new__(cls, p1, p2, **kwargs):
# Reorder the two points under the following ordering:
# if p1[0] != p2[0] then p1[0] < p2[0]
# if p1[0] == p2[0] then p1[1] < p2[1]
if p1[0] > p2[0]:
p1, p2 = p2, p1
        elif p1[0] == p2[0] and p1[1] > p2[1]:
p1, p2 = p2, p1
return LinearEntity.__new__(cls, p1, p2, **kwargs)
def arbitrary_point(self, parameter_name='t'):
"""Returns a symbolic point that is on this line segment."""
t = C.Symbol(parameter_name, real=True)
x = simplify(self.p1[0] + t*(self.p2[0] - self.p1[0]))
y = simplify(self.p1[1] + t*(self.p2[1] - self.p1[1]))
return Point(x, y)
def plot_interval(self, parameter_name='t'):
t = C.Symbol(parameter_name, real=True)
return [t, 0, 1]
def perpendicular_bisector(self, p=None):
"""
Returns the perpendicular bisector of this segment. If no point is
specified or the point specified is not on the bisector then the
bisector is returned as a Line. Otherwise a Segment is returned
that joins the point specified and the intersection of the bisector
and the segment.
"""
l = LinearEntity.perpendicular_line(self, self.midpoint)
if p is None or p not in l:
return l
else:
return Segment(self.midpoint, p)
@property
def length(self):
"""The length of the segment."""
return Point.distance(self.p1, self.p2)
@property
def midpoint(self):
"""The midpoint of the segment."""
return Point.midpoint(self.p1, self.p2)
def __eq__(self, other):
"""Return True if other is equal to this Line, or False otherwise."""
if not isinstance(other, Segment):
return False
return ((self.p1 == other.p1) and (self.p2 == other.p2))
def __contains__(self, o):
"""Return True if o is on this Segment, or False otherwise."""
if isinstance(o, Segment):
return ((o.p1 in self) and (o.p2 in self))
elif isinstance(o, Point):
if Point.is_collinear(self.p1, self.p2, o):
x1,x2 = self.p1[0], self.p2[0]
                if not (x1.atoms(C.Symbol) or x2.atoms(C.Symbol)):
return (min(x1,x2) <= o[0]) and (o[0] <= max(x1,x2))
else:
return True
else:
return False
        # No other known entity can be contained in a Segment
return False
|
bsd-3-clause
| -1,037,525,842,769,122,200
| 36.323136
| 98
| 0.509631
| false
| 4.098257
| false
| false
| false
|
HewlettPackard/oneview-ansible
|
library/oneview_id_pools_ipv4_range.py
|
1
|
6647
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2021) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_id_pools_ipv4_range
short_description: Manage OneView ID pools IPV4 Range resources.
description:
- Provides an interface to manage ID pools IPV4 Range resources. Can create, update, or delete.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 6.0.0"
- "ansible >= 2.9"
author: "Thiago Miotto (@tmiotto)"
options:
state:
description:
- Indicates the desired state for the ID pools IPV4 Range resource.
C(present) will ensure data properties are compliant with OneView.
C(absent) will remove the resource from OneView, if it exists.
choices: ['present', 'absent']
data:
description:
- List with ID pools IPV4 Range properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Ensure that ID pools IPV4 Range is present using the default configuration
oneview_id_pools_ipv4_range:
config: "{{ config_file_path }}"
state: present
data:
name: 'Test ID pools IPV4 Range'
- name: Ensure that ID pools IPV4 Range is absent
oneview_id_pools_ipv4_range:
config: "{{ config_file_path }}"
state: absent
data:
name: 'ID pools IPV4 Range'
'''
RETURN = '''
id_pools_ipv4_range:
description: Has the facts about the OneView ID pools IPV4 Ranges.
returned: On state 'present'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class IdPoolsIpv4RangeModule(OneViewModule):
MSG_CREATED = 'ID pools IPV4 Range created successfully.'
MSG_UPDATED = 'ID pools IPV4 Range updated successfully.'
MSG_DELETED = 'ID pools IPV4 Range deleted successfully.'
MSG_ALREADY_PRESENT = 'ID pools IPV4 Range is already present.'
MSG_ALREADY_ABSENT = 'ID pools IPV4 Range is already absent.'
RESOURCE_FACT_NAME = 'id_pools_ipv4_range'
def __init__(self):
additional_arg_spec = dict(data=dict(required=True, type='dict'),
state=dict(
required=True,
choices=['present', 'absent']))
super(IdPoolsIpv4RangeModule, self).__init__(additional_arg_spec=additional_arg_spec,
validate_etag_support=True)
self.resource_client = self.oneview_client.id_pools_ipv4_ranges
def execute_module(self):
self.current_resource = None
        # If a Range URI is provided, it is used to fetch the current resource
if self.data.get('uri'):
self.current_resource = self.resource_client.get_by_uri(self.data.get('uri'))
# Do preliminary check before creating a new range
elif self.data.get('subnetUri') and self.data.get('name'):
subnet = self.oneview_client.id_pools_ipv4_subnets.get_by_uri(self.data.get('subnetUri'))
for range_uri in subnet.data['rangeUris']:
maybe_resource = self.resource_client.get_by_uri(range_uri)
if maybe_resource.data['name'] == self.data['name']:
self.current_resource = maybe_resource
if self.state == 'present':
return self._present()
elif self.state == 'absent':
return self.resource_absent()
def _present(self):
# If no resource was found during get operation, it creates new one
if not self.current_resource:
response = self.resource_present("id_pools_ipv4_range")
else:
# setting current resource for _update_resource
            # Enabled can be True, False or None; the 'not_given' sentinel marks a
            # missing key so it can be told apart from an explicit None.
enabled = self.data.pop('enabled', 'not_given')
            # update_collector / update_allocator default to False when not given
update_collector = self.data.pop('update_collector', False)
update_allocator = self.data.pop('update_allocator', False)
id_list = self.data.pop('idList', False)
count = self.data.pop('count', False)
            # If newName is given, apply the rename before updating
if self.data.get('newName'):
self.data['name'] = self.data.pop('newName')
            # Perform the update operation
response = self.resource_present("id_pools_ipv4_range")
            # Check the enabled status in the latest data and respond accordingly
if enabled != 'not_given' and enabled != self.current_resource.data.get('enabled'):
response['msg'] = self.MSG_UPDATED
response['changed'] = True
response['ansible_facts']['id_pools_ipv4_range'] = \
self.resource_client.enable(dict(enabled=enabled, type='Range'), self.current_resource.data['uri'])
self.data['enabled'] = enabled
return response
elif update_collector:
response['msg'] = self.MSG_UPDATED
response['changed'] = True
self.data['idList'] = id_list
response['ansible_facts']['id_pools_ipv4_range'] = \
self.resource_client.update_collector(dict(idList=id_list), self.data.get('uri'))
return response
elif update_allocator:
self.data['idList'] = id_list
self.data['count'] = count
response['msg'] = self.MSG_UPDATED
response['changed'] = True
response['ansible_facts']['id_pools_ipv4_range'] = \
self.resource_client.update_allocator(dict(idList=id_list, count=count), self.data.get('uri'))
return response
def main():
IdPoolsIpv4RangeModule().run()
if __name__ == '__main__':
main()
|
apache-2.0
| 579,202,707,146,734,800
| 39.779141
| 119
| 0.613811
| false
| 4.062958
| false
| false
| false
|
nick-huang-cc/GraffitiSpaceTT
|
UnderstandStudyPython/awesome-python3-webapp/www/config_override.py
|
1
|
1354
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#Copyright (c) 1986 Nick Wong.
#Copyright (c) 2016-2026 TP-NEW Corp.
# License: TP-NEW (www.tp-new.com)
__author__ = "Nick Wong"
"""
When deploying to a server you usually need to change the database host and similar
settings; editing config_default.py directly is not a good approach. A better way is
to write a config_override.py that overrides certain defaults.
"""
# Passwords and similar secrets are kept locally, so first read the custom
# configuration file to obtain the required credentials:
#import properties
# import sys
# sys.path.append('E:\\GitHub\\GraffitiSpaceTT\\UnderstandStudyPython')
import properties
valueProperties = properties.getValue(object)
sys_address = valueProperties.get('service_sys_ip') # server address
# sys_port = valueProperties.get('service_sys_mysql_port') # server MySQL port
# sys_user = valueProperties.get('service_sys_admin_user') # system login user
# sys_password = valueProperties.get('service_sys_admin_password') # system login user password
# mysql_user = valueProperties.get('service_mysql_ordinary_user') # MySQL user
# mysql_password = valueProperties.get('service_mysql_ordinary_password') # MySQL user password
# mysql_database = valueProperties.get('service_mysql_database') # database name
configs = {
'db': {
'host': sys_address
}
}
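
# A minimal sketch (an assumption, not part of this file) of how such an
# override dict is typically merged over the defaults; ``defaults`` would be
# the configs dict from config_default.py, and ``merge`` is a hypothetical name.
def merge(defaults, override):
    """Recursively overlay ``override`` onto ``defaults``."""
    r = {}
    for k, v in defaults.items():
        if k in override:
            r[k] = merge(v, override[k]) if isinstance(v, dict) else override[k]
        else:
            r[k] = v
    return r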
|
agpl-3.0
| -1,793,919,373,691,236,600
| 32.272727
| 84
| 0.709472
| false
| 2.25
| false
| true
| false
|
volk0ff/fred
|
fred/settings.py
|
1
|
2229
|
"""
Django settings for fred project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hbvld(uz$xaze5)kw$&$*%wqwo%v)=im^3&p5)@!=@)i8kl4rn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*',
                 'www.breadandcircuits.org',
                 '127.0.0.1']  # ports are never part of ALLOWED_HOSTS entries
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'search',
'graph',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fred.urls'
WSGI_APPLICATION = 'fred.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
BASE_DIR,
)
#Templates
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
mit
| 7,103,168,443,666,669,000
| 21.979381
| 71
| 0.696276
| false
| 3.170697
| false
| false
| false
|
lfalvarez/votai
|
medianaranja2/forms.py
|
1
|
12510
|
# coding=utf-8
from django import forms
from popular_proposal.models import PopularProposal
from elections.models import Area, QuestionCategory, Election
from django.conf import settings
from formtools.wizard.views import SessionWizardView
from medianaranja2.proposals_getter import ProposalsGetter, ProposalsGetterByReadingGroup
from django.shortcuts import render
from medianaranja2.calculator import Calculator
from constance import config
from organization_profiles.models import OrganizationTemplate
from django.views.generic.base import TemplateView
from django.core.cache import cache
from django.utils.safestring import mark_safe
from django.db.models import Q
from medianaranja2.grouped_multiple_choice_fields import GroupedModelMultiChoiceField
from medianaranja2.candidate_proposals_matrix_generator import OrganizationMatrixCreator
from django.forms import ModelForm
class CategoryMultipleChoiceField(forms.ModelMultipleChoiceField):
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def label_from_instance(self, obj):
return obj.name
class PositionChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.label
class ProposalModelMultipleChoiceField(GroupedModelMultiChoiceField):
def label_from_instance(self, obj):
        return mark_safe(obj.get_one_liner())
area_field = forms.ModelChoiceField(label=u"¿En qué comuna votas?",
help_text=u"Si quieres conocer con qué candidatura al Congreso eres más compatible, elige la comuna en la que votas. Si sólo te interesa tu media naranja presidencial, elige “no aplica”.",
empty_label=u"NO APLICA",
required=False,
queryset=Area.objects.filter(classification__in=settings.FILTERABLE_AREAS_TYPE).order_by('name'))
categories_field = CategoryMultipleChoiceField(label=u"De estos temas, ¿cuáles son los que te parecen más importantes para el país?",
queryset=QuestionCategory.objects.none(),
widget=forms.CheckboxSelectMultiple(),)
class SetupForm(forms.Form):
def __init__(self, *args, **kwargs):
should_use_categories = kwargs.pop('should_use_categories', True)
super(SetupForm, self).__init__(*args, **kwargs)
if should_use_categories:
self.fields['categories'] = categories_field
if settings.SECOND_ROUND_ELECTION is None:
self.fields['area'] = area_field
if 'categories' in self.fields:
self.fields['categories'].queryset = QuestionCategory.objects.all().order_by('-name')
else:
self.election = Election.objects.get(slug=settings.SECOND_ROUND_ELECTION)
if 'categories' in self.fields:
self.fields['categories'].queryset = self.election.categories.order_by('-name')
def clean(self):
cleaned_data = super(SetupForm, self).clean()
if settings.SECOND_ROUND_ELECTION is not None:
cleaned_data['element_selector'] = Election.objects.get(slug=settings.SECOND_ROUND_ELECTION)
else:
if cleaned_data['area'] is None:
cleaned_data['area'] = Area.objects.get(slug=config.DEFAULT_AREA)
        if 'area' in cleaned_data:
cleaned_data['element_selector'] = cleaned_data['area']
return cleaned_data
class QuestionsForm(forms.Form):
    def __init__(self, *args, **kwargs):
        categories = kwargs.pop('categories')
        super(QuestionsForm, self).__init__(*args, **kwargs)
        # Use an instance attribute so topic slugs do not accumulate across
        # form instances (a shared class-level list would).
        self.topic_fields = []
        self.set_fields(categories)
def set_fields(self, categories):
self.categories = categories
for category in self.categories:
for topic in category.topics.order_by('id'):
field = PositionChoiceField(label=topic.label,
empty_label=None,
queryset=topic.positions,
widget=forms.RadioSelect
)
self.fields[topic.slug] = field
self.topic_fields.append(topic.slug)
def clean(self):
cleaned_data = super(QuestionsForm, self).clean()
r = {"positions": []}
for topic in cleaned_data:
if topic in self.topic_fields:
r['positions'].append(cleaned_data[topic])
else:
r[topic] = cleaned_data[topic]
return r
class ProposalsForm(forms.Form):
proposals = ProposalModelMultipleChoiceField(queryset=PopularProposal.objects.none(),
group_by_field='clasification',
widget=forms.CheckboxSelectMultiple(attrs={'class': 'proposal_option'}))
def __init__(self, *args, **kwargs):
self.proposals = kwargs.pop('proposals')
element_selector = kwargs.pop('element_selector')
super(ProposalsForm, self).__init__(*args, **kwargs)
proposals_qs_cache_key = 'proposals_qs_' + str(element_selector.id)
if cache.get(proposals_qs_cache_key) is not None:
self.fields['proposals'].queryset = cache.get(proposals_qs_cache_key)
return
self.proposals = self.proposals[:config.MEDIA_NARANJA_MAX_NUM_PR]
qs = PopularProposal.objects.filter(id__in=[p.id for p in self.proposals]).order_by('clasification')
cache.set(proposals_qs_cache_key, qs)
self.fields['proposals'].queryset = qs
class MediaNaranjaException(Exception):
pass
class MediaNaranjaWizardFormBase(SessionWizardView):
template_name = 'medianaranja2/paso_default.html'
done_template_name = 'medianaranja2/resultado.html'
calculator_class = Calculator
calculator_extra_kwargs = {}
def get_proposal_class(self):
if config.ESTRATEGIA_SELECCION_PROPUESTAS == 'reading_group':
return ProposalsGetterByReadingGroup
return ProposalsGetter
def get_proposal_getter_kwargs(self):
return {}
def get_proposal_getter(self):
return self.get_proposal_class()(**self.get_proposal_getter_kwargs())
def get_organization_templates(self, proposals):
if settings.RECOMMENDED_ORGS_FROM_CACHE:
c = OrganizationMatrixCreator()
return c.get_organizations(proposals)
else:
is_creator_of_this_proposals_filter = Q(organization__proposals__in=proposals)
is_liker_of_this_proposals = Q(organization__likes__proposal__in=proposals)
organization_templates = OrganizationTemplate.objects.filter(is_creator_of_this_proposals_filter|is_liker_of_this_proposals).distinct()
return organization_templates
def done(self, form_list, **kwargs):
cleaned_data = self.get_all_cleaned_data()
results = []
has_parent = True
element_selector = self.get_element_selector_from_cleaned_data(cleaned_data)
elections = self.get_proposal_getter().get_elections(element_selector)
proposals = cleaned_data.get('proposals', [])
positions = cleaned_data.get('positions', [])
for election in elections:
calculator = self.calculator_class(election, positions, proposals, **self.calculator_extra_kwargs)
results.append(calculator.get_result())
if settings.ORGANIZATIONS_IN_12_RESULT:
organization_templates = self.get_organization_templates(proposals)
else:
organization_templates = []
return render(self.request, self.done_template_name, {
'results': results,
'organizations': organization_templates
})
def get_template_names(self):
return [self.templates[self.steps.current]]
def post(self, *args, **kwargs):
try:
return super(MediaNaranjaWizardFormBase, self).post(*args, **kwargs)
except MediaNaranjaException:
self.storage.reset()
self.storage.current_step = self.steps.first
return self.render(self.get_form())
def get_categories_form_kwargs(self, cleaned_data):
return {'categories': list(cleaned_data['categories'])}
def get_element_selector_from_cleaned_data(self, cleaned_data):
if 'element_selector' not in cleaned_data:
return Area.objects.get(slug=config.DEFAULT_AREA)
return cleaned_data['element_selector']
def get_proposals_form_kwargs(self, cleaned_data):
proposal_getter_kwargs = self.get_proposal_getter_kwargs()
getter = self.get_proposal_class()(**proposal_getter_kwargs)
element_selector = self.get_element_selector_from_cleaned_data(cleaned_data)
proposals = getter.get_all_proposals(element_selector)
return {'proposals': proposals, 'element_selector': element_selector}
def get_kwargs_from_step_number(self, number, cleaned_data):
func_name = self.steps_and_functions.get(number, None)
if func_name is None:
return {}
func = getattr(self, func_name, None)
return func(cleaned_data)
def get_form_kwargs(self, step):
step = int(step)
cleaned_data = {}
if step:
cleaned_data = self.get_cleaned_data_for_step(str(0))
if cleaned_data is None:
raise MediaNaranjaException()
return self.get_kwargs_from_step_number(step, cleaned_data)
class MediaNaranjaWizardForm(MediaNaranjaWizardFormBase):
form_list = [SetupForm, QuestionsForm, ProposalsForm]
steps_and_functions = {
1: 'get_categories_form_kwargs',
2: 'get_proposals_form_kwargs'
}
templates = {"0": "medianaranja2/paso_0_setup.html",
"1": "medianaranja2/paso_1_preguntas_y_respuestas.html",
"2": "medianaranja2/paso_2_proposals_list.html"}
class MediaNaranjaNoQuestionsWizardForm(MediaNaranjaWizardFormBase):
form_list = [SetupForm, ProposalsForm]
steps_and_functions = {
1: 'get_proposals_form_kwargs'
}
templates = {"0": "medianaranja2/paso_0_setup.html",
"1": "medianaranja2/paso_2_proposals_list.html"}
def get_form_kwargs(self, step):
kwargs = super(MediaNaranjaNoQuestionsWizardForm, self).get_form_kwargs(step)
if step == '0':
kwargs['should_use_categories'] = False
return kwargs
class MediaNaranjaOnlyProposals(MediaNaranjaWizardFormBase):
form_list = [ProposalsForm, ]
steps_and_functions = {
0: 'get_proposals_form_kwargs'
}
templates = {"0": "medianaranja2/paso_2_proposals_list.html"}
class MediaNaranjaResultONLYFORDEBUG(TemplateView):  # pragma: no cover
template_name = 'medianaranja2/resultado.html'
def get_context_data(self, **kwargs):
context = super(MediaNaranjaResultONLYFORDEBUG, self).get_context_data(**kwargs)
from elections.models import Candidate, Election
from organization_profiles.models import OrganizationTemplate
templates = OrganizationTemplate.objects.all()[:3]
context['organizations'] = templates
e1 = Election.objects.exclude(candidates__isnull=True)[0]
context['results'] = [
{'election': e1,
'candidates': [{'value': 2.0, 'candidate': e1.candidates.all()[0]},
{'value': 1.0, 'candidate': e1.candidates.all()[1]},
{'value': 0.5, 'candidate': e1.candidates.all()[2]}]}]
return context
from medianaranja2.models import SharedResult
class ShareForm(ModelForm):
object_id = forms.CharField()
percentage = forms.FloatField(required=False)
class Meta:
model = SharedResult
fields = ['object_id', 'percentage']
def __init__(self, *args, **kwargs):
self.content_type = kwargs.pop('content_type')
super(ShareForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
instance = super(ShareForm, self).save(commit=False)
instance.content_type = self.content_type
instance.data = self.cleaned_data
if commit:
instance.save()
return instance
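
# A minimal usage sketch (a hypothetical helper, not called anywhere in this
# module) showing how ShareForm is bound to a content type and saved.
def _example_share(content_type, object_id, percentage=None):
    form = ShareForm(data={'object_id': object_id, 'percentage': percentage},
                     content_type=content_type)
    if form.is_valid():
        return form.save()
    return None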
|
gpl-3.0
| -2,809,643,480,602,538,500
| 40.936242
| 224
| 0.641114
| false
| 3.875039
| false
| false
| false
|
sreidy/roboticsclub.org
|
api/urls.py
|
2
|
1047
|
from rest_framework import routers, serializers, viewsets
from .views import *
from django.conf.urls import url
router = routers.DefaultRouter()
router.register(r'api_requests', APIRequestViewSet)
router.register(r'webcams', WebcamViewSet)
router.register(r'datetime', DateTimeViewSet, base_name="datetime")
router.register(r'users', RoboUserViewSet)
router.register(r'officers', OfficerViewSet)
router.register(r'projects', ProjectViewSet)
router.register(r'channels', ChannelViewSet, base_name="channels")
router.register(r'calendar', CalendarViewSet, base_name="calendar")
router.register(r'sponsors', SponsorViewSet)
router.register(r'social_medias', SocialMediaViewSet)
router.register(r'machines', MachineViewSet)
router.register(r'faq', CategoryViewSet)
router.register(r'tshirts', TShirtViewSet)
router.register(r'posters', PosterViewSet)
router.register(r'upcs', UPCItemViewSet, base_name="upcs")
urlpatterns = router.urls + [
url(r'^magnetic/$', MagneticView.as_view()),
url(r'^rfid/$', RFIDView.as_view()),
]
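
# For reference: DefaultRouter also generates list/detail routes for each
# registered viewset (e.g. /api_requests/, /webcams/, /users/{pk}/) plus an
# API root view, in addition to the explicit /magnetic/ and /rfid/ URLs.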
|
mit
| -4,484,347,834,260,364,000
| 37.777778
| 67
| 0.78319
| false
| 3.221538
| false
| true
| false
|
bastibl/gnuradio
|
gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
|
1
|
16372
|
#!/usr/bin/env python
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
import numpy
from gnuradio import gr, gr_unittest, digital, blocks
import pmt
class qa_ofdm_frame_equalizer_vcvc (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
self.tsb_key = "tsb_key"
def tearDown (self):
self.tb = None
def test_001_simple (self):
""" Very simple functionality testing:
- static equalizer
- init channel state with all ones
- transmit all ones
- make sure we rx all ones
- Tag check: put in frame length tag and one other random tag,
make sure they're propagated
"""
fft_len = 8
equalizer = digital.ofdm_equalizer_static(fft_len)
n_syms = 3
tx_data = (1,) * fft_len * n_syms
chan_tag = gr.tag_t()
chan_tag.offset = 0
chan_tag.key = "ofdm_sync_chan_taps"
chan_tag.value = pmt.init_c32vector(fft_len, (1,) * fft_len)
random_tag = gr.tag_t()
random_tag.offset = 1
random_tag.key = "foo"
random_tag.value = pmt.from_long(42)
src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, random_tag))
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key)
sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key),
eq,
sink
)
self.tb.run ()
# Check data
self.assertEqual(tx_data, sink.data()[0])
# Check tags
tag_dict = dict()
for tag in sink.tags():
ptag = gr.tag_to_python(tag)
tag_dict[ptag.key] = ptag.value
expected_dict = {
'foo': 42
}
self.assertEqual(tag_dict, expected_dict)
def test_001b_simple_skip_nothing (self):
"""
Same as before, but put a skip-header in there
"""
fft_len = 8
equalizer = digital.ofdm_equalizer_static(fft_len, symbols_skipped=1)
n_syms = 3
tx_data = (1,) * fft_len * n_syms
chan_tag = gr.tag_t()
chan_tag.offset = 0
chan_tag.key = "ofdm_sync_chan_taps"
chan_tag.value = pmt.init_c32vector(fft_len, (1,) * fft_len)
src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag,))
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key)
sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key),
eq,
sink
)
self.tb.run ()
# Check data
self.assertEqual(tx_data, sink.data()[0])
def test_001c_carrier_offset_no_cp (self):
"""
Same as before, but put a carrier offset in there
"""
fft_len = 8
cp_len = 0
n_syms = 1
carr_offset = 1
occupied_carriers = ((-2, -1, 1, 2),)
tx_data = (
0, 0, 0, -1j, -1j, 0, -1j, -1j,
)
# The rx'd signal is shifted
rx_expected = (0, 0, 1, 1, 0, 1, 1, 0) * n_syms
equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers)
chan_tag = gr.tag_t()
chan_tag.offset = 0
chan_tag.key = "ofdm_sync_chan_taps"
# Note: this is shifted to the correct position!
chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, -1j, -1j, 0, -1j, -1j, 0))
offset_tag = gr.tag_t()
offset_tag.offset = 0
offset_tag.key = "ofdm_sync_carr_offset"
offset_tag.value = pmt.from_long(carr_offset)
src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag))
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key)
sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key),
eq,
sink
)
self.tb.run ()
# Check data
self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)
def test_001c_carrier_offset_cp (self):
"""
Same as before, but put a carrier offset in there and a CP
"""
fft_len = 8
cp_len = 2
n_syms = 3
# cp_len/fft_len == 1/4, therefore, the phase is rotated by
# carr_offset * \pi/2 in every symbol
occupied_carriers = ((-2, -1, 1, 2),)
carr_offset = -1
tx_data = (
0,-1j,-1j, 0,-1j,-1j, 0, 0,
0, -1, -1, 0, -1, -1, 0, 0,
0, 1j, 1j, 0, 1j, 1j, 0, 0,
)
# Rx'd signal is corrected
rx_expected = (0, 0, 1, 1, 0, 1, 1, 0) * n_syms
equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers)
chan_tag = gr.tag_t()
chan_tag.offset = 0
chan_tag.key = "ofdm_sync_chan_taps"
chan_tag.value = pmt.init_c32vector(fft_len, (0, 0, 1, 1, 0, 1, 1, 0))
offset_tag = gr.tag_t()
offset_tag.offset = 0
offset_tag.key = "ofdm_sync_carr_offset"
offset_tag.value = pmt.from_long(carr_offset)
src = blocks.vector_source_c(tx_data, False, fft_len, (chan_tag, offset_tag))
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), cp_len, self.tsb_key)
sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, n_syms, self.tsb_key),
eq,
sink
)
self.tb.run ()
# Check data
self.assertComplexTuplesAlmostEqual(rx_expected, sink.data()[0], places=4)
def test_002_static (self):
"""
- Add a simple channel
- Make symbols QPSK
"""
fft_len = 8
# 4 5 6 7 0 1 2 3
tx_data = [-1, -1, 1, 2, -1, 3, 0, -1, # 0
-1, -1, 0, 2, -1, 2, 0, -1, # 8
-1, -1, 3, 0, -1, 1, 0, -1, # 16 (Pilot symbols)
-1, -1, 1, 1, -1, 0, 2, -1] # 24
cnst = digital.constellation_qpsk()
tx_signal = [cnst.map_to_points_v(x)[0] if x != -1 else 0 for x in tx_data]
occupied_carriers = ((1, 2, 6, 7),)
pilot_carriers = ((), (), (1, 2, 6, 7), ())
pilot_symbols = (
[], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []
)
equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols)
channel = [
0, 0, 1, 1, 0, 1, 1, 0,
0, 0, 1, 1, 0, 1, 1, 0, # These coefficients will be rotated slightly (but less than \pi/2)
0, 0, 1j, 1j, 0, 1j, 1j, 0, # Go crazy here!
0, 0, 1j, 1j, 0, 1j, 1j, 0
]
for idx in range(fft_len, 2*fft_len):
channel[idx] = channel[idx-fft_len] * numpy.exp(1j * .1 * numpy.pi * (numpy.random.rand()-.5))
chan_tag = gr.tag_t()
chan_tag.offset = 0
chan_tag.key = "ofdm_sync_chan_taps"
chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len])
src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,))
sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key)
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, len(tx_data) // fft_len, self.tsb_key),
eq,
sink
)
self.tb.run ()
rx_data = [cnst.decision_maker_v((x,)) if x != 0 else -1 for x in sink.data()[0]]
# Check data
self.assertEqual(tx_data, rx_data)
# Check tags
tag_dict = dict()
for tag in sink.tags():
ptag = gr.tag_to_python(tag)
if ptag.key == 'ofdm_sync_chan_taps':
tag_dict[ptag.key] = list(pmt.c32vector_elements(tag.value))
else:
tag_dict[ptag.key] = pmt.to_python(tag.value)
expected_dict = {
'ofdm_sync_chan_taps': channel[-fft_len:]
}
self.assertEqual(tag_dict, expected_dict)
def test_002_static_wo_tags (self):
""" Same as before, but the input stream has no tag.
We specify the frame size in the constructor.
We also specify a tag key, so the output stream *should* have
a TSB tag.
"""
fft_len = 8
n_syms = 4
# 4 5 6 7 0 1 2 3
tx_data = [-1, -1, 1, 2, -1, 3, 0, -1, # 0
-1, -1, 0, 2, -1, 2, 0, -1, # 8
-1, -1, 3, 0, -1, 1, 0, -1, # 16 (Pilot symbols)
-1, -1, 1, 1, -1, 0, 2, -1] # 24
cnst = digital.constellation_qpsk()
tx_signal = [cnst.map_to_points_v(x)[0] if x != -1 else 0 for x in tx_data]
occupied_carriers = ((1, 2, 6, 7),)
pilot_carriers = ((), (), (1, 2, 6, 7), ())
pilot_symbols = (
[], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []
)
equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols)
channel = [
0, 0, 1, 1, 0, 1, 1, 0,
0, 0, 1, 1, 0, 1, 1, 0, # These coefficients will be rotated slightly (below)...
0, 0, 1j, 1j, 0, 1j, 1j, 0, # Go crazy here!
0, 0, 1j, 1j, 0, 1j, 1j, 0 # ...and again here.
]
for idx in range(fft_len, 2*fft_len):
channel[idx] = channel[idx-fft_len] * numpy.exp(1j * .1 * numpy.pi * (numpy.random.rand()-.5))
idx2 = idx+2*fft_len
channel[idx2] = channel[idx2] * numpy.exp(1j * 0 * numpy.pi * (numpy.random.rand()-.5))
src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len)
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, False, n_syms)
sink = blocks.tsb_vector_sink_c(vlen=fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, len(tx_data) // fft_len, self.tsb_key),
eq,
sink
)
self.tb.run ()
rx_data = [cnst.decision_maker_v((x,)) if x != 0 else -1 for x in sink.data()[0]]
self.assertEqual(tx_data, rx_data)
# Check TSB Functionality
packets = sink.data()
self.assertEqual(len(packets), 1)
self.assertEqual(len(packets[0]), len(tx_data))
    # Renamed: this second definition previously reused the name
    # test_002_static_wo_tags, silently shadowing the test above.
    def test_002_static_wo_tags_2 (self):
fft_len = 8
# 4 5 6 7 0 1 2 3
tx_data = [-1, -1, 1, 2, -1, 3, 0, -1, # 0
-1, -1, 0, 2, -1, 2, 0, -1, # 8
-1, -1, 3, 0, -1, 1, 0, -1, # 16 (Pilot symbols)
-1, -1, 1, 1, -1, 0, 2, -1] # 24
cnst = digital.constellation_qpsk()
tx_signal = [cnst.map_to_points_v(x)[0] if x != -1 else 0 for x in tx_data]
occupied_carriers = ((1, 2, 6, 7),)
pilot_carriers = ((), (), (1, 2, 6, 7), ())
pilot_symbols = (
[], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []
)
equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers, pilot_carriers, pilot_symbols)
channel = [
0, 0, 1, 1, 0, 1, 1, 0,
0, 0, 1, 1, 0, 1, 1, 0, # These coefficients will be rotated slightly...
0, 0, 1j, 1j, 0, 1j, 1j, 0, # Go crazy here!
0, 0, 1j, 1j, 0, 1j, 1j, 0 # ...and again here.
]
for idx in range(fft_len, 2*fft_len):
channel[idx] = channel[idx-fft_len] * numpy.exp(1j * .1 * numpy.pi * (numpy.random.rand()-.5))
idx2 = idx+2*fft_len
channel[idx2] = channel[idx2] * numpy.exp(1j * 0 * numpy.pi * (numpy.random.rand()-.5))
src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len)
sink = blocks.vector_sink_c(fft_len)
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, "", False, 4)
self.tb.connect(src, eq, sink)
self.tb.run ()
rx_data = [cnst.decision_maker_v((x,)) if x != 0 else -1 for x in sink.data()]
self.assertEqual(tx_data, rx_data)
def test_002_simpledfe (self):
""" Use the simple DFE equalizer. """
fft_len = 8
# 4 5 6 7 0 1 2 3
tx_data = [-1, -1, 1, 2, -1, 3, 0, -1, # 0
-1, -1, 0, 2, -1, 2, 0, -1, # 8
-1, -1, 3, 0, -1, 1, 0, -1, # 16 (Pilot symbols)
-1, -1, 1, 1, -1, 0, 2, -1] # 24
cnst = digital.constellation_qpsk()
tx_signal = [cnst.map_to_points_v(x)[0] if x != -1 else 0 for x in tx_data]
occupied_carriers = ((1, 2, 6, 7),)
pilot_carriers = ((), (), (1, 2, 6, 7), ())
pilot_symbols = (
[], [], [cnst.map_to_points_v(x)[0] for x in (1, 0, 3, 0)], []
)
equalizer = digital.ofdm_equalizer_simpledfe(
fft_len, cnst.base(), occupied_carriers, pilot_carriers, pilot_symbols, 0, 0.01
)
channel = [
0, 0, 1, 1, 0, 1, 1, 0,
0, 0, 1, 1, 0, 1, 1, 0, # These coefficients will be rotated slightly...
0, 0, 1j, 1j, 0, 1j, 1j, 0, # Go crazy here!
0, 0, 1j, 1j, 0, 1j, 1j, 0 # ...and again here.
]
for idx in range(fft_len, 2*fft_len):
channel[idx] = channel[idx-fft_len] * numpy.exp(1j * .1 * numpy.pi * (numpy.random.rand()-.5))
idx2 = idx+2*fft_len
channel[idx2] = channel[idx2] * numpy.exp(1j * 0 * numpy.pi * (numpy.random.rand()-.5))
chan_tag = gr.tag_t()
chan_tag.offset = 0
chan_tag.key = "ofdm_sync_chan_taps"
chan_tag.value = pmt.init_c32vector(fft_len, channel[:fft_len])
src = blocks.vector_source_c(numpy.multiply(tx_signal, channel), False, fft_len, (chan_tag,))
eq = digital.ofdm_frame_equalizer_vcvc(equalizer.base(), 0, self.tsb_key, True)
sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, len(tx_data) // fft_len, self.tsb_key),
eq,
sink
)
self.tb.run ()
rx_data = [cnst.decision_maker_v((x,)) if x != 0 else -1 for x in sink.data()[0]]
self.assertEqual(tx_data, rx_data)
self.assertEqual(len(sink.tags()), 1)
tag = sink.tags()[0]
self.assertEqual(tag.key, "ofdm_sync_chan_taps")
self.assertComplexTuplesAlmostEqual(list(pmt.c32vector_elements(tag.value)), channel[-fft_len:], places=1)
if __name__ == '__main__':
gr_unittest.run(qa_ofdm_frame_equalizer_vcvc, "qa_ofdm_frame_equalizer_vcvc.xml")
|
gpl-3.0
| 3,725,604,465,256,258,600
| 42.084211
| 117
| 0.524127
| false
| 2.953635
| true
| false
| false
|
google/feedloader
|
appengine/uploader/shoptimizer_client.py
|
1
|
9956
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client to send product data to the Shoptimizer (optimization) API and parse the results."""
import json
import logging
from typing import Any, Dict
import requests
import constants
_CONFIG_FILE_PATH = 'config/shoptimizer_config.json'
_ERROR_MSG_TEMPLATE = ('Request for batch #%d with operation %s encountered an '
'error: %s. Error: %s')
_METADATA_SERVER_TOKEN_URL = 'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience='
class ShoptimizerClient(object):
"""Client to send product data to the Shoptimizer (optimization) API and parse the results."""
def __init__(self, batch_number: int, operation: constants.Operation):
"""Inits ShoptimizerClient.
Args:
batch_number: The number that identifies this batch.
operation: The operation to be performed on this batch (upsert, delete,
prevent_expiring).
"""
self._batch_number = batch_number
self._operation = operation
self._optimization_params = _load_optimization_params(
self._batch_number, self._operation)
self._config_params = _load_config_params()
def shoptimize(self, batch: constants.Batch) -> constants.Batch:
"""Optimizes a batch of product data by sending it to the Shoptimizer (optimization) API.
Args:
batch: The batch of product data to be optimized.
Returns:
The optimized batch of product data if no errors encountered,
or the original batch of product data otherwise.
"""
if not self._is_input_valid(batch):
return batch
try:
response_dict = self._send_to_shoptimizer(batch)
except (TypeError, requests.exceptions.RequestException, ValueError):
return batch
# Checks for some top-level failure in response
# (received response in correct format without exceptions,
# but something went wrong in Shoptimizer)
if response_dict.get('error-msg', ''):
logging.error(_ERROR_MSG_TEMPLATE, self._batch_number,
self._operation.value,
'Encountered an error in the Shoptimizer API response',
response_dict['error-msg'])
return batch
self._log_results(response_dict)
return response_dict.get('optimized-data', batch)
def _is_input_valid(self, batch: constants.Batch) -> bool:
"""Checks input parameters are valid.
Args:
batch: The batch of product data to be optimized.
Returns:
True if the input is valid, False otherwise.
"""
if not constants.SHOPTIMIZER_BASE_URL:
logging.warning(
_ERROR_MSG_TEMPLATE, self._batch_number, self._operation.value,
'Shoptimizer API URL is not set. '
'Check the SHOPTIMIZER_URL environment variable is correctly set', '')
return False
if not batch:
logging.warning(_ERROR_MSG_TEMPLATE, self._batch_number,
self._operation.value,
'Batch was empty. Shoptimizer API not called', '')
return False
if not self._optimization_params:
      logging.info(
          _ERROR_MSG_TEMPLATE, self._batch_number, self._operation.value,
          'Optimization parameters were empty. Shoptimizer API not called', '')
return False
if 'true' not in [
val.lower() for val in self._optimization_params.values()
]:
      logging.info(
          _ERROR_MSG_TEMPLATE, self._batch_number, self._operation.value,
          'No optimization parameter was set to true. Shoptimizer API not called', '')
return False
return True
  def _send_to_shoptimizer(self, batch: constants.Batch) -> Dict[str, Any]:
    """Sends the batch of product data to the Shoptimizer API.
    Args:
      batch: The batch of product data to be optimized.
    Returns:
      A dictionary containing the results of the Shoptimizer API call.
    """
try:
batch_as_json = json.dumps(batch)
except TypeError as type_error:
logging.exception(
_ERROR_MSG_TEMPLATE, self._batch_number, self._operation.value,
'Failed to convert batch to JSON. Shoptimizer API not called',
type_error)
raise
    # _get_jwt logs and re-raises request exceptions itself, so no extra
    # handling is needed here.
    jwt = self._get_jwt()
try:
headers = {
'Authorization': f'bearer {jwt}',
'Content-Type': 'application/json'
}
request_params = {}
request_params.update(self._optimization_params)
request_params.update(self._config_params)
response = requests.request(
'POST',
constants.SHOPTIMIZER_ENDPOINT,
data=batch_as_json,
headers=headers,
params=request_params)
response.raise_for_status()
response_dict = json.loads(response.text)
except requests.exceptions.RequestException as request_exception:
logging.exception(
_ERROR_MSG_TEMPLATE, self._batch_number, self._operation.value,
'Did not receive a successful response from the Shoptimizer API',
request_exception)
raise
except ValueError as value_error:
logging.exception(
_ERROR_MSG_TEMPLATE, self._batch_number, self._operation.value,
'Failed to deserialize JSON returned from Shoptimizer API',
value_error)
raise
return response_dict
def _get_jwt(self) -> str:
"""Retrieves a JSON web token from the Google metadata server for Cloud Run authentication.
Returns:
A JSON web token that can be used for Cloud Run authentication.
"""
try:
token_request_url = _METADATA_SERVER_TOKEN_URL + constants.SHOPTIMIZER_BASE_URL
token_request_headers = {'Metadata-Flavor': 'Google'}
# Fetches the token
response = requests.get(token_request_url, headers=token_request_headers)
response.raise_for_status()
jwt = response.content.decode('utf-8')
except requests.exceptions.RequestException as request_exception:
logging.exception(
_ERROR_MSG_TEMPLATE, self._batch_number, self._operation.value,
'Failed get an authentication JWT. Shoptimizer API not called',
request_exception)
raise
return jwt
def _log_results(self, response_dict: Dict[str, Any]) -> None:
"""Logs the results of the call to the Shoptimizer API.
Args:
response_dict: The results of the call to the Shoptimizer API.
"""
optimization_results = response_dict.get('optimization-results', '')
plugin_results = response_dict.get('plugin-results', '')
self._log_optimizer_error_msgs(optimization_results)
self._log_optimizer_error_msgs(plugin_results)
logging.info(
'Shoptimizer API finished running for batch #%d with operation %s. '
'Optimizer Results: %s | Plugin Results: %s', self._batch_number,
self._operation.value, optimization_results, plugin_results)
def _log_optimizer_error_msgs(
self, shoptimizer_results: Dict[str, Dict[str, Any]]) -> None:
"""Logs errors returned by individual Shoptimizer API optimizers.
Args:
shoptimizer_results: The results of each individual optimizer returned
from the Shoptimizer API.
"""
if not shoptimizer_results:
return
for optimizer_name, optimizer_results in shoptimizer_results.items():
if optimizer_results.get('result', '') == 'failure':
logging.error(
'Request for batch #%d with operation %s encountered an error when '
'running optimizer %s. Error: %s', self._batch_number,
self._operation.value, optimizer_name,
optimizer_results.get('error_msg', '(error_msg missing)'))
def _load_optimization_params(batch_number: int,
operation: constants.Operation) -> Dict[str, str]:
"""Loads optimization parameters for the Shoptimizer API.
Args:
batch_number: The number that identifies this batch.
operation: The operation to be performed on this batch (upsert, delete,
prevent_expiring).
Returns:
The optimization parameters for the Shoptimizer API.
"""
try:
with open(_CONFIG_FILE_PATH) as shoptimizer_config:
optimization_params = json.loads(shoptimizer_config.read())
except OSError as os_error:
logging.exception(
_ERROR_MSG_TEMPLATE, batch_number, operation.value,
'Failed to read the shoptimizer config. '
'Check config/shoptimizer_config.json exists and has read permissions. '
'Shoptimizer API not called', os_error)
raise
except ValueError as value_error:
logging.exception(
_ERROR_MSG_TEMPLATE, batch_number, operation.value,
'Failed to read the shoptimizer config. '
'Check config/shoptimizer_config.json is valid JSON. '
'Shoptimizer API not called', value_error)
raise
return optimization_params
def _load_config_params() -> Dict[str, str]:
"""Loads configuration parameters for the Shoptimizer API.
The configuration parameters include attributes listed below:
- lang: The code of the language.
- country: The code of the country.
- currency: The code of the currency.
Returns:
The configuration parameters for the Shoptimizer API.
"""
config_params = {
'lang': constants.CONTENT_LANGUAGE,
'country': constants.TARGET_COUNTRY,
'currency': constants.TARGET_CURRENCY,
}
return config_params
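
def _example_shoptimize(batch: constants.Batch) -> constants.Batch:
  """Minimal caller sketch (not used by the uploader itself).
  Assumes constants.Operation exposes an UPSERT member, as the docstrings
  above suggest; adjust to the real enum values if they differ.
  """
  client = ShoptimizerClient(
      batch_number=1, operation=constants.Operation.UPSERT)
  return client.shoptimize(batch)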
|
apache-2.0
| -8,745,425,183,561,239,000
| 34.304965
| 118
| 0.67045
| false
| 4.249253
| true
| false
| false
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/pyatspi/state.py
|
1
|
6076
|
#Copyright (C) 2008 Codethink Ltd
#copyright: Copyright (c) 2005, 2007 IBM Corporation
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License version 2 as published by the Free Software Foundation.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU Lesser General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#Portions of this code originally licensed and copyright (c) 2005, 2007
#IBM Corporation under the BSD license, available at
#U{http://www.opensource.org/licenses/bsd-license.php}
#authors: Peter Parente, Mark Doffman
from gi.repository import Atspi
from gi.repository import GObject
from enum import Enum as _Enum
#------------------------------------------------------------------------------
class StateType(_Enum):
_enum_lookup = {
0:'STATE_INVALID',
1:'STATE_ACTIVE',
2:'STATE_ARMED',
3:'STATE_BUSY',
4:'STATE_CHECKED',
5:'STATE_COLLAPSED',
6:'STATE_DEFUNCT',
7:'STATE_EDITABLE',
8:'STATE_ENABLED',
9:'STATE_EXPANDABLE',
10:'STATE_EXPANDED',
11:'STATE_FOCUSABLE',
12:'STATE_FOCUSED',
13:'STATE_HAS_TOOLTIP',
14:'STATE_HORIZONTAL',
15:'STATE_ICONIFIED',
16:'STATE_MODAL',
17:'STATE_MULTI_LINE',
18:'STATE_MULTISELECTABLE',
19:'STATE_OPAQUE',
20:'STATE_PRESSED',
21:'STATE_RESIZABLE',
22:'STATE_SELECTABLE',
23:'STATE_SELECTED',
24:'STATE_SENSITIVE',
25:'STATE_SHOWING',
26:'STATE_SINGLE_LINE',
27:'STATE_STALE',
28:'STATE_TRANSIENT',
29:'STATE_VERTICAL',
30:'STATE_VISIBLE',
31:'STATE_MANAGES_DESCENDANTS',
32:'STATE_INDETERMINATE',
33:'STATE_REQUIRED',
34:'STATE_TRUNCATED',
35:'STATE_ANIMATED',
36:'STATE_INVALID_ENTRY',
37:'STATE_SUPPORTS_AUTOCOMPLETION',
38:'STATE_SELECTABLE_TEXT',
39:'STATE_IS_DEFAULT',
40:'STATE_VISITED',
41:'STATE_LAST_DEFINED',
}
#------------------------------------------------------------------------------
STATE_ACTIVE = StateType(1)
STATE_ANIMATED = StateType(35)
STATE_ARMED = StateType(2)
STATE_BUSY = StateType(3)
STATE_CHECKED = StateType(4)
STATE_COLLAPSED = StateType(5)
STATE_DEFUNCT = StateType(6)
STATE_EDITABLE = StateType(7)
STATE_ENABLED = StateType(8)
STATE_EXPANDABLE = StateType(9)
STATE_EXPANDED = StateType(10)
STATE_FOCUSABLE = StateType(11)
STATE_FOCUSED = StateType(12)
STATE_HAS_TOOLTIP = StateType(13)
STATE_HORIZONTAL = StateType(14)
STATE_ICONIFIED = StateType(15)
STATE_INDETERMINATE = StateType(32)
STATE_INVALID = StateType(0)
STATE_INVALID_ENTRY = StateType(36)
STATE_IS_DEFAULT = StateType(39)
STATE_LAST_DEFINED = StateType(41)
STATE_MANAGES_DESCENDANTS = StateType(31)
STATE_MODAL = StateType(16)
STATE_MULTISELECTABLE = StateType(18)
STATE_MULTI_LINE = StateType(17)
STATE_OPAQUE = StateType(19)
STATE_PRESSED = StateType(20)
STATE_REQUIRED = StateType(33)
STATE_RESIZABLE = StateType(21)
STATE_SELECTABLE = StateType(22)
STATE_SELECTABLE_TEXT = StateType(38)
STATE_SELECTED = StateType(23)
STATE_SENSITIVE = StateType(24)
STATE_SHOWING = StateType(25)
STATE_SINGLE_LINE = StateType(26)
STATE_STALE = StateType(27)
STATE_SUPPORTS_AUTOCOMPLETION = StateType(37)
STATE_TRANSIENT = StateType(28)
STATE_TRUNCATED = StateType(34)
STATE_VERTICAL = StateType(29)
STATE_VISIBLE = StateType(30)
STATE_VISITED = StateType(40)
#------------------------------------------------------------------------------
# Build a dictionary mapping state values to names based on the prefix of the enum constants.
STATE_VALUE_TO_NAME = dict(((value, name[6:].lower().replace('_', ' '))
for name, value
in globals().items()
if name.startswith('STATE_')))
#------------------------------------------------------------------------------
def _marshal_state_set(bitfield):
"""
The D-Bus protocol has a stateset object passed
as a 64bit bitfield. The Bits are passed as two 32bit
integers.
This function marshals the D-Bus message into a
StateSet object that corresponds to these states.
"""
(lower, upper) = bitfield
states = []
pos = 0
while (lower):
                if 1 & lower:
states.append(StateType(pos))
pos+=1
lower >>= 1
pos = 32
while (upper):
                if 1 & upper:
states.append(StateType(pos))
pos+=1
upper >>= 1
return StateSet(*states)
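# Example (hypothetical values): _marshal_state_set((0b101, 0b1)) yields
# StateType(0) and StateType(2) from the lower word and StateType(32) from
# the upper word, i.e. STATE_INVALID, STATE_ARMED and STATE_INDETERMINATE.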
#------------------------------------------------------------------------------
def stateset_init(self, *states):
        GObject.GObject.__init__(self)
        # Iterate explicitly: map() is lazy in Python 3, so its side effects
        # would never run there.
        for state in states:
                self.add(state)
# TODO: Probably remove this hack for 2.2, since BGO#646581 is fixed
def StateSet_getStates(self):
ret = []
for i in range(0, 64):
if (self.states & (1 << i)):
ret.append(Atspi.StateType(i))
return ret
StateSet = Atspi.StateSet
StateSet.getStates = StateSet_getStates
StateSet.isEmpty = StateSet.is_empty
StateSet.raw = lambda x: x
StateSet.unref = lambda x: None
StateSet.__init__ = stateset_init
|
gpl-3.0
| -7,356,553,817,032,895,000
| 33.134831
| 93
| 0.563858
| false
| 3.92
| false
| false
| false
|
Cadene/pretrained-models.pytorch
|
pretrainedmodels/models/inceptionv4.py
|
1
|
11479
|
from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import os
import sys
__all__ = ['InceptionV4', 'inceptionv4']
pretrained_settings = {
'inceptionv4': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=False) # verify bias false
self.bn = nn.BatchNorm2d(out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_3a(nn.Module):
def __init__(self):
super(Mixed_3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_4a(nn.Module):
def __init__(self):
super(Mixed_4a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 64, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(64, 64, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(64, 96, kernel_size=(3,3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_5a(nn.Module):
def __init__(self):
super(Mixed_5a, self).__init__()
self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class Inception_A(nn.Module):
def __init__(self):
super(Inception_A, self).__init__()
self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_A(nn.Module):
def __init__(self):
super(Reduction_A, self).__init__()
self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(384, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1),
BasicConv2d(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_B(nn.Module):
def __init__(self):
super(Inception_B, self).__init__()
self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(224, 256, kernel_size=(7,1), stride=1, padding=(3,0))
)
self.branch2 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(192, 224, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(224, 224, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(224, 256, kernel_size=(1,7), stride=1, padding=(0,3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_B(nn.Module):
def __init__(self):
super(Reduction_B, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(256, 320, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_C(nn.Module):
def __init__(self):
super(Inception_C, self).__init__()
self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(self, num_classes=1001):
super(InceptionV4, self).__init__()
        # Special attributes
self.input_space = None
self.input_size = (299, 299, 3)
self.mean = None
self.std = None
# Modules
self.features = nn.Sequential(
BasicConv2d(3, 32, kernel_size=3, stride=2),
BasicConv2d(32, 32, kernel_size=3, stride=1),
BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
Mixed_3a(),
Mixed_4a(),
Mixed_5a(),
Inception_A(),
Inception_A(),
Inception_A(),
Inception_A(),
Reduction_A(), # Mixed_6a
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Reduction_B(), # Mixed_7a
Inception_C(),
Inception_C(),
Inception_C()
)
self.last_linear = nn.Linear(1536, num_classes)
def logits(self, features):
#Allows image of any size to be processed
adaptiveAvgPoolWidth = features.shape[2]
x = F.avg_pool2d(features, kernel_size=adaptiveAvgPoolWidth)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
def inceptionv4(num_classes=1000, pretrained='imagenet'):
if pretrained:
settings = pretrained_settings['inceptionv4'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = InceptionV4(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings['url']))
if pretrained == 'imagenet':
new_last_linear = nn.Linear(1536, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = InceptionV4(num_classes=num_classes)
return model
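
# A minimal sketch (not part of the public API): run a random 299x299 batch
# through an untrained InceptionV4 and return the logits shape.
def _example_forward():
    model = inceptionv4(num_classes=10, pretrained=None)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 299, 299))
    return out.shape  # expected: torch.Size([1, 10])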
'''
TEST
Run this code with:
```
cd $HOME/pretrained-models.pytorch
python -m pretrainedmodels.inceptionv4
```
'''
if __name__ == '__main__':
assert inceptionv4(num_classes=10, pretrained=None)
print('success')
assert inceptionv4(num_classes=1000, pretrained='imagenet')
print('success')
assert inceptionv4(num_classes=1001, pretrained='imagenet+background')
print('success')
# fail
assert inceptionv4(num_classes=1001, pretrained='imagenet')
|
bsd-3-clause
| 6,074,634,826,971,776,000
| 31.064246
| 94
| 0.554055
| false
| 3.084095
| false
| false
| false
|
SamyCookie/python-ant
|
demos/ant.core/10-weight.py
|
1
|
3095
|
"""
Extending on demo-03, implements an event callback we can use to process the
incoming data.
"""
from __future__ import print_function
import sys
import time
from ant.core import driver
from ant.core import node
from ant.core import event
from ant.core import message
from ant.core.constants import *
from config import *
NETKEY = '\xB9\xA5\x21\xFB\xBD\x72\xC3\x45'
command_id = 0x46
send_times = 2
pg_num = 1
DP_PAYLOAD = bytearray([command_id, 0xFF, 0xFF, 0, 0, send_times, pg_num, 1])
#DP_PAYLOAD = bytearray([255, 255, 0, 0, send_times, pg_num, 1])
CHANNEL = 1 #TODO: not really, channel is set much later
pay = DP_PAYLOAD
p1 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
pay[6] = 2
p2 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
pay[6] = 3
p3 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
pay[6] = 4
p4 = message.ChannelAcknowledgedDataMessage(number=CHANNEL,data=pay)
RSP = bytearray([0xFF, 0x3A])
class RsMessage(message.ChannelMessage):
type = 0x63
def __init__(self, number=0x00):
super(RsMessage, self).__init__(number=number, payload=RSP)
rs = RsMessage(0)
RECV = 0
class WeightListener(event.EventCallback):
def process(self, msg, _channel):
global RECV
if isinstance(msg, message.ChannelBroadcastDataMessage):
# print('R%04X: ' % RECV, *('%02X' % ord(byte) for byte in msg.payload))
data = str(msg.payload)
print('%04X' % RECV, *('%02X' % ord(byte) for byte in data))
# print [map(ord, msg.payload)]
page_number = msg.payload[1]
RECV += 1
if page_number == 1:
pass
elif page_number == 2:
pass
elif page_number == 3:
pass
elif page_number == 4:
pass
def delete_channel(channel):
channel.close()
channel.unassign()
def reset_channel(antnode, channel=None):
if channel:
delete_channel(channel)
channel = antnode.getFreeChannel()
channel.name = 'C:WGT'
channel.assign(net, CHANNEL_TYPE_TWOWAY_RECEIVE)
channel.setID(119, 0, 0)
    channel.period = 0x2000  # or 0x0020 ???
channel.frequency = 0x39
rs.channelNumber = channel.number
channel.node.evm.writeMessage(rs)
channel.searchTimeout = TIMEOUT_NEVER
channel.open()
channel.registerCallback(WeightListener())
return channel
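# For reference: device type 119 is the ANT+ weight-scale profile; the 0x2000
# channel period is 8192 counts (~4 Hz on the 32768 Hz ANT clock), and
# frequency 0x39 (57) selects 2457 MHz, the standard ANT+ RF channel.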
# Initialize
#LOG=None
#DEBUG=False
stick = driver.USB1Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Setup channel
net = node.Network(name='N:ANT+', key=NETKEY)
antnode.setNetworkKey(0, net)
channel = reset_channel(antnode)
restart = int(time.time())
# Wait
print("Listening for weight scale events ...")
while True:
time.sleep(0.1)
# Restart channel every 3 seconds
now = int(time.time())
if (now % 3 == 0) and (now != restart):
channel = reset_channel(antnode, channel)
RECV = 0
restart = now
# Shutdown
delete_channel(channel)
antnode.stop()
|
mit
| 1,154,412,797,407,415,800
| 24.791667
| 83
| 0.652666
| false
| 3.254469
| false
| false
| false
|
rkspsm/fixed-grid-image-viewer
|
main.py
|
1
|
9870
|
#! /usr/bin/env python3
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
from math import *
from hashlib import sha256
import sys, os
import traceback
_args = sys.argv
class Stuff :
width = 800
height = 600
scale_factor = 0.01
next_k = {Qt.Key_D, Qt.Key_6, Qt.Key_Y}
prev_k = {Qt.Key_A, Qt.Key_4, Qt.Key_E}
refresh = {Qt.Key_X}
pan_toggle = {Qt.Key_Z, Qt.Key_W, Qt.Key_2}
remove_lines_button = {Qt.Key_C, Qt.Key_Home, Qt.Key_9}
pick_line_color = {Qt.Key_Q}
inc_play_rate = {Qt.Key_Up}
dec_play_rate = {Qt.Key_Down}
res_play_rate = {Qt.Key_F}
seek_f = {Qt.Key_Right}
seek_b = {Qt.Key_Left}
seek_0 = {Qt.Key_R}
play_pause = {Qt.Key_Space}
overlays = ['grid.png']
overlay_toggle = {Qt.Key_S, Qt.Key_5}
seek_t = 2 # seconds
zoom_button = Qt.MiddleButton
pan_button = Qt.LeftButton
pick_color_button = Qt.RightButton
@staticmethod
def isImage (f) :
return f.endswith ('.jpg') or f.endswith ('.png') or f.endswith ('.jpeg')
@staticmethod
def isMovie (f) :
return f.endswith ('.mkv') or f.endswith ('.avi') or f.endswith ('.mp4')
@staticmethod
def dist (p1, p2) :
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
return sqrt (dx*dx + dy*dy)
@staticmethod
def tscale (t) :
return (t.m11 (), t.m22 ())
@staticmethod
def string_of_rect (r) :
    return f'rect({r.x()}, {r.y()}, {r.width()}, {r.height()})'
class GfxView (QGraphicsView) :
def setMHandlers (self, mp, mm, mr) :
self.mp = mp
self.mm = mm
self.mr = mr
def setKHandlers (self, kp, kr) :
self.kp = kp
self.kr = kr
def mousePressEvent (self, e) :
self.mp (e)
def mouseReleaseEvent (self, e) :
self.mr (e)
def mouseMoveEvent (self, e) :
self.mm (e)
def keyPressEvent (self, e) :
self.kp (e)
def keyReleaseEvent (self, e) :
self.kr (e)
def sizeHint (self) :
return QSize (Stuff.width, Stuff.height)
class App (QApplication) :
def __init__ (self) :
QApplication.__init__ (self, _args)
self.args = _args[1:]
self.scene = QGraphicsScene ()
self.scene.setSceneRect (0, 0, Stuff.width, Stuff.height)
self.gv = GfxView (self.scene)
self.gv.setHorizontalScrollBarPolicy (Qt.ScrollBarAlwaysOff)
self.gv.setVerticalScrollBarPolicy (Qt.ScrollBarAlwaysOff)
self.gv.show ()
self.err = ''
try :
self.imgdir = self.args[0]
assert (os.path.isdir (self.imgdir))
self.setup ()
except :
traceback.print_exc ()
self.err = 'usage: <prog> <imgdir>'
self.exit (1)
def getFiles (self) :
files = os.listdir (self.imgdir)
files = [os.path.join (self.imgdir, x) for x in files]
files = [x for x in files if os.path.isfile (x)]
files = [x for x in files if Stuff.isImage (x) or Stuff.isMovie (x)]
if len (files) == 0 :
raise Exception ('no images in the dir')
files = list (sorted (files))
return files
def setup (self) :
self.isMedia = False
self.playratepow = 0
self.pan_on = True
self.files = self.getFiles ()
self.index = 0
self.savedTransforms = dict ()
self.lineColor = QColor (0, 0, 0)
self.lines = []
self.player = QMediaPlayer ()
self.overlayItems = [self.scene.addPixmap (QPixmap (x)) for x in Stuff.overlays]
for i, item in enumerate (self.overlayItems) :
item.setZValue (10 + i)
item.setVisible (False)
try :
skip = int (self.args[1])
except :
skip = 0
self.filesOrIndexUpdated (True, skip)
self.m_init ()
self.k_init ()
def removeLines (self) :
for line in self.lines :
self.scene.removeItem (line)
self.lines = []
def playrateUpdated (self) :
pos = self.player.position ()
self.player.setPlaybackRate (pow (2, self.playratepow))
self.player.setPosition (pos)
def getseekt (self) :
factor = pow (2, self.playratepow)
return Stuff.seek_t * factor * 1000
def filesOrIndexUpdated (self, isFirst = False, skip = 0) :
self.isMedia = False
if not isFirst :
self.player.stop ()
skip = 0
self.savedTransforms[self.lastDigest] = QTransform (self.imgItem.transform ())
self.scene.removeItem (self.imgItem)
self.index += skip
self.index = 0 if self.index >= len (self.files) else self.index
f = self.files[self.index]
s = sha256 ()
if Stuff.isImage (f) :
with open (self.files[self.index], 'rb') as handle :
s.update (handle.read ())
else :
s.update (f.encode ('utf-8'))
d = s.digest ()
if Stuff.isImage (f) :
img = QPixmap (self.files[self.index])
self.imgItem = self.scene.addPixmap (img)
wrat = img.width () / Stuff.width
hrat = img.height () / Stuff.height
else :
self.playratepow = 0
self.mediaContent = QMediaContent (QUrl.fromLocalFile (f))
self.player.setMedia (self.mediaContent)
self.player.setMuted (True)
self.imgItem = QGraphicsVideoItem ()
self.player.setVideoOutput (self.imgItem)
self.scene.addItem (self.imgItem)
self.player.play ()
self.isMedia = True
wrat = 1
hrat = 1
rat = wrat if wrat > hrat else hrat
if d in self.savedTransforms :
self.curt = self.savedTransforms[d]
else :
self.curt = QTransform (self.imgItem.transform ()).scale (1 / rat, 1 / rat)
self.imgItem.setTransform (self.curt)
self.lastDigest = d
self.removeLines ()
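
# Note: view transforms are remembered per item, keyed by a sha256 digest
# (file contents for images, so it is stable across renames; the path for
# movies), which lets zoom/pan survive switching back and forth.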
def m_init (self) :
self.gv.setMHandlers (self.mp, self.mm, self.mr)
self.zoom_origin = None
self.noscale = True
def mp (self, e) :
if e.button () == Stuff.zoom_button or e.button () == Stuff.pan_button :
self.zoom_origin = (e.x (), e.y ())
self.curt = QTransform (self.imgItem.transform ())
if e.button () == Stuff.pan_button :
self.noscale = True
self.linePt = self.gv.mapToScene (QPoint (e.x (), e.y ()))
else :
self.noscale = False
def mr (self, e) :
self.zoom_origin = None
if e.button () == Stuff.pick_color_button :
self.lineColor = QColorDialog.getColor ()
def zoi (self) :
pt = QPoint (self.zoom_origin[0], self.zoom_origin[1])
pts = self.gv.mapToScene (pt)
pti = self.imgItem.mapFromScene (pts)
return pti
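
# How the zoom stays anchored under the cursor: mm() below rebuilds the
# transform around the origin (translate out, scale, translate back), then
# measures how the cursor's item-space position drifted (zoi() before vs.
# after applying the new transform) and compensates with a final translate.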
def mm (self, e) :
if self.zoom_origin is None :
return
pt = (e.x (), e.y ())
#d = Stuff.dist (pt, self.zoom_origin)
dx = pt[0] - self.zoom_origin[0]
dy = pt[1] - self.zoom_origin[1]
if self.noscale :
if not self.pan_on :
newPt = self.gv.mapToScene (QPoint (e.x (), e.y ()))
line = self.scene.addLine (QLineF (self.linePt, newPt), QPen (self.lineColor, 2))
line.setZValue (500)
self.lines.append (line)
self.linePt = newPt
return
scale = self.curt.m11 ()
self.tempt = QTransform (self.curt).translate (dx / scale, dy / scale)
self.imgItem.setTransform (self.tempt)
else :
scale = 1 + dx * Stuff.scale_factor
#self.tempt = QTransform (self.curt).scale (scale, scale)
z1 = self.zoi ()
self.tempt = QTransform (self.curt).translate (- self.curt.dx (), - self.curt.dy ()).scale (scale, scale).translate (self.curt.dx (), self.curt.dy ())
self.imgItem.setTransform (self.tempt)
z2 = self.zoi ()
dx = z2.x () - z1.x ()
dy = z2.y () - z1.y ()
self.tempt.translate (dx, dy)
self.imgItem.setTransform (self.tempt)
def k_init (self) :
self.gv.setKHandlers (self.kp, self.kr)
def kp (self, e) :
pass
def kr (self, e) :
if e.key () in Stuff.next_k :
self.index += 1
self.filesOrIndexUpdated ()
elif e.key () in Stuff.prev_k :
self.index -= 1
self.filesOrIndexUpdated ()
elif e.key () in Stuff.overlay_toggle :
for item in self.overlayItems :
item.setVisible (not item.isVisible ())
elif e.key () in Stuff.refresh :
newFiles = self.getFiles ()
curFile = self.files[self.index]
if curFile in newFiles :
newIndex = newFiles.index (curFile)
else :
newIndex = self.index
self.files = newFiles
self.index = newIndex
elif e.key () in Stuff.pan_toggle :
self.pan_on = not self.pan_on
elif e.key () in Stuff.remove_lines_button :
self.removeLines ()
elif e.key () in Stuff.pick_line_color :
self.lineColor = QColorDialog.getColor ()
elif self.isMedia and e.key () in Stuff.inc_play_rate :
self.playratepow += 1
self.playrateUpdated ()
elif self.isMedia and e.key () in Stuff.dec_play_rate :
self.playratepow -= 1
self.playrateUpdated ()
elif self.isMedia and e.key () in Stuff.res_play_rate :
self.playratepow = 0
self.playrateUpdated ()
elif self.isMedia and e.key () in Stuff.seek_f :
t = self.getseekt ()
pos = self.player.position ()
pos += t
pos = 0 if pos < 0 else pos
self.player.setPosition (pos)
elif self.isMedia and e.key () in Stuff.seek_b :
t = self.getseekt ()
pos = self.player.position ()
pos -= t
pos = 0 if pos < 0 else pos
self.player.setPosition (pos)
elif self.isMedia and e.key () in Stuff.seek_0 :
self.player.setPosition (0)
self.player.play ()
elif self.isMedia and e.key () in Stuff.play_pause :
state = self.player.state ()
if state == QMediaPlayer.StoppedState :
self.player.setPosition (0)
self.player.play ()
elif state == QMediaPlayer.PlayingState :
self.player.pause ()
elif state == QMediaPlayer.PausedState :
self.player.play ()
def go (self) :
if self.err != '' :
print (self.err)
sys.exit (1)
else :
sys.exit (self.exec_ ())
App ().go ()
|
agpl-3.0
| 5,272,151,468,709,609,000
| 26.115385
| 156
| 0.60233
| false
| 3.058568
| false
| false
| false
|
xme1226/horizon
|
openstack_dashboard/dashboards/admin/instances/tables.py
|
1
|
7326
|
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import title # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.instances \
import tables as project_tables
from openstack_dashboard import policy
class AdminEditInstance(project_tables.EditInstance):
url = "horizon:admin:instances:update"
class MigrateInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "migrate"
classes = ("btn-migrate", "btn-danger")
policy_rules = (("compute", "compute_extension:admin_actions:migrate"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Migrate Instance",
u"Migrate Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled migration (pending confirmation) of Instance",
u"Scheduled migration (pending confirmation) of Instances",
count
)
def allowed(self, request, instance):
return ((instance.status in project_tables.ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not project_tables.is_deleting(instance))
def action(self, request, obj_id):
api.nova.server_migrate(request, obj_id)
class LiveMigrateInstance(policy.PolicyTargetMixin,
tables.LinkAction):
name = "live_migrate"
verbose_name = _("Live Migrate Instance")
url = "horizon:admin:instances:live_migrate"
classes = ("ajax-modal", "btn-migrate", "btn-danger")
policy_rules = (
("compute", "compute_extension:admin_actions:migrateLive"),)
def allowed(self, request, instance):
return ((instance.status in project_tables.ACTIVE_STATES)
and not project_tables.is_deleting(instance))
class AdminUpdateRow(project_tables.UpdateRow):
def get_data(self, request, instance_id):
instance = super(AdminUpdateRow, self).get_data(request, instance_id)
tenant = api.keystone.tenant_get(request,
instance.tenant_id,
admin=True)
instance.tenant_name = getattr(tenant, "name", None)
return instance
class AdminInstanceFilterAction(tables.FilterAction):
# Change default name of 'filter' to distinguish this one from the
# project instances table filter, since this is used as part of the
# session property used for persisting the filter.
name = "filter_admin_instances"
filter_type = "server"
filter_choices = (('project', _("Project"), True),
('host', _("Host ="), True),
('name', _("Name"), True),
('ip', _("IPv4 Address ="), True),
('ip6', _("IPv6 Address ="), True),
('status', _("Status ="), True),
('image', _("Image ID ="), True),
('flavor', _("Flavor ID ="), True))
class AdminInstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
("rescue", True),
("shelved_offloaded", True),
)
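    # In each (value, boolean) pair the boolean marks whether that value is
    # an acceptable, non-failure state; together with status_columns it
    # drives row styling and the ajax row-update polling.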
tenant = tables.Column("tenant_name", verbose_name=_("Project"))
# NOTE(gabriel): Commenting out the user column because all we have
# is an ID, and correlating that at production scale using our current
# techniques isn't practical. It can be added back in when we have names
# returned in a practical manner by the API.
# user = tables.Column("user_id", verbose_name=_("User"))
host = tables.Column("OS-EXT-SRV-ATTR:host",
verbose_name=_("Host"),
classes=('nowrap-col',))
name = tables.Column("name",
link=("horizon:admin:instances:detail"),
verbose_name=_("Name"))
image_name = tables.Column("image_name",
verbose_name=_("Image Name"))
ip = tables.Column(project_tables.get_ips,
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
size = tables.Column(project_tables.get_size,
verbose_name=_("Size"),
classes=('nowrap-col',),
attrs={'data-type': 'size'})
status = tables.Column(
"status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=project_tables.STATUS_DISPLAY_CHOICES)
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
filters=(title, filters.replace_underscores),
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=project_tables.TASK_DISPLAY_CHOICES)
state = tables.Column(project_tables.get_power_state,
filters=(title, filters.replace_underscores),
verbose_name=_("Power State"))
created = tables.Column("created",
verbose_name=_("Time since created"),
filters=(filters.parse_isotime,
filters.timesince_sortable),
attrs={'data-type': 'timesince'})
class Meta:
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
table_actions = (project_tables.TerminateInstance,
AdminInstanceFilterAction)
row_class = AdminUpdateRow
row_actions = (project_tables.ConfirmResize,
project_tables.RevertResize,
AdminEditInstance,
project_tables.ConsoleLink,
project_tables.LogLink,
project_tables.CreateSnapshot,
project_tables.TogglePause,
project_tables.ToggleSuspend,
MigrateInstance,
LiveMigrateInstance,
project_tables.SoftRebootInstance,
project_tables.RebootInstance,
project_tables.TerminateInstance)
|
apache-2.0
| -1,049,580,477,358,358,000
| 39.927374
| 78
| 0.578078
| false
| 4.616257
| false
| false
| false
|
capecchi/capecchi.github.io
|
projects/ParkMaps/helpers.py
|
1
|
4081
|
from bs4 import BeautifulSoup
import geopy.distance as dist
import glob
import numpy as np
import gpxpy
def consecutive_arrays(arr):
consec_arr = []
consec = []
for i, arr_val in enumerate(arr):
if len(consec) == 0 or arr_val == consec[-1] + 1:
consec.append(arr_val)
else:
consec_arr.append(consec)
consec = [arr_val]
if i == len(arr)-1:
consec_arr.append(consec)
return consec_arr
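
# Example: consecutive_arrays([1, 2, 3, 7, 8]) returns [[1, 2, 3], [7, 8]].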
def point_to_point_dist(pt1, pt2):
return dist.distance(pt1[::-1][1:], pt2[::-1][1:]).m
def point_to_route_min_dist(pt, route, return_dist=False):
# find index of closest approach
lat_m_per_deg = dist.distance([pt[1], pt[0]], [pt[1] + 1., pt[0]]).m
lon_m_per_deg = dist.distance([pt[1], pt[0]], [pt[1], pt[0] + 1.]).m
# slow
# dis = [dist.distance(pt[::-1][1:], route[i, ::-1][1:]).m for i in np.arange(len(route))]
# faster
dis = list(
np.sqrt(((pt[0] - route[:, 0]) * lon_m_per_deg) ** 2 + ((pt[1] - route[:, 1]) * lat_m_per_deg) ** 2)) # [m]
if return_dist:
return dis
else:
i_close_approach = dis.index(min(dis))
min_dist = min(dis)
return min_dist, i_close_approach
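
# The fast path above uses a local flat-earth (equirectangular) approximation:
# metres-per-degree are evaluated once at pt, which is accurate enough for the
# short point-to-route distances this is applied to.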
def index_path_dist_to_ends(index, route, return_both=False):
# get the distance along the path between a route index and the route ends
pt = route[index, :]
lat_m_per_deg = dist.distance([pt[1], pt[0]], [pt[1] + 1., pt[0]]).m
lon_m_per_deg = dist.distance([pt[1], pt[0]], [pt[1], pt[0] + 1.]).m
lon = route[:, 0]
lat = route[:, 1]
dis = np.sqrt(((lon - np.roll(lon, -1)) * lon_m_per_deg) ** 2 + ((lat - np.roll(lat, -1)) * lat_m_per_deg) ** 2) # [m]
dis[0] = 0
if return_both:
return np.sum(dis[:index]), np.sum(dis[index:])
else:
return min([np.sum(dis[:index]), np.sum(dis[index:])])
def chop_off_ends(coords, thresh=75.):
i1, i2 = 0, len(coords[:, 0])-1
try:
while point_to_point_dist(coords[i1, :], coords[0, :]) < thresh:
i1 += 1
while point_to_point_dist(coords[i2, :], coords[-1, :]) < thresh:
i2 -= 1
return coords[i1:i2+1, :] # +1 to include i2 in returned array
except IndexError:
return []
def extract_coords_kml(runfile):
with open(runfile, 'r') as f:
s = BeautifulSoup(f, 'xml')
run_coords = []
for coords in s.find_all('coordinates'):
if len(run_coords) == 0:
run_coords = np.array(process_coordinate_string(coords.string))
else:
run_coords = np.append(run_coords, process_coordinate_string(coords.string))
return run_coords
def extract_coords_gpx(runfile):
with open(runfile, 'r') as f:
gpx = gpxpy.parse(f)
run_coords = []
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
run_coords.append([point.longitude, point.latitude, point.elevation])
return np.array(run_coords)
def process_coordinate_string(str):
"""
Take the coordinate string from the KML file, and break it up into [[Lon,Lat,Alt],[Lon,Lat,Alt],...]
"""
long_lat_alt_arr = []
for point in str.split('\n'):
if len(point) > 0:
long_lat_alt_arr.append(
[float(point.split(',')[0]), float(point.split(',')[1]), float(point.split(',')[2])])
return long_lat_alt_arr
def get_dummy_coordinates(top=True):
m_per_deg = dist.distance([0, 0], [0, 1]).m
lon = np.linspace(-2500 / m_per_deg, 2500 / m_per_deg, num=500) # 5k
alt = np.zeros_like(lon)
lat = np.array([np.sqrt(1000 ** 2 - dist.distance([0, 0], [0, lon[i]]).m ** 2) / m_per_deg if dist.distance(
[0, 0], [0, lon[i]]).m <= 1000. else 0. for i in np.arange(len(lon))])
if not top:
lat *= -1.
run_coords = np.zeros((500, 3))
run_coords[:, 0] = lon
run_coords[:, 1] = lat
run_coords[:, 2] = alt
return run_coords
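
# The dummy route is a 5 km west-to-east line with a semicircular bump of
# 1 km radius centred at the origin; top=False mirrors the bump southward.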
if __name__ == '__main__':
park_dir = 'ElmCreekRuns/'
    for file in glob.glob(park_dir + '*.tcx'):
        rc = None
        if file.split('.')[-1] == 'kml':
            rc = extract_coords_kml(file)
        elif file.split('.')[-1] == 'gpx':
            rc = extract_coords_gpx(file)
        # elif file.split('.')[-1] == 'tcx':
        #     rc = extract_coords_tcx(file)
        if rc is not None:
            # guard: the glob matches .tcx files but no .tcx handler is
            # enabled yet, so printing unconditionally would raise NameError
            print('{} : {}'.format(file, len(rc)))
if __name__ == '__main__':
itest = [1, 2, 3, 4, 6, 7, 10, 11, 12, 13, 25, 26, 28]
consec = consecutive_arrays(itest)
aa=1
|
mit
| 6,959,912,168,072,804,000
| 28.572464
| 120
| 0.608674
| false
| 2.503681
| false
| false
| false
|
mozman/ezdxf
|
src/ezdxf/tools/zipmanager.py
|
1
|
2785
|
# Copyright (c) 2014-2020, Manfred Moitzi
# License: MIT License
from typing import BinaryIO, cast, TextIO, List, Optional
import zipfile
from contextlib import contextmanager
from ezdxf.lldxf.validator import is_dxf_stream, dxf_info
CRLF = b'\r\n'
LF = b'\n'
class ZipReader:
def __init__(self, zip_archive_name: str, errors='surrogateescape'):
if not zipfile.is_zipfile(zip_archive_name):
raise IOError(f"'{zip_archive_name}' is not a zip archive.")
self.zip_archive_name = zip_archive_name
self.zip_archive: Optional[zipfile.ZipFile] = None
self.dxf_file_name: Optional[str] = None
self.dxf_file: Optional[BinaryIO] = None
self.encoding = 'cp1252'
self.errors = errors
self.dxfversion = 'AC1009'
    def open(self, dxf_file_name: Optional[str] = None) -> None:
def open_dxf_file() -> BinaryIO:
# Open always in binary mode:
return cast(BinaryIO, self.zip_archive.open(self.dxf_file_name))
self.zip_archive = zipfile.ZipFile(self.zip_archive_name)
self.dxf_file_name = dxf_file_name if dxf_file_name is not None \
else self.get_first_dxf_file_name()
self.dxf_file = open_dxf_file()
        # Reading with standard encoding 'cp1252' - readline() fails if
        # leading comments contain non-ASCII characters.
if not is_dxf_stream(cast(TextIO, self)):
raise IOError(f"'{self.dxf_file_name}' is not a DXF file.")
self.dxf_file = open_dxf_file() # restart
self.get_dxf_info()
self.dxf_file = open_dxf_file() # restart
def get_first_dxf_file_name(self) -> str:
dxf_file_names = self.get_dxf_file_names()
if len(dxf_file_names) > 0:
return dxf_file_names[0]
else:
raise IOError("No DXF files found.")
def get_dxf_file_names(self) -> List[str]:
return [name for name in self.zip_archive.namelist()
if name.lower().endswith('.dxf')]
def get_dxf_info(self) -> None:
info = dxf_info(cast(TextIO, self))
# Since DXF R2007 (AC1021) file encoding is always 'utf-8'
self.encoding = info.encoding if info.version < 'AC1021' else 'utf-8'
self.dxfversion = info.version
# Required TextIO interface
def readline(self) -> str:
next_line = self.dxf_file.readline().replace(CRLF, LF)
return str(next_line, self.encoding, self.errors)
def close(self) -> None:
self.zip_archive.close()
@contextmanager
def ctxZipReader(zipfilename: str, filename: Optional[str] = None,
                 errors: str = 'surrogateescape') -> ZipReader:
zip_reader = ZipReader(zipfilename, errors=errors)
zip_reader.open(filename)
yield zip_reader
zip_reader.close()
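
# Illustrative usage; the archive and entry names are hypothetical:
#     with ctxZipReader('drawings.zip', 'plan.dxf') as zr:
#         first_line = zr.readline()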
|
mit
| -9,120,386,809,456,004,000
| 36.133333
| 79
| 0.628725
| false
| 3.359469
| false
| false
| false
|
membase/membase-cli
|
util_cli.py
|
1
|
2446
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import itertools
BIG_VALUE = 2 ** 60
SMALL_VALUE = - (2 ** 60)
def hostport(hoststring, default_port=8091):
""" finds the host and port given a host:port string """
try:
host, port = hoststring.split(':')
port = int(port)
except ValueError:
host = hoststring
port = default_port
return (host, port)
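
# Examples: hostport('10.1.2.3:9000') returns ('10.1.2.3', 9000);
#           hostport('10.1.2.3') falls back to ('10.1.2.3', 8091).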
def time_label(s):
# -(2**64) -> '-inf'
# 2**64 -> 'inf'
# 0 -> '0'
    # 4 -> '4 us'
    # 838384 -> '838 ms'
    # 8283852 -> '8 s'
if s > BIG_VALUE:
return 'inf'
elif s < SMALL_VALUE:
return '-inf'
elif s == 0:
return '0'
product = 1
sizes = (('us', 1), ('ms', 1000), ('s', 1000), ('m', 60))
sizeMap = []
for l,sz in sizes:
product = sz * product
sizeMap.insert(0, (l, product))
lbl, factor = itertools.dropwhile(lambda x: x[1] > s, sizeMap).next()
return "%d %s" % (s / factor, lbl)
def size_label(s):
if type(s) in (int, long, float, complex) :
if s == 0:
return "0"
sizes=['', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
e = math.floor(math.log(abs(s), 1024))
suffix = sizes[int(e)]
return "%d %s" % (s/(1024 ** math.floor(e)), suffix)
else:
return s
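
# Example: size_label(2048) returns '2 KB'; non-numeric input is passed through.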
def linreg(X, Y):
"""
Summary
Linear regression of y = ax + b
Usage
        real, real = linreg(list, list)
    Returns the coefficients a and b of the regression line "y=ax+b" fitted to x[] and y[]
"""
if len(X) != len(Y): raise ValueError, 'unequal length'
N = len(X)
Sx = Sy = Sxx = Syy = Sxy = 0.0
for x, y in map(None, X, Y):
Sx = Sx + x
Sy = Sy + y
Sxx = Sxx + x*x
Syy = Syy + y*y
Sxy = Sxy + x*y
det = Sxx * N - Sx * Sx
if det == 0:
return 0, 0
else:
a, b = (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det
return a, b
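
# Example: linreg([0, 1, 2], [1, 3, 5]) returns (2.0, 1.0), i.e. y = 2x + 1.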
def two_pass_variance(data):
    # Textbook two-pass sample variance: first pass computes the mean, the
    # second sums squared deviations; Bessel's n-1 denominator. Accumulators
    # start as floats so Python 2 integer division cannot skew the result.
    n = 0
    sum1 = 0.0
    for x in data:
        n = n + 1
        sum1 = sum1 + x
    if n <= 1:
        return 0
    mean = sum1 / n
    sum2 = 0.0
    for x in data:
        sum2 = sum2 + (x - mean) * (x - mean)
    variance = sum2 / (n - 1)
    return variance
def pretty_float(number, precision=2):
return '%.*f' % (precision, number)
def pretty_print(obj):
import simplejson as json
return json.dumps(obj, indent=4, sort_keys=True)
|
apache-2.0
| 7,375,997,589,813,253,000
| 23.46
| 88
| 0.498365
| false
| 2.99022
| false
| false
| false
|
obeattie/sqlalchemy
|
examples/graphs/directed_graph.py
|
1
|
2180
|
"""a directed graph example."""
from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
from sqlalchemy.orm import mapper, relation, create_session
import logging
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
meta = MetaData('sqlite://')
nodes = Table('nodes', meta,
Column("nodeid", Integer, primary_key=True)
)
# here we have lower.nodeid <= higher.nodeid
edges = Table('edges', meta,
Column("lower_id", Integer, ForeignKey('nodes.nodeid'), primary_key=True),
Column("higher_id", Integer, ForeignKey('nodes.nodeid'), primary_key=True)
)
meta.create_all()
class Node(object):
def __init__(self, id):
self.nodeid = id
def add_neighbor(self, othernode):
Edge(self, othernode)
def higher_neighbors(self):
return [x.higher_node for x in self.lower_edges]
def lower_neighbors(self):
return [x.lower_node for x in self.higher_edges]
class Edge(object):
def __init__(self, n1, n2):
if n1.nodeid < n2.nodeid:
self.lower_node = n1
self.higher_node = n2
else:
self.lower_node = n2
self.higher_node = n1
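
# Normalising every edge to (lower_id, higher_id) gives each node pair one
# canonical row; traversal direction is then recovered from which side of
# the edge a node sits on (the lower_edges vs. higher_edges backrefs).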
mapper(Node, nodes)
mapper(Edge, edges, properties={
'lower_node':relation(Node,
primaryjoin=edges.c.lower_id==nodes.c.nodeid, backref='lower_edges'),
'higher_node':relation(Node,
primaryjoin=edges.c.higher_id==nodes.c.nodeid, backref='higher_edges')
}
)
session = create_session()
# create a directed graph like this:
#   n1 -> n2 -> n5
#            -> n7
#      -> n3 -> n6
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n6 = Node(6)
n7 = Node(7)
n2.add_neighbor(n5)
n3.add_neighbor(n6)
n7.add_neighbor(n2)
n1.add_neighbor(n3)
n2.add_neighbor(n1)
[session.add(x) for x in [n1, n2, n3, n4, n5, n6, n7]]
session.flush()
session.expunge_all()
n2 = session.query(Node).get(2)
n3 = session.query(Node).get(3)
assert [x.nodeid for x in n3.higher_neighbors()] == [6]
assert [x.nodeid for x in n3.lower_neighbors()] == [1]
assert [x.nodeid for x in n2.lower_neighbors()] == [1]
assert [x.nodeid for x in n2.higher_neighbors()] == [5,7]
|
mit
| -4,302,172,050,639,920,600
| 24.348837
| 78
| 0.640367
| false
| 2.974079
| false
| false
| false
|
jbohren-forks/catkin_tools
|
catkin_tools/verbs/catkin_list/cli.py
|
1
|
3698
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from catkin_tools.argument_parsing import add_context_args
from catkin_tools.context import Context
from catkin_pkg.packages import find_packages
from catkin_pkg.package import InvalidPackage
from catkin_tools.terminal_color import ColorMapper
color_mapper = ColorMapper()
clr = color_mapper.clr
def prepare_arguments(parser):
add_context_args(parser)
add = parser.add_argument
    # What packages to list
add('folders', nargs='*',
help='Folders in which to find packages. (default: workspace source space)')
add('--deps', '--dependencies', default=False, action='store_true',
help="List dependencies of each package.")
add('--depends-on', nargs='*',
help="List all packages that depend on supplied argument package(s).")
add('--quiet', default=False, action='store_true',
help="Don't print out detected package warnings.")
add('--unformatted', '-u', default=None, action='store_true',
help='Print list without punctuation and additional details.')
return parser
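
# Illustrative invocations of this verb (flag names as defined above):
#     catkin list --deps
#     catkin list --depends-on roscpp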
def main(opts):
if opts.folders:
folders = opts.folders
else:
# Load the context
ctx = Context.load(opts.workspace, opts.profile, load_env=False)
if not ctx:
print(clr("@{rf}ERROR: Could not determine workspace.@|"), file=sys.stderr)
sys.exit(1)
folders = [ctx.source_space_abs]
list_entry_format = '@{pf}-@| @{cf}%s@|' if not opts.unformatted else '%s'
opts.depends_on = set(opts.depends_on) if opts.depends_on else set()
warnings = []
try:
for folder in folders:
for pkg_pth, pkg in find_packages(folder, warnings=warnings).items():
build_depend_names = [d.name for d in pkg.build_depends]
is_build_dep = opts.depends_on.intersection(
build_depend_names)
run_depend_names = [d.name for d in pkg.run_depends]
is_run_dep = opts.depends_on.intersection(
run_depend_names)
if not opts.depends_on or is_build_dep or is_run_dep:
print(clr(list_entry_format % pkg.name))
if opts.deps:
if build_depend_names:
print(clr(' @{yf}build_depend:@|'))
for dep in build_depend_names:
print(clr(' @{pf}-@| %s' % dep))
if run_depend_names:
print(clr(' @{yf}run_depend:@|'))
for dep in run_depend_names:
print(clr(' @{pf}-@| %s' % dep))
except InvalidPackage as ex:
message = '\n'.join(ex.args)
print(clr("@{rf}Error:@| The directory %s contains an invalid package."
" See below for details:\n\n%s" % (folder, message)))
# Print out warnings
if not opts.quiet:
for warning in warnings:
print(clr("@{yf}Warning:@| %s" % warning), file=sys.stderr)
|
apache-2.0
| 8,735,294,792,840,117,000
| 36.353535
| 87
| 0.604922
| false
| 3.980624
| false
| false
| false
|
davidam/python-examples
|
basics/json/json2pandas.py
|
1
|
1436
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import json
from pprint import pprint
import pandas as pd
from pandas.io.json import json_normalize
data = [
{
'name': {
'first': 'vikash',
'last': 'singh'
},
'age': 27
},
{
'name': {
'first': 'satyam',
'last': 'singh'
},
'age': 14
}
]
df = pd.DataFrame.from_dict(json_normalize(data), orient='columns')
print(df)
print(json_normalize(data))
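# json_normalize flattens the nested dicts into dotted column names,
# so the frame above has the columns: age, name.first, name.last.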
jsondata = open('perceval.json').read()
json_object = json.loads(jsondata)
print(json_object)
|
gpl-3.0
| 6,866,459,863,526,685,000
| 26.037736
| 70
| 0.691556
| false
| 3.301843
| false
| false
| false
|
rhyolight/nupic.son
|
tests/app/melange/views/test_connection.py
|
1
|
69883
|
# Copyright 2014 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for connection related views."""
import mock
import httplib
import unittest
from django import http
from melange.logic import connection as connection_logic
from melange.logic import profile as profile_logic
from melange.models import connection as connection_model
from melange.request import exception
from melange.views import connection as connection_view
from melange.utils import rich_bool
from soc.views.helper import request_data
# TODO(daniel): Summer Of code module cannot be imported here
from soc.modules.gsoc.logic import profile as soc_profile_logic
from tests import org_utils
from tests import profile_utils
from tests import program_utils
from tests.utils import connection_utils
class UrlConnectionIsForCurrentUserAccessCheckerTest(unittest.TestCase):
"""Unit tests for UrlConnectionIsForCurrentUserAccessChecker class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
sponsor = program_utils.seedSponsor()
self.program = program_utils.seedProgram(sponsor_key=sponsor.key())
self.organization = org_utils.seedOrganization(self.program.key())
self.user = profile_utils.seedNDBUser()
profile = profile_utils.seedNDBProfile(
self.program.key(), user=self.user)
connection = connection_utils.seed_new_connection(
profile.key, self.organization.key)
kwargs = {
'sponsor': sponsor.key().name(),
'program': self.program.program_id,
'organization': self.organization.org_id,
'user': self.user.user_id,
'id': str(connection.key.id())
}
self.data = request_data.RequestData(None, None, kwargs)
def testConnectedUserAccessGranted(self):
"""Tests that access is granted for the connected user."""
profile_utils.loginNDB(self.user)
access_checker = (
connection_view.UrlConnectionIsForCurrentUserAccessChecker())
access_checker.checkAccess(self.data, None)
def testAnotherUserAccessDenied(self):
"""Tests that another (not connected) user is denied access."""
# seed another user who is currently logged in
other_user = profile_utils.seedNDBUser()
profile_utils.loginNDB(other_user)
access_checker = (
connection_view.UrlConnectionIsForCurrentUserAccessChecker())
with self.assertRaises(exception.UserError) as context:
access_checker.checkAccess(self.data, None)
self.assertEqual(context.exception.status, httplib.FORBIDDEN)
def testUserWithNoProfileAccessDenied(self):
"""Tests that access for a user with no profile is denied."""
# check for not logged-in user with no profile
profile_utils.logout()
access_checker = (
connection_view.UrlConnectionIsForCurrentUserAccessChecker())
with self.assertRaises(exception.UserError) as context:
access_checker.checkAccess(self.data, None)
self.assertEqual(context.exception.status, httplib.FORBIDDEN)
# check for another user who is currently logged in but
# does not have a profile
other_user = profile_utils.seedNDBUser()
profile_utils.loginNDB(other_user)
access_checker = (
connection_view.UrlConnectionIsForCurrentUserAccessChecker())
with self.assertRaises(exception.UserError) as context:
access_checker.checkAccess(self.data, None)
self.assertEqual(context.exception.status, httplib.FORBIDDEN)
def testOrgAdminAccessDenied(self):
"""Tests that org admin for connected organization is denied access."""
# seed another user who is currently logged in
other_user = profile_utils.seedNDBUser()
profile_utils.loginNDB(other_user)
profile_utils.seedNDBProfile(
self.program.key(), user=other_user, admin_for=[self.organization.key])
access_checker = (
connection_view.UrlConnectionIsForCurrentUserAccessChecker())
with self.assertRaises(exception.UserError) as context:
access_checker.checkAccess(self.data, None)
self.assertEqual(context.exception.status, httplib.FORBIDDEN)
class IsUserOrgAdminForUrlConnectionTest(unittest.TestCase):
"""Unit tests for IsUserOrgAdminForUrlConnection class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
sponsor = program_utils.seedSponsor()
self.program = program_utils.seedProgram(sponsor_key=sponsor.key())
self.organization = org_utils.seedOrganization(self.program.key())
self.user = profile_utils.seedNDBUser()
profile = profile_utils.seedNDBProfile(
self.program.key(), user=self.user)
connection = connection_utils.seed_new_connection(
profile.key, self.organization.key)
kwargs = {
'sponsor': sponsor.key().name(),
'program': self.program.program_id,
'organization': self.organization.org_id,
'user': self.user.user_id,
'id': str(connection.key.id())
}
self.data = request_data.RequestData(None, None, kwargs)
def testOrgAdminAccessGranted(self):
"""Tests that access is granted for org admin for the connected org."""
# seed a user who is currently logged in
other_user = profile_utils.seedNDBUser()
profile_utils.loginNDB(other_user)
profile_utils.seedNDBProfile(
self.program.key(), user=other_user, admin_for=[self.organization.key])
access_checker = connection_view.IsUserOrgAdminForUrlConnection()
access_checker.checkAccess(self.data, None)
def testConnectedUserAccessDenied(self):
"""Tests that access is denied for connected user."""
profile_utils.loginNDB(self.user)
access_checker = connection_view.IsUserOrgAdminForUrlConnection()
with self.assertRaises(exception.UserError) as context:
access_checker.checkAccess(self.data, None)
self.assertEqual(context.exception.status, httplib.FORBIDDEN)
def testOtherOrgAdminAccessDenied(self):
"""Tests that access is denied for org admin for another org."""
# seed another organization
other_org = org_utils.seedOrganization(self.program.key())
# seed a user who is currently logged in
other_user = profile_utils.seedNDBUser()
profile_utils.loginNDB(other_user)
profile_utils.seedNDBProfile(
self.program.key(), user=other_user, admin_for=[other_org.key])
access_checker = connection_view.IsUserOrgAdminForUrlConnection()
with self.assertRaises(exception.UserError) as context:
access_checker.checkAccess(self.data, None)
self.assertEqual(context.exception.status, httplib.FORBIDDEN)
class _MockView(object):
"""Simple request handler to be used as a callback for other handlers."""
def get(self, data, access, mutator):
"""See base.RequestHandler.get for specification."""
pass
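
# Shared pattern in the handler tests below: when a handler actually mutates
# the connection, the seen_by flags flip (the acting side True, the other
# False) and a ConnectionMessage is recorded; no-op requests leave both
# flags untouched and create no message.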
class UserActionsFormHandlerTest(unittest.TestCase):
"""Unit tests for UserActionsFormHandler class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.sponsor = program_utils.seedSponsor()
self.program = program_utils.seedProgram(sponsor_key=self.sponsor.key())
self.org = org_utils.seedOrganization(self.program.key())
# unused object used as a callback for the handler
self.view = _MockView()
def testUserNoRoleToNoRoleWhileNoRoleOffered(self):
"""Tests NO ROLE if user has no role and no role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# no role is offered to the user; the user does not request any role
connection = connection_utils.seed_new_connection(profile.key, self.org.key)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user still does not request any role
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testUserNoRoleToNoRoleWhileMentorRoleOffered(self):
"""Tests NO ROLE if user has no role and mentor role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# mentor role is offered to the user; the user does not request any role
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.MENTOR_ROLE)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user still does not request any role
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testUserNoRoleToNoRoleWhileOrgAdminRoleOffered(self):
"""Tests NO ROLE if user has no role and org admin role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# org admin role is offered to the user; the user does not request any role
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.ORG_ADMIN_ROLE)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user still does not request any role
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testUserNoRoleToRoleWhileNoRoleOffered(self):
"""Tests ROLE if user has no role and no role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# no role is offered to the user; the user does not request any role
connection = connection_utils.seed_new_connection(profile.key, self.org.key)
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user requests a role now
request.POST = {'role': connection_model.ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertTrue(connection.seen_by_user)
self.assertFalse(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(connection_logic._USER_REQUESTS_ROLE, message.content)
def testUserNoRoleToRoleWhileMentorRoleOffered(self):
"""Tests ROLE if user has no role and mentor role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# mentor role is offered to the user; the user does not request any role
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.MENTOR_ROLE)
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user requests a role now
request.POST = {'role': connection_model.ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertTrue(connection.seen_by_user)
self.assertFalse(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(connection_logic._USER_REQUESTS_ROLE, message.content)
def testUserNoRoleToRoleWhileOrgAdminRoleOffered(self):
"""Tests ROLE if user has no role and org admin role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# org admin role is offered to the user; the user does not request any role
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.ORG_ADMIN_ROLE)
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user requests a role now
request.POST = {'role': connection_model.ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertTrue(connection.seen_by_user)
self.assertFalse(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(connection_logic._USER_REQUESTS_ROLE, message.content)
def testUserRoleToRoleWhileNoRoleOffered(self):
"""Tests ROLE if user has role and no role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# no role is offered to the user; the user requests role
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, user_role=connection_model.ROLE)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user still requests a role
request.POST = {'role': connection_model.ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testUserRoleToRoleWhileMentorRoleOffered(self):
"""Tests ROLE if user has role and mentor role is offered."""
# mentor role is offered to the user; the user requests role
profile = profile_utils.seedNDBProfile(
self.program.key(), mentor_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user still requests a role
request.POST = {'role': connection_model.ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testUserRoleToRoleWhileOrgAdminRoleOffered(self):
"""Tests ROLE if user has role and org admin role is offered."""
# org admin role is offered to the user; the user requests role
profile = profile_utils.seedNDBProfile(
self.program.key(), admin_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user still requests a role
request.POST = {'role': connection_model.ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testUserRoleToNoRoleWhileNoRoleOffered(self):
"""Tests NO ROLE if user has role and no role is offered."""
profile = profile_utils.seedNDBProfile(self.program.key())
# no role is offered to the user; the user requests role
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, user_role=connection_model.ROLE)
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user does not request role anymore
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertTrue(connection.seen_by_user)
self.assertFalse(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(
connection_logic._USER_DOES_NOT_REQUEST_ROLE, message.content)
def testUserRoleToNoRoleWhileMentorRoleOffered(self):
"""Tests NO ROLE if user has role and mentor role is offered."""
# mentor role is offered to the user; the user requests role
profile = profile_utils.seedNDBProfile(
self.program.key(), mentor_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_user = connection.seen_by_user
old_seen_by_org = connection.seen_by_org
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id()),
}
request = http.HttpRequest()
# the user does not request role anymore
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
# assume that mentor is not eligible to quit
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.FALSE):
with self.assertRaises(exception.UserError) as context:
handler.handle(data, None, None)
self.assertEqual(context.exception.status, httplib.BAD_REQUEST)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
# try again but now, the user is eligible to quit
request = http.HttpRequest()
# the user does not request role anymore
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
# assume that mentor is eligible to quit
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.TRUE):
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertTrue(connection.seen_by_user)
self.assertFalse(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(
connection_logic._USER_DOES_NOT_REQUEST_ROLE, message.content)
def testUserRoleToNoRoleWhileOrgAdminRoleOffered(self):
"""Tests NO ROLE if user has role and org admin role is offered."""
# org admin role is offered to the user; the user requests role
profile = profile_utils.seedNDBProfile(
self.program.key(), admin_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_user = connection.seen_by_user
old_seen_by_org = connection.seen_by_org
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': str(connection.key.id())
}
request = http.HttpRequest()
# the user does not request role anymore
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
# assume that mentor is not eligible to quit
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.FALSE):
with self.assertRaises(exception.UserError) as context:
handler.handle(data, None, None)
self.assertEqual(context.exception.status, httplib.BAD_REQUEST)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
# try again but now, the user is eligible to quit
request = http.HttpRequest()
# the user does not request role anymore
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
    handler = connection_view.UserActionsFormHandler(self.view, url='unused')
# assume that mentor is eligible to quit
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.TRUE):
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertTrue(connection.seen_by_user)
self.assertFalse(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(connection_logic._USER_DOES_NOT_REQUEST_ROLE, message.content)
def _generatedMessageContent(org_role, org_admin):
"""Returns part of content of a message that is generated when role offered
by organization changes.
Args:
org_role: new role offered by organization.
org_admin: profile entity of org admin who changed the role
Returns:
a string that is a part of message content that is generated.
"""
return connection_logic._ORG_ROLE_CHANGED % (
connection_model.VERBOSE_ROLE_NAMES[org_role], org_admin.public_name)
class OrgActionsFormHandlerTest(unittest.TestCase):
"""Unit tests for OrgActionsFormHandler class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.sponsor = program_utils.seedSponsor()
self.program = program_utils.seedProgram(sponsor_key=self.sponsor.key())
self.org = org_utils.seedOrganization(self.program.key())
# unused object used as a callback for the handler
self.view = _MockView()
def testNoRoleToNoRoleWhileNoRoleRequested(self):
"""Tests NO ROLE if no role offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(profile.key, self.org.key)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# no role is still offered
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testNoRoleToNoRoleWhileRoleRequested(self):
"""Tests NO ROLE if no role offered and user requests role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user requests role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, user_role=connection_model.ROLE)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# no role is still offered
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testNoRoleToMentorRoleWhileNoRoleRequested(self):
"""Tests MENTOR ROLE if no role offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(profile.key, self.org.key)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# mentor role is offered now
request.POST = {'role': connection_model.MENTOR_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.MENTOR_ROLE, data.ndb_profile), message.content)
def testNoRoleToMentorRoleWhileRoleRequested(self):
"""Tests MENTOR ROLE if no role offered and user requests role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user requests role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, user_role=connection_model.ROLE)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# mentor role is offered now
request.POST = {'role': connection_model.MENTOR_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.MENTOR_ROLE, data.ndb_profile), message.content)
def testNoRoleToOrgAdminRoleWhileNoRoleRequested(self):
"""Tests ORG ADMIN ROLE if no role offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(profile.key, self.org.key)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# org admin role is offered now
request.POST = {'role': connection_model.ORG_ADMIN_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.ORG_ADMIN_ROLE, data.ndb_profile), message.content)
def testNoRoleToOrgAdminRoleWhileRoleRequested(self):
"""Tests ORG ADMIN ROLE if no role offered and user requests role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user requests role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, user_role=connection_model.ROLE)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# org admin role is offered now
request.POST = {'role': connection_model.ORG_ADMIN_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.ORG_ADMIN_ROLE, data.ndb_profile), message.content)
def testMentorRoleToNoRoleWhileNoRoleRequested(self):
"""Tests NO ROLE if mentor role offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.MENTOR_ROLE)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# no role is offered now
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.NO_ROLE, data.ndb_profile), message.content)
def testMentorRoleToNoRoleWhileRoleRequested(self):
"""Tests NO ROLE if mentor role offered and user requests role."""
# user is a mentor for organization
profile = profile_utils.seedNDBProfile(
self.program.key(), mentor_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# no role is offered now
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# assume that mentor cannot be removed
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.FALSE):
with self.assertRaises(exception.UserError) as context:
handler.handle(data, None, None)
self.assertEqual(context.exception.status, httplib.BAD_REQUEST)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
# now the mentor can be removed
request = http.HttpRequest()
# no role is offered now
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
# assume that mentor can be removed
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.TRUE):
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.NO_ROLE, data.ndb_profile), message.content)
def testMentorRoleToMentorRoleWhileNoRoleRequested(self):
"""Tests MENTOR ROLE if mentor role offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.MENTOR_ROLE)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# mentor role is offered now
request.POST = {'role': connection_model.MENTOR_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testMentorRoleToMentorRoleWhileRoleRequested(self):
"""Tests MENTOR ROLE if mentor role offered and user requests role."""
# user is a mentor for organization
profile = profile_utils.seedNDBProfile(
self.program.key(), mentor_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# mentor role is offered now
request.POST = {'role': connection_model.MENTOR_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testMentorRoleToOrgAdminRoleWhileNoRoleRequested(self):
"""Tests ORG ADMIN if mentor role offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.MENTOR_ROLE)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# org admin role is offered now
request.POST = {'role': connection_model.ORG_ADMIN_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.ORG_ADMIN_ROLE, data.ndb_profile), message.content)
def testMentorRoleToOrgAdminRoleWhileRoleRequested(self):
"""Tests ORG ADMIN if mentor role offered and user requests role."""
# user is a mentor for organization
profile = profile_utils.seedNDBProfile(
self.program.key(), mentor_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# org admin role is offered now
request.POST = {'role': connection_model.ORG_ADMIN_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.ORG_ADMIN_ROLE, data.ndb_profile), message.content)
def testOrgAdminRoleToNoRoleWhileNoRoleRequested(self):
"""Tests NO ROLE if org admin offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.ORG_ADMIN_ROLE)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# no role is offered now
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.NO_ROLE, data.ndb_profile), message.content)
def testOrgAdminRoleToNoRoleWhileRoleRequested(self):
"""Tests NO ROLE if org admin offered and user requests role."""
# user is an org admin for organization
profile = profile_utils.seedNDBProfile(
self.program.key(), admin_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# no role is offered now
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
# assume that org admin cannot be removed
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.FALSE):
with self.assertRaises(exception.UserError) as context:
handler.handle(data, None, None)
self.assertEqual(context.exception.status, httplib.BAD_REQUEST)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
    # now the org admin can be removed
request = http.HttpRequest()
# no role is offered now
request.POST = {'role': connection_model.NO_ROLE}
data = request_data.RequestData(request, None, kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
# assume that org admin can be removed
with mock.patch.object(
soc_profile_logic, 'isNoRoleEligibleForOrg', return_value=rich_bool.TRUE):
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.NO_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.NO_ROLE, data.ndb_profile), message.content)
def testOrgAdminRoleToMentorRoleWhileNoRoleRequested(self):
"""Tests MENTOR ROLE if org admin offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.ORG_ADMIN_ROLE)
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# mentor role is offered now
request.POST = {'role': connection_model.MENTOR_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.MENTOR_ROLE, data.ndb_profile), message.content)
def testOrgAdminRoleToMentorRoleWhileRoleRequested(self):
"""Tests MENTOR ROLE if org admin offered and user requests role."""
# user is an org admin for organization
profile = profile_utils.seedNDBProfile(
self.program.key(), admin_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# mentor role is offered now
request.POST = {'role': connection_model.MENTOR_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# assume that org admin cannot be removed
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
with mock.patch.object(
profile_logic, 'isMentorRoleEligibleForOrg',
return_value=rich_bool.FALSE):
with self.assertRaises(exception.UserError) as context:
handler.handle(data, None, None)
self.assertEqual(context.exception.status, httplib.BAD_REQUEST)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
# now the org admin can be removed
request = http.HttpRequest()
# mentor role is offered now
request.POST = {'role': connection_model.MENTOR_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
# seed an organization administrator who makes the decision
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
# assume that org admin can be removed
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
with mock.patch.object(
profile_logic, 'isMentorRoleEligibleForOrg',
return_value=rich_bool.TRUE):
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.MENTOR_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# connection changed, so seen by properties are changed
self.assertFalse(connection.seen_by_user)
self.assertTrue(connection.seen_by_org)
# check that a connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIn(_generatedMessageContent(
connection_model.MENTOR_ROLE, data.ndb_profile), message.content)
def testOrgAdminRoleToOrgAdminRoleWhileNoRoleRequested(self):
"""Tests ORG ADMIN if org admin offered and user requests no role."""
profile = profile_utils.seedNDBProfile(self.program.key())
# user does not request any role from organization
connection = connection_utils.seed_new_connection(
profile.key, self.org.key, org_role=connection_model.ORG_ADMIN_ROLE)
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# org admin role is offered now
request.POST = {'role': connection_model.ORG_ADMIN_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.NO_ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertNotIn(self.org.key, profile.admin_for)
self.assertNotIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
def testOrgAdminRoleToOrgAdminRoleWhileRoleRequested(self):
"""Tests ORG ADMIN if org admin offered and user requests role."""
    # user is an org admin for the organization
profile = profile_utils.seedNDBProfile(
self.program.key(), admin_for=[self.org.key])
connection = connection_model.Connection.query(
connection_model.Connection.organization == self.org.key,
ancestor=profile.key).get()
old_seen_by_org = connection.seen_by_org
old_seen_by_user = connection.seen_by_user
self.kwargs = {
'sponsor': self.sponsor.link_id,
'program': self.program.program_id,
'user': profile.profile_id,
'id': connection.key.id()
}
request = http.HttpRequest()
# org admin role is offered now
request.POST = {'role': connection_model.ORG_ADMIN_ROLE}
data = request_data.RequestData(request, None, self.kwargs)
handler = connection_view.OrgActionsFormHandler(self.view, url='unused')
handler.handle(data, None, None)
# check if all data is updated properly
connection = connection.key.get()
profile = profile.key.get()
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertIn(self.org.key, profile.admin_for)
self.assertIn(self.org.key, profile.mentor_for)
# nothing has changed, so seen by properties are not changed
self.assertEqual(connection.seen_by_user, old_seen_by_user)
self.assertEqual(connection.seen_by_org, old_seen_by_org)
# check that no connection message is created
query = connection_model.ConnectionMessage.query(ancestor=connection.key)
message = query.get()
self.assertIsNone(message)
license: apache-2.0 | hash: -1343540183369436200 | line_mean: 39.232009 | line_max: 83 | alpha_frac: 0.711332 | autogenerated: false | ratio: 3.754123 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: stefanseefeld/numba | path: numba/tests/test_cffi.py | copies: 1 | size: 4966 | content:
from __future__ import print_function, division, absolute_import
import array
import numpy as np
import sys
from numba import unittest_support as unittest
from numba import jit, cffi_support, types, errors
from numba.compiler import compile_isolated, Flags
from numba.tests.support import TestCase, tag
import numba.tests.cffi_usecases as mod
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
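# Roughly: enable_pyobj_flags permits falling back to object (pyobject) mode,
# while the empty no_pyobj_flags compiles in nopython mode, so typing failures
# surface as errors instead of a fallback.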
@unittest.skipUnless(cffi_support.SUPPORTED,
"CFFI not supported -- please install the cffi module")
class TestCFFI(TestCase):
# Need to run the tests serially because of race conditions in
# cffi's OOL mode.
_numba_parallel_test_ = False
def setUp(self):
mod.init()
mod.init_ool()
def test_type_map(self):
signature = cffi_support.map_type(mod.ffi.typeof(mod.cffi_sin))
self.assertEqual(len(signature.args), 1)
self.assertEqual(signature.args[0], types.double)
def _test_function(self, pyfunc, flags=enable_pyobj_flags):
cres = compile_isolated(pyfunc, [types.double], flags=flags)
cfunc = cres.entry_point
for x in [-1.2, -1, 0, 0.1, 3.14]:
self.assertPreciseEqual(pyfunc(x), cfunc(x))
def test_sin_function(self):
self._test_function(mod.use_cffi_sin)
@tag('important')
def test_sin_function_npm(self):
self._test_function(mod.use_cffi_sin, flags=no_pyobj_flags)
    def test_sin_function_ool(self):
self._test_function(mod.use_cffi_sin_ool)
def test_sin_function_npm_ool(self):
self._test_function(mod.use_cffi_sin_ool, flags=no_pyobj_flags)
def test_two_funcs(self):
# Check that two constant functions don't get mixed up.
self._test_function(mod.use_two_funcs)
def test_two_funcs_ool(self):
self._test_function(mod.use_two_funcs_ool)
def test_function_pointer(self):
pyfunc = mod.use_func_pointer
cfunc = jit(nopython=True)(pyfunc)
for (fa, fb, x) in [
(mod.cffi_sin, mod.cffi_cos, 1.0),
(mod.cffi_sin, mod.cffi_cos, -1.0),
(mod.cffi_cos, mod.cffi_sin, 1.0),
(mod.cffi_cos, mod.cffi_sin, -1.0),
(mod.cffi_sin_ool, mod.cffi_cos_ool, 1.0),
(mod.cffi_sin_ool, mod.cffi_cos_ool, -1.0),
(mod.cffi_cos_ool, mod.cffi_sin_ool, 1.0),
(mod.cffi_cos_ool, mod.cffi_sin_ool, -1.0),
(mod.cffi_sin, mod.cffi_cos_ool, 1.0),
(mod.cffi_sin, mod.cffi_cos_ool, -1.0),
(mod.cffi_cos, mod.cffi_sin_ool, 1.0),
(mod.cffi_cos, mod.cffi_sin_ool, -1.0)]:
expected = pyfunc(fa, fb, x)
got = cfunc(fa, fb, x)
self.assertEqual(got, expected)
# A single specialization was compiled for all calls
self.assertEqual(len(cfunc.overloads), 1, cfunc.overloads)
def test_user_defined_symbols(self):
pyfunc = mod.use_user_defined_symbols
cfunc = jit(nopython=True)(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def check_vector_sin(self, cfunc, x, y):
cfunc(x, y)
np.testing.assert_allclose(y, np.sin(x))
def _test_from_buffer_numpy_array(self, pyfunc, dtype):
x = np.arange(10).astype(dtype)
y = np.zeros_like(x)
cfunc = jit(nopython=True)(pyfunc)
self.check_vector_sin(cfunc, x, y)
@tag('important')
def test_from_buffer_float32(self):
self._test_from_buffer_numpy_array(mod.vector_sin_float32, np.float32)
def test_from_buffer_float64(self):
self._test_from_buffer_numpy_array(mod.vector_sin_float64, np.float64)
def test_from_buffer_struct(self):
n = 10
x = np.arange(n) + np.arange(n * 2, n * 3) * 1j
y = np.zeros(n)
real_cfunc = jit(nopython=True)(mod.vector_extract_real)
real_cfunc(x, y)
np.testing.assert_equal(x.real, y)
imag_cfunc = jit(nopython=True)(mod.vector_extract_imag)
imag_cfunc(x, y)
np.testing.assert_equal(x.imag, y)
@unittest.skipIf(sys.version_info < (3,),
"buffer protocol on array.array needs Python 3+")
def test_from_buffer_pyarray(self):
pyfunc = mod.vector_sin_float32
cfunc = jit(nopython=True)(pyfunc)
x = array.array("f", range(10))
y = array.array("f", [0] * len(x))
self.check_vector_sin(cfunc, x, y)
def test_from_buffer_error(self):
pyfunc = mod.vector_sin_float32
cfunc = jit(nopython=True)(pyfunc)
# Non-contiguous array
x = np.arange(10).astype(np.float32)[::2]
y = np.zeros_like(x)
with self.assertRaises(errors.TypingError) as raises:
cfunc(x, y)
self.assertIn("from_buffer() unsupported on non-contiguous buffers",
str(raises.exception))
if __name__ == '__main__':
unittest.main()
license: bsd-2-clause | hash: -4280643776205915600 | line_mean: 33.727273 | line_max: 78 | alpha_frac: 0.610552 | autogenerated: false | ratio: 3.135101 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: jackpien/rosbots | path: ros_ws/src/rosbots_driver/scripts/rosbots_driver/motor_driver.py | copies: 1 | size: 4631 | content:
#!/usr/bin/env python
#import RPIO as GPIO
import RPi.GPIO as GPIO
from RPIO import PWM
import RPIO
import rospy
from geometry_msgs.msg import Twist
class MotorDriverL9110S:
# Broadcom pin outs
# https://www.element14.com/community/servlet/JiveServlet/previewBody/73950-102-10-339300/pi3_gpio.png
left_ia = 23
left_ib = 24
right_ia = 20
right_ib = 21
encoder_right = 22
encoder_left = 17
pwm_subcycle_time_us = 20000 # 20ms cycle for PWM
pwm_max_width = 20000
pwm_granularity = 10
def __init__(self):
rospy.init_node('motor_driver', anonymous=True)
rospy.Subscriber("twist", Twist, self.twist_callback)
rospy.on_shutdown(self.shutdown_cb)
GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
GPIO.cleanup()
GPIO.setup(self.left_ib, GPIO.OUT)
GPIO.setup(self.right_ib, GPIO.OUT)
GPIO.setup(self.encoder_right, GPIO.IN) # Right
GPIO.setup(self.encoder_left, GPIO.IN) # Left
self._servo = PWM.Servo(subcycle_time_us=self.pwm_subcycle_time_us)
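        # RPIO's DMA-based PWM repeats every pwm_subcycle_time_us (20 ms);
        # because pwm_max_width equals the subcycle length, pulse widths of
        # 0..pwm_max_width map directly onto 0..100% duty cycle.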
self._servo.set_servo(self.left_ia, 0)
self._servo.set_servo(self.right_ia, 0)
GPIO.output(self.left_ib, GPIO.LOW)
GPIO.output(self.right_ib, GPIO.LOW)
# Two GPIO interrupt callbacks for encoder
RPIO.setup(self.encoder_right, RPIO.IN)
RPIO.add_interrupt_callback(self.encoder_right,
self.encoder_callback, edge='rising',
debounce_timeout_ms=10,
pull_up_down=RPIO.PUD_DOWN,
threaded_callback=True)
RPIO.setup(self.encoder_left, RPIO.IN)
RPIO.add_interrupt_callback(self.encoder_left,
self.encoder_callback, edge='rising',
debounce_timeout_ms=10,
pull_up_down=RPIO.PUD_DOWN,
threaded_callback=True)
# Starts waiting for interrupts
RPIO.wait_for_interrupts(threaded=True)
def shutdown_cb(self):
rospy.loginfo(rospy.get_caller_id() + ": Shutdown callback")
GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
self._servo.stop_servo(self.left_ia)
self._servo.stop_servo(self.right_ia)
GPIO.output(self.left_ib, GPIO.LOW)
GPIO.output(self.right_ib, GPIO.LOW)
GPIO.cleanup()
RPIO.cleanup()
def twist_callback(self, data):
rospy.loginfo(rospy.get_caller_id() + \
": Linear.x: %f -- Angular.z: %f", \
data.linear.x, data.angular.z)
x_dir = max(-1, min(1, data.linear.x))
z_ang = max(-1, min(1, data.angular.z))
lw = x_dir
rw = x_dir
if z_ang != 0:
            # positive angular.z (counterclockwise in ROS) slows the
            # left wheel and speeds up the right
lw -= z_ang
rw += z_ang
lw = max(-1, min(1, lw))
rw = max(-1, min(1, rw))
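        # Worked example: linear.x = 0.5, angular.z = 0.2 gives lw = 0.3 and
        # rw = 0.7, so the right wheel runs faster and the robot arcs to the
        # left.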
rospy.loginfo(rospy.get_caller_id() + ": lw: %f -- rw: %f", lw, rw)
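        # Drive each wheel through the L9110S: with IB low the motor runs
        # forward and the IA pulse width sets the speed directly; with IB
        # high it runs in reverse and the speed follows the LOW portion of
        # IA, which is why pw is mirrored around pwm_max_width below.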
if lw == 0:
self._servo.set_servo(self.left_ia, 0)
GPIO.output(self.left_ib, GPIO.LOW)
else:
if lw > 0:
pw = self.pwm_max_width * lw
GPIO.output(self.left_ib, GPIO.LOW)
else:
pw = self.pwm_max_width - (self.pwm_max_width * (lw*-1))
GPIO.output(self.left_ib, GPIO.HIGH)
pw = int(pw/self.pwm_granularity) * self.pwm_granularity
self._servo.set_servo(self.left_ia, pw)
if rw == 0:
self._servo.set_servo(self.right_ia, 0)
GPIO.output(self.right_ib, GPIO.LOW)
else:
if rw > 0:
pw = self.pwm_max_width * rw
GPIO.output(self.right_ib, GPIO.LOW)
else:
pw = self.pwm_max_width - (self.pwm_max_width * (rw*-1))
GPIO.output(self.right_ib, GPIO.HIGH)
pw = int(pw/self.pwm_granularity) * self.pwm_granularity
self._servo.set_servo(self.right_ia, pw)
def encoder_callback(self, gpio_id, val):
rospy.loginfo(rospy.get_caller_id() + ": gpio %s: %s", gpio_id, val)
def main():
mdriver = MotorDriverL9110S()
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
main()
license: gpl-3.0 | hash: 444582640532802600 | line_mean: 30.937931 | line_max: 106 | alpha_frac: 0.525157 | autogenerated: false | ratio: 3.44825 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: bieschke/nuffle | path: lib/python/formencode/validators.py | copies: 1 | size: 59292 | content:
## FormEncode, a Form processor
## Copyright (C) 2003, Ian Bicking <ianb@colorstudy.com>
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## NOTE: In the context of the Python environment, I interpret "dynamic
## linking" as importing -- thus the LGPL applies to the contents of
## the modules, but make no requirements on code importing these
## modules.
"""
Validator/Converters for use with FormEncode.
"""
import re
DateTime = None
mxlookup = None
httplib = None
urlparse = None
from interfaces import *
from api import *
sha = random = None
import cgi
import fieldstorage
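# Compatibility shim for old Python versions without the True/False builtins.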
True, False = (1==1), (0==1)
############################################################
## Wrapper Validators
############################################################
datetime_module = None
mxDateTime_module = None
def import_datetime(module_type):
global datetime_module, mxDateTime_module
if module_type is None:
try:
if datetime_module is None:
import datetime as datetime_module
return datetime_module
except ImportError:
if mxDateTime_module is None:
from mx import DateTime as mxDateTime_module
return mxDateTime_module
module_type = module_type.lower()
assert module_type in ('datetime', 'mxdatetime')
if module_type == 'datetime':
if datetime_module is None:
import datetime as datetime_module
return datetime_module
else:
if mxDateTime_module is None:
from mx import DateTime as mxDateTime_module
return mxDateTime_module
def datetime_now(module):
if module.__name__ == 'datetime':
return module.datetime.now()
else:
return module.now()
def datetime_makedate(module, year, month, day):
if module.__name__ == 'datetime':
return module.date(year, month, day)
else:
try:
return module.DateTime(year, month, day)
except module.RangeError, e:
raise ValueError(str(e))
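# Example (a sketch): import_datetime(None) prefers the stdlib datetime module
# and falls back to mx.DateTime; with either module,
# datetime_makedate(module, 2003, 2, 30) raises ValueError, since the mx
# RangeError is normalized above.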
class ConfirmType(FancyValidator):
"""
Confirms that the input/output is of the proper type.
Uses the parameters:
subclass:
The class or a tuple of classes; the item must be an instance
of the class or a subclass.
type:
A type or tuple of types (or classes); the item must be of
the exact class or type. Subclasses are not allowed.
Examples::
>>> cint = ConfirmType(subclass=int)
>>> cint.to_python(True)
True
>>> cint.to_python('1')
Traceback (most recent call last):
...
Invalid: '1' is not a subclass of <type 'int'>
>>> cintfloat = ConfirmType(subclass=(float, int))
>>> cintfloat.to_python(1.0), cintfloat.from_python(1.0)
(1.0, 1.0)
>>> cintfloat.to_python(1), cintfloat.from_python(1)
(1, 1)
>>> cintfloat.to_python(None)
Traceback (most recent call last):
...
Invalid: None is not a subclass of one of the types <type 'float'>, <type 'int'>
>>> cint2 = ConfirmType(type=int)
>>> cint2.from_python(True)
Traceback (most recent call last):
...
Invalid: True must be of the type <type 'int'>
"""
subclass = None
type = None
messages = {
'subclass': "%(object)r is not a subclass of %(subclass)s",
'inSubclass': "%(object)r is not a subclass of one of the types %(subclassList)s",
'inType': "%(object)r must be one of the types %(typeList)s",
'type': "%(object)r must be of the type %(type)s",
}
def __init__(self, *args, **kw):
FancyValidator.__init__(self, *args, **kw)
if self.subclass:
if isinstance(self.subclass, list):
self.subclass = tuple(self.subclass)
elif not isinstance(self.subclass, tuple):
self.subclass = (self.subclass,)
self.validate_python = self.confirm_subclass
if self.type:
if isinstance(self.type, list):
self.type = tuple(self.type)
            elif not isinstance(self.type, tuple):
self.type = (self.type,)
self.validate_python = self.confirm_type
def confirm_subclass(self, value, state):
if not isinstance(value, self.subclass):
if len(self.subclass) == 1:
msg = self.message('subclass', state, object=value,
subclass=self.subclass[0])
else:
subclass_list = ', '.join(map(str, self.subclass))
msg = self.message('inSubclass', state, object=value,
subclassList=subclass_list)
raise Invalid(msg, value, state)
def confirm_type(self, value, state):
for t in self.type:
if type(value) is t:
break
else:
if len(self.type) == 1:
msg = self.message('type', state, object=value,
type=self.type[0])
else:
msg = self.message('inType', state, object=value,
typeList=', '.join(map(str, self.type)))
raise Invalid(msg, value, state)
return value
class Wrapper(FancyValidator):
"""
Used to convert functions to validator/converters.
You can give a simple function for `to_python`, `from_python`,
`validate_python` or `validate_other`. If that function raises an
exception, the value is considered invalid. Whatever value the
function returns is considered the converted value.
Unlike validators, the `state` argument is not used. Functions
like `int` can be used here, that take a single argument.
Examples::
>>> def downcase(v):
... return v.lower()
>>> wrap = Wrapper(to_python=downcase)
>>> wrap.to_python('This')
'this'
>>> wrap.from_python('This')
'This'
>>> wrap2 = Wrapper(from_python=downcase)
>>> wrap2.from_python('This')
'this'
>>> wrap2.from_python(1)
Traceback (most recent call last):
...
Invalid: 'int' object has no attribute 'lower'
>>> wrap3 = Wrapper(validate_python=int)
>>> wrap3.to_python('1')
'1'
>>> wrap3.to_python('a')
Traceback (most recent call last):
...
Invalid: invalid literal for int(): a
"""
func_to_python = None
func_from_python = None
func_validate_python = None
func_validate_other = None
def __init__(self, *args, **kw):
for n in ['to_python', 'from_python', 'validate_python',
'validate_other']:
if kw.has_key(n):
kw['func_%s' % n] = kw[n]
del kw[n]
FancyValidator.__init__(self, *args, **kw)
self._to_python = self.wrap(self.func_to_python)
self._from_python = self.wrap(self.func_from_python)
self.validate_python = self.wrap(self.func_validate_python)
self.validate_other = self.wrap(self.func_validate_other)
def wrap(self, func):
if not func:
return None
def result(value, state, func=func):
try:
return func(value)
except Exception, e:
raise Invalid(str(e), {}, value, state)
return result
class Constant(FancyValidator):
"""
This converter converts everything to the same thing.
I.e., you pass in the constant value when initializing, then all
values get converted to that constant value.
This is only really useful for funny situations, like::
fromEmailValidator = ValidateAny(
ValidEmailAddress(),
Constant('unknown@localhost'))
    In this case, if the email is not valid,
    ``'unknown@localhost'`` will be used instead. Of course, you
    could use ``if_invalid`` to the same effect.
Examples::
>>> Constant('X').to_python('y')
'X'
"""
__unpackargs__ = ('value',)
def _to_python(self, value, state):
return self.value
_from_python = _to_python
############################################################
## Normal validators
############################################################
class MaxLength(FancyValidator):
"""
Invalid if the value is longer than `maxLength`. Uses len(),
so it can work for strings, lists, or anything with length.
Examples::
>>> max5 = MaxLength(5)
>>> max5.to_python('12345')
'12345'
>>> max5.from_python('12345')
'12345'
>>> max5.to_python('123456')
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5.from_python('123456')
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5.to_python([1, 2, 3])
[1, 2, 3]
>>> max5.to_python([1, 2, 3, 4, 5, 6])
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5.to_python(5)
Traceback (most recent call last):
...
Invalid: Invalid value (value with length expected)
"""
__unpackargs__ = ('maxLength',)
messages = {
'tooLong': "Enter a value less than %(maxLength)i characters long",
'invalid': "Invalid value (value with length expected)",
}
def validate_python(self, value, state):
try:
if value and \
len(value) > self.maxLength:
raise Invalid(self.message('tooLong', state,
maxLength=self.maxLength),
value, state)
else:
return None
except TypeError:
raise Invalid(self.message('invalid', state),
value, state)
class MinLength(FancyValidator):
"""
    Invalid if the value is shorter than `minLength`. Uses len(),
so it can work for strings, lists, or anything with length.
Examples::
>>> min5 = MinLength(5)
>>> min5.to_python('12345')
'12345'
>>> min5.from_python('12345')
'12345'
>>> min5.to_python('1234')
Traceback (most recent call last):
...
Invalid: Enter a value more than 5 characters long
>>> min5.from_python('1234')
Traceback (most recent call last):
...
Invalid: Enter a value more than 5 characters long
>>> min5.to_python([1, 2, 3, 4, 5])
[1, 2, 3, 4, 5]
>>> min5.to_python([1, 2, 3])
Traceback (most recent call last):
...
Invalid: Enter a value more than 5 characters long
>>> min5.to_python(5)
Traceback (most recent call last):
...
Invalid: Invalid value (value with length expected)
"""
__unpackargs__ = ('minLength',)
messages = {
'tooShort': "Enter a value more than %(minLength)i characters long",
'invalid': "Invalid value (value with length expected)",
}
def validate_python(self, value, state):
try:
if len(value) < self.minLength:
raise Invalid(self.message('tooShort', state,
minLength=self.minLength),
value, state)
except TypeError:
raise Invalid(self.message('invalid', state),
value, state)
class NotEmpty(FancyValidator):
"""
Invalid if value is empty (empty string, empty list, etc).
Generally for objects that Python considers false, except zero
which is not considered invalid.
Examples::
>>> ne = NotEmpty(messages={'empty': 'enter something'})
>>> ne.to_python('')
Traceback (most recent call last):
...
Invalid: enter something
>>> ne.to_python(0)
0
"""
messages = {
'empty': "Please enter a value",
}
def validate_python(self, value, state):
if value == 0:
# This isn't "empty" for this definition.
return value
if not value:
raise Invalid(self.message('empty', state),
value, state)
class Empty(FancyValidator):
"""
Invalid unless the value is empty. Use cleverly, if at all.
Examples::
>>> Empty.to_python(0)
Traceback (most recent call last):
...
Invalid: You cannot enter a value here
"""
messages = {
'notEmpty': "You cannot enter a value here",
}
def validate_python(self, value, state):
if value or value == 0:
raise Invalid(self.message('notEmpty', state),
value, state)
class Regex(FancyValidator):
"""
Invalid if the value doesn't match the regular expression `regex`.
The regular expression can be a compiled re object, or a string
which will be compiled for you.
Use strip=True if you want to strip the value before validation,
and as a form of conversion (often useful).
Examples::
>>> cap = Regex(r'^[A-Z]+$')
>>> cap.to_python('ABC')
'ABC'
>>> cap.from_python('abc')
Traceback (most recent call last):
...
Invalid: The input is not valid
>>> cap.to_python(1)
Traceback (most recent call last):
...
Invalid: The input must be a string (not a <type 'int'>: 1)
>>> Regex(r'^[A-Z]+$', strip=True).to_python(' ABC ')
'ABC'
>>> Regex(r'this', regexOps=('I',)).to_python('THIS')
'THIS'
"""
regexOps = ()
strip = False
regex = None
__unpackargs__ = ('regex',)
messages = {
'invalid': "The input is not valid",
}
def __init__(self, *args, **kw):
FancyValidator.__init__(self, *args, **kw)
if isinstance(self.regex, str):
ops = 0
assert not isinstance(self.regexOps, str), (
"regexOps should be a list of options from the re module "
"(names, or actual values)")
for op in self.regexOps:
if isinstance(op, str):
ops |= getattr(re, op)
else:
ops |= op
self.regex = re.compile(self.regex, ops)
def validate_python(self, value, state):
self.assert_string(value, state)
if self.strip and (isinstance(value, str) or isinstance(value, unicode)):
value = value.strip()
if not self.regex.search(value):
raise Invalid(self.message('invalid', state),
value, state)
def _to_python(self, value, state):
if self.strip and \
(isinstance(value, str) or isinstance(value, unicode)):
return value.strip()
return value
class PlainText(Regex):
"""
Test that the field contains only letters, numbers, underscore,
and the hyphen. Subclasses Regex.
Examples::
>>> PlainText.to_python('_this9_')
'_this9_'
>>> PlainText.from_python(' this ')
Traceback (most recent call last):
...
Invalid: Enter only letters, numbers, or _ (underscore)
>>> PlainText(strip=True).to_python(' this ')
'this'
>>> PlainText(strip=True).from_python(' this ')
' this '
"""
regex = r"^[a-zA-Z_\-0-9]*$"
messages = {
'invalid': 'Enter only letters, numbers, or _ (underscore)',
}
class OneOf(FancyValidator):
"""
Tests that the value is one of the members of a given list.
    If ``testValueList=True``, then if the input value is a list or
tuple, all the members of the sequence will be checked (i.e., the
input must be a subset of the allowed values).
Use ``hideList=True`` to keep the list of valid values out of the
error message in exceptions.
Examples::
>>> oneof = OneOf([1, 2, 3])
>>> oneof.to_python(1)
1
>>> oneof.to_python(4)
Traceback (most recent call last):
...
Invalid: Value must be one of: 1; 2; 3 (not 4)
>>> oneof(testValueList=True).to_python([2, 3, [1, 2, 3]])
[2, 3, [1, 2, 3]]
>>> oneof.to_python([2, 3, [1, 2, 3]])
Traceback (most recent call last):
...
Invalid: Value must be one of: 1; 2; 3 (not [2, 3, [1, 2, 3]])
"""
list = None
testValueList = False
hideList = False
__unpackargs__ = ('list',)
messages = {
'invalid': "Invalid value",
'notIn': "Value must be one of: %(items)s (not %(value)r)",
}
def validate_python(self, value, state):
if self.testValueList and isinstance(value, (list, tuple)):
for v in value:
self.validate_python(v, state)
else:
if not value in self.list:
if self.hideList:
raise Invalid(self.message('invalid', state),
value, state)
else:
items = '; '.join(map(str, self.list))
raise Invalid(self.message('notIn', state,
items=items,
value=value),
value, state)
class DictConverter(FancyValidator):
"""
Converts values based on a dictionary which has values as keys for
the resultant values.
If ``allowNull`` is passed, it will not balk if a false value
(e.g., '' or None) is given (it will return None in these cases).
to_python takes keys and gives values, from_python takes values and
gives keys.
If you give hideDict=True, then the contents of the dictionary
will not show up in error messages.
Examples::
>>> dc = DictConverter({1: 'one', 2: 'two'})
>>> dc.to_python(1)
'one'
>>> dc.from_python('one')
1
>>> dc.to_python(3)
Traceback (most recent call last):
Invalid: Enter a value from: 1; 2
>>> dc2 = dc(hideDict=True)
>>> dc2.hideDict
True
>>> dc2.dict
{1: 'one', 2: 'two'}
>>> dc2.to_python(3)
Traceback (most recent call last):
Invalid: Choose something
>>> dc.from_python('three')
Traceback (most recent call last):
Invalid: Nothing in my dictionary goes by the value 'three'. Choose one of: 'one'; 'two'
"""
dict = None
hideDict = False
__unpackargs__ = ('dict',)
messages = {
'keyNotFound': "Choose something",
'chooseKey': "Enter a value from: %(items)s",
'valueNotFound': "That value is not known",
'chooseValue': "Nothing in my dictionary goes by the value %(value)s. Choose one of: %(items)s",
}
def _to_python(self, value, state):
try:
return self.dict[value]
except KeyError:
if self.hideDict:
raise Invalid(self.message('keyNotFound', state),
value, state)
else:
items = '; '.join(map(repr, self.dict.keys()))
raise Invalid(self.message('chooseKey', state,
items=items),
value, state)
def _from_python(self, value, state):
for k, v in self.dict.items():
if value == v:
return k
if self.hideDict:
raise Invalid(self.message('valueNotFound', state),
value, state)
else:
items = '; '.join(map(repr, self.dict.values()))
raise Invalid(self.message('chooseValue', state,
value=repr(value),
items=items),
value, state)
class IndexListConverter(FancyValidator):
"""
Converts a index (which may be a string like '2') to the value in
the given list.
Examples::
>>> index = IndexListConverter(['zero', 'one', 'two'])
>>> index.to_python(0)
'zero'
>>> index.from_python('zero')
0
>>> index.to_python('1')
'one'
>>> index.to_python(5)
Traceback (most recent call last):
Invalid: Index out of range
>>> index.to_python(None)
Traceback (most recent call last):
Invalid: Must be an integer index
>>> index.from_python('five')
Traceback (most recent call last):
Invalid: Item 'five' was not found in the list
"""
list = None
__unpackargs__ = ('list',)
messages = {
'integer': "Must be an integer index",
'outOfRange': "Index out of range",
'notFound': "Item %(value)s was not found in the list",
}
def _to_python(self, value, state):
try:
value = int(value)
except (ValueError, TypeError):
raise Invalid(self.message('integer', state),
value, state)
try:
return self.list[value]
except IndexError:
raise Invalid(self.message('outOfRange', state),
value, state)
def _from_python(self, value, state):
for i in range(len(self.list)):
if self.list[i] == value:
return i
raise Invalid(self.message('notFound', state,
value=repr(value)),
value, state)
class DateValidator(FancyValidator):
"""
Validates that a date is within the given range. Be sure to call
DateConverter first if you aren't expecting mxDateTime input.
earliest_date and latest_date may be functions; if so, they will
be called each time before validating.
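    Example (an illustrative sketch, not from the original test suite;
    it assumes stdlib ``datetime`` dates and an English locale for the
    formatted error text)::
        >>> import datetime
        >>> v = DateValidator(earliest_date=datetime.date(2003, 1, 1))
        >>> v.to_python(datetime.date(2004, 5, 1))
        datetime.date(2004, 5, 1)
        >>> v.to_python(datetime.date(2002, 5, 1))
        Traceback (most recent call last):
            ...
        Invalid: Date must be after Wednesday, 01 January 2003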
"""
earliest_date = None
latest_date = None
after_now = False
# Use 'datetime' to force the Python 2.3+ datetime module, or
# 'mxDateTime' to force the mxDateTime module (None means use
# datetime, or if not present mxDateTime)
datetime_module = None
messages = {
'after': "Date must be after %(date)s",
'before': "Date must be before %(date)s",
# Double %'s, because this will be substituted twice:
'date_format': "%%A, %%d %%B %%Y",
'future': "The date must be sometime in the future",
}
def validate_python(self, value, state):
if self.earliest_date:
if callable(self.earliest_date):
earliest_date = self.earliest_date()
else:
earliest_date = self.earliest_date
if value < earliest_date:
date_formatted = earliest_date.strftime(
self.message('date_format', state))
raise Invalid(
self.message('after', state,
date=date_formatted),
value, state)
if self.latest_date:
if callable(self.latest_date):
latest_date = self.latest_date()
else:
latest_date = self.latest_date
if value > latest_date:
date_formatted = latest_date.strftime(
self.message('date_format', state))
raise Invalid(
self.message('before', state,
date=date_formatted),
value, state)
if self.after_now:
dt_mod = import_datetime(self.datetime_module)
now = datetime_now(dt_mod)
if value < now:
date_formatted = now.strftime(
self.message('date_format', state))
raise Invalid(
self.message('future', state,
date=date_formatted),
value, state)
class Bool(FancyValidator):
"""
Always Valid, returns True or False based on the value and the
    existence of the value.
Examples::
>>> Bool.to_python(0)
False
>>> Bool.to_python(1)
True
>>> Bool.to_python('')
False
>>> Bool.to_python(None)
False
"""
if_missing = False
def _to_python(self, value, state):
return bool(value)
_from_python = _to_python
class Int(FancyValidator):
"""
Convert a value to an integer.
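    Examples (an illustrative sketch)::
        >>> Int.to_python('10')
        10
        >>> Int.to_python('ten')
        Traceback (most recent call last):
            ...
        Invalid: Please enter an integer value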
"""
messages = {
'integer': "Please enter an integer value",
}
def _to_python(self, value, state):
try:
return int(value)
except (ValueError, TypeError):
raise Invalid(self.message('integer', state),
value, state)
_from_python = _to_python
class Number(FancyValidator):
"""
Convert a value to a float or integer. Tries to convert it to
an integer if no information is lost.
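    Examples (an illustrative sketch)::
        >>> Number.to_python('10')
        10
        >>> Number.to_python('10.5')
        10.5
        >>> Number.to_python('ten')
        Traceback (most recent call last):
            ...
        Invalid: Please enter a number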
"""
messages = {
'number': "Please enter a number",
}
def _to_python(self, value, state):
try:
value = float(value)
if value == int(value):
return int(value)
return value
except ValueError:
raise Invalid(self.message('number', state),
value, state)
class String(FancyValidator):
"""
Converts things to string, but treats empty things as the empty
string.
Also takes a `max` and `min` argument, and the string length must
fall in that range.
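    Examples (an illustrative sketch)::
        >>> String(min=2).to_python('a')
        Traceback (most recent call last):
            ...
        Invalid: Enter a value 2 characters long or more
        >>> String(max=3).to_python('1234')
        Traceback (most recent call last):
            ...
        Invalid: Enter a value less than 3 characters long
        >>> String().from_python(0)
        '0'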
"""
min = None
max = None
messages = {
'tooLong': "Enter a value less than %(max)i characters long",
'tooShort': "Enter a value %(min)i characters long or more",
}
def validate_python(self, value, state):
if (self.max is not None and value is not None
and len(value) > self.max):
raise Invalid(self.message('tooLong', state,
max=self.max),
value, state)
if (self.min is not None
and (not value or len(value) < self.min)):
raise Invalid(self.message('tooShort', state,
min=self.min),
value, state)
def _from_python(self, value, state):
if value:
return str(value)
if value == 0:
return str(value)
return ""
class Set(FancyValidator):
"""
This is for when you think you may return multiple values for a
certain field.
This way the result will always be a list, even if there's only
one result. It's equivalent to ForEach(convertToList=True).
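    Examples (an illustrative sketch)::
        >>> Set.to_python('this')
        ['this']
        >>> Set.to_python(('this', 'that'))
        ('this', 'that')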
"""
if_empty = ()
def _to_python(self, value, state):
if isinstance(value, (list, tuple)):
return value
elif value is None:
return []
else:
return [value]
class Email(FancyValidator):
"""
Validate an email address.
If you pass ``resolve_domain=True``, then it will try to resolve
the domain name to make sure it's valid. This takes longer, of
    course. You must have the `pyDNS <http://pydns.sf.net>`_ module
installed to look up MX records.
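    Examples (an illustrative sketch; foo.com is a placeholder
    domain)::
        >>> e = Email()
        >>> e.to_python(' test@foo.com ')
        'test@foo.com'
        >>> e.to_python('test')
        Traceback (most recent call last):
            ...
        Invalid: An email address must contain a single @
        >>> e.to_python('test@foobar')
        Traceback (most recent call last):
            ...
        Invalid: The domain portion of the email address is invalid (the portion after the @: foobar)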
"""
resolve_domain = False
usernameRE = re.compile(r"^[a-z0-9\_\-']+", re.I)
domainRE = re.compile(r"^[a-z0-9\.\-]+\.[a-z]+$", re.I)
messages = {
'empty': 'Please enter an email address',
'noAt': 'An email address must contain a single @',
'badUsername': 'The username portion of the email address is invalid (the portion before the @: %(username)s)',
'badDomain': 'The domain portion of the email address is invalid (the portion after the @: %(domain)s)',
'domainDoesNotExist': 'The domain of the email address does not exist (the portion after the @: %(domain)s)',
}
def __init__(self, *args, **kw):
global mxlookup
FancyValidator.__init__(self, *args, **kw)
if self.resolve_domain:
if mxlookup is None:
try:
from DNS.lazy import mxlookup
except ImportError:
import warnings
warnings.warn(
"pyDNS <http://pydns.sf.net> is not installed on "
"your system (or the DNS package cannot be found). "
"I cannot resolve domain names in addresses")
raise
def validate_python(self, value, state):
if not value:
raise Invalid(
self.message('empty', state),
value, state)
value = value.strip()
splitted = value.split('@', 1)
if not len(splitted) == 2:
raise Invalid(
self.message('noAt', state),
value, state)
if not self.usernameRE.search(splitted[0]):
raise Invalid(
self.message('badUsername', state,
username=splitted[0]),
value, state)
if not self.domainRE.search(splitted[1]):
raise Invalid(
self.message('badDomain', state,
domain=splitted[1]),
value, state)
if self.resolve_domain:
domains = mxlookup(splitted[1])
if not domains:
raise Invalid(
self.message('domainDoesNotExist', state,
domain=splitted[1]),
value, state)
def _to_python(self, value, state):
return value.strip()
class URL(FancyValidator):
"""
Validate a URL, either http://... or https://. If check_exists
is true, then we'll actually make a request for the page.
If add_http is true, then if no scheme is present we'll add
http://
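    Examples (an illustrative sketch; foo.com is a placeholder
    domain)::
        >>> u = URL(add_http=True)
        >>> u.to_python('foo.com')
        'http://foo.com'
        >>> u.to_python('http://foo.com/path?q=1')
        'http://foo.com/path?q=1'
        >>> u.to_python('ftp://foo.com')
        Traceback (most recent call last):
            ...
        Invalid: That is not a valid URL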
"""
check_exists = False
add_http = True
    url_re = re.compile(r'^(http|https)://[a-z\-\.]+\.[a-z]+(?::[0-9]+)?(?:/.*)?$', re.I)
scheme_re = re.compile(r'^[a-zA-Z]+:')
messages = {
'noScheme': 'You must start your URL with http://, https://, etc',
'badURL': 'That is not a valid URL',
'httpError': 'An error occurred when trying to access the URL: %(error)s',
'notFound': 'The server responded that the page could not be found',
'status': 'The server responded with a bad status code (%(status)s)',
}
def _to_python(self, value, state):
value = value.strip()
if self.add_http:
if not self.scheme_re.search(value):
value = 'http://' + value
match = self.scheme_re.search(value)
if not match:
raise Invalid(
self.message('noScheme', state),
value, state)
value = match.group(0).lower() + value[len(match.group(0)):]
if not self.url_re.search(value):
raise Invalid(
self.message('badURL', state),
value, state)
if self.check_exists and (value.startswith('http://')
or value.startswith('https://')):
self._check_url_exists(value, state)
return value
def _check_url_exists(self, url, state):
global httplib, urlparse
if httplib is None:
import httplib
if urlparse is None:
import urlparse
scheme, netloc, path, params, query, fragment = urlparse.urlparse(
url, 'http')
if scheme == 'http':
ConnClass = httplib.HTTPConnection
else:
ConnClass = httplib.HTTPSConnection
try:
conn = ConnClass(netloc)
if params:
path += ';' + params
if query:
path += '?' + query
conn.request('HEAD', path)
res = conn.getresponse()
except httplib.HTTPException, e:
raise Invalid(
self.message('httpError', state, error=e),
state, url)
else:
if res.status == 404:
raise Invalid(
self.message('notFound', state),
state, url)
if res.status != 200:
raise Invalid(
self.message('status', state, status=res.status),
state, url)
class StateProvince(FancyValidator):
"""
Valid state or province code (two-letter).
    Currently only US state codes are built in. Give your own
    `states` list to validate other state-like codes; give
    `extraStates` to add values without losing the current state
    values.
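    Examples (an illustrative sketch)::
        >>> sp = StateProvince()
        >>> sp.to_python('pa')
        'PA'
        >>> sp.to_python('XX')
        Traceback (most recent call last):
            ...
        Invalid: That is not a valid state code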
"""
states = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE',
'FL', 'GA', 'HI', 'IA', 'ID', 'IN', 'IL', 'KS', 'KY',
'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT',
'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT',
'VA', 'VT', 'WA', 'WI', 'WV', 'WY']
extraStates = []
__unpackargs__ = ('extraStates',)
messages = {
'empty': 'Please enter a state code',
'wrongLength': 'Please enter a state code with TWO letters',
'invalid': 'That is not a valid state code',
}
def validate_python(self, value, state):
value = str(value).strip().upper()
if not value:
raise Invalid(
self.message('empty', state),
value, state)
if not value or len(value) != 2:
raise Invalid(
self.message('wrongLength', state),
value, state)
if value not in self.states \
and not (self.extraStates and value in self.extraStates):
raise Invalid(
self.message('invalid', state),
value, state)
def _to_python(self, value, state):
return str(value).strip().upper()
class PhoneNumber(FancyValidator):
"""
Validates, and converts to ###-###-####, optionally with
extension (as ext.##...)
@@: should add international phone number support
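    Examples (an illustrative sketch with placeholder numbers)::
        >>> p = PhoneNumber()
        >>> p.to_python('412-555-1212')
        '412-555-1212'
        >>> p.to_python('555-1212')
        Traceback (most recent call last):
            ...
        Invalid: Please enter a number, with area code, in the form ###-###-####, optionally with "ext.####"
        >>> p.from_python('1-412.555.1212 ext. 13')
        '412-555-1212 ext.13'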
"""
_phoneRE = re.compile(r'^\s*(?:1-)?(\d\d\d)[\- \.]?(\d\d\d)[\- \.]?(\d\d\d\d)(?:\s*ext\.?\s*(\d+))?\s*$', re.I)
messages = {
'phoneFormat': 'Please enter a number, with area code, in the form ###-###-####, optionally with "ext.####"',
}
def _to_python(self, value, state):
self.assert_string(value, state)
match = self._phoneRE.search(value)
if not match:
raise Invalid(
self.message('phoneFormat', state),
value, state)
return value
def _from_python(self, value, state):
self.assert_string(value, state)
match = self._phoneRE.search(value)
if not match:
raise Invalid(self.message('phoneFormat', state),
value, state)
result = '%s-%s-%s' % (match.group(1), match.group(2), match.group(3))
if match.group(4):
result = result + " ext.%s" % match.group(4)
return result
class FieldStorageUploadConverter(FancyValidator):
"""
Converts a cgi.FieldStorage instance to
a value that FormEncode can use for file
uploads.
"""
def _to_python(self, value, state):
if isinstance(value, cgi.FieldStorage):
return fieldstorage.convert_fieldstorage(value)
else:
return value
class DateConverter(FancyValidator):
"""
    Validates and converts a textual date, like mm/yy, dd/mm/yy,
    dd-mm-yy, etc. With the default ``month_style`` of 'mm/dd/yyyy'
    the month comes first; with 'dd/mm/yyyy' the day comes first.
    Accepts English month names, also abbreviated. Returns the value
    as a ``datetime.date`` (or ``mx.DateTime``, depending on
    ``datetime_module``). Two-digit years are assumed to be within
1950-2020, with dates from 21-49 being ambiguous and signaling an
error.
Use accept_day=False if you just want a month/year (like for a
credit card expiration date).
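    Examples (an illustrative sketch, assuming the stdlib ``datetime``
    module is in use)::
        >>> dc = DateConverter()
        >>> dc.to_python('12/3/09')
        datetime.date(2009, 12, 3)
        >>> dc.to_python('2/30/04')
        Traceback (most recent call last):
            ...
        Invalid: That month only has 29 days
        >>> DateConverter(accept_day=False).to_python('10/2004')
        datetime.date(2004, 10, 1)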
"""
## @@: accepts only US-style dates
accept_day = True
# also allowed: 'dd/mm/yyyy'
month_style = 'mm/dd/yyyy'
# Use 'datetime' to force the Python 2.3+ datetime module, or
# 'mxDateTime' to force the mxDateTime module (None means use
# datetime, or if not present mxDateTime)
datetime_module = None
    _day_date_re = re.compile(r'^\s*(\d\d?)[\-\./\\](\d\d?|jan|january|feb|febuary|february|mar|march|apr|april|may|jun|june|jul|july|aug|august|sep|sept|september|oct|october|nov|november|dec|december)[\-\./\\](\d\d\d?\d?)\s*$', re.I)
    _month_date_re = re.compile(r'^\s*(\d\d?|jan|january|feb|febuary|february|mar|march|apr|april|may|jun|june|jul|july|aug|august|sep|sept|september|oct|october|nov|november|dec|december)[\-\./\\](\d\d\d?\d?)\s*$', re.I)
_month_names = {
'jan': 1, 'january': 1,
        'feb': 2, 'febuary': 2, 'february': 2,
'mar': 3, 'march': 3,
'apr': 4, 'april': 4,
'may': 5,
'jun': 6, 'june': 6,
'jul': 7, 'july': 7,
'aug': 8, 'august': 8,
'sep': 9, 'sept': 9, 'september': 9,
'oct': 10, 'october': 10,
'nov': 11, 'november': 11,
'dec': 12, 'december': 12,
}
## @@: Feb. should be leap-year aware (but mxDateTime does catch that)
_monthDays = {
1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31,
9: 30, 10: 31, 11: 30, 12: 31}
messages = {
'badFormat': 'Please enter the date in the form %(format)s',
'monthRange': 'Please enter a month from 1 to 12',
'invalidDay': 'Please enter a valid day',
'dayRange': 'That month only has %(days)i days',
'invalidDate': 'That is not a valid day (%(exception)s)',
'unknownMonthName': "Unknown month name: %(month)s",
'invalidYear': 'Please enter a number for the year',
'fourDigitYear': 'Please enter a four-digit year',
'wrongFormat': 'Please enter the date in the form %(format)s',
}
def _to_python(self, value, state):
if self.accept_day:
return self.convert_day(value, state)
else:
return self.convert_month(value, state)
def convert_day(self, value, state):
self.assert_string(value, state)
match = self._day_date_re.search(value)
if not match:
raise Invalid(self.message('badFormat', state,
format=self.month_style),
value, state)
day = int(match.group(1))
try:
month = int(match.group(2))
        except (ValueError, TypeError):
month = self.make_month(match.group(2), state)
else:
if self.month_style == 'mm/dd/yyyy':
month, day = day, month
year = self.make_year(match.group(3), state)
if month > 12 or month < 1:
raise Invalid(self.message('monthRange', state),
value, state)
if day < 1:
raise Invalid(self.message('invalidDay', state),
value, state)
if self._monthDays[month] < day:
raise Invalid(self.message('dayRange', state,
days=self._monthDays[month]),
value, state)
dt_mod = import_datetime(self.datetime_module)
try:
return datetime_makedate(dt_mod, year, month, day)
except ValueError, v:
raise Invalid(self.message('invalidDate', state,
exception=str(v)),
value, state)
def make_month(self, value, state):
try:
return int(value)
except ValueError:
value = value.lower().strip()
if self._month_names.has_key(value):
return self._month_names[value]
else:
raise Invalid(self.message('unknownMonthName', state,
month=value),
value, state)
def make_year(self, year, state):
try:
year = int(year)
except ValueError:
raise Invalid(self.message('invalidYear', state),
year, state)
if year <= 20:
year = year + 2000
if year >= 50 and year < 100:
year = year + 1900
if year > 20 and year < 50:
raise Invalid(self.message('fourDigitYear', state),
year, state)
return year
def convert_month(self, value, state):
match = self._month_date_re.search(value)
if not match:
raise Invalid(self.message('wrongFormat', state,
format='mm/yyyy'),
value, state)
month = self.make_month(match.group(1), state)
year = self.make_year(match.group(2), state)
if month > 12 or month < 1:
raise Invalid(self.message('monthRange', state),
value, state)
dt_mod = import_datetime(self.datetime_module)
return datetime_makedate(dt_mod, year, month, 1)
def _from_python(self, value, state):
if self.if_empty is not NoDefault and not value:
return ''
if self.accept_day:
return self.unconvert_day(value, state)
else:
return self.unconvert_month(value, state)
def unconvert_day(self, value, state):
# @@ ib: double-check, improve
return value.strftime("%m/%d/%Y")
def unconvert_month(self, value, state):
# @@ ib: double-check, improve
return value.strftime("%m/%Y")
class TimeConverter(FancyValidator):
"""
Converts times in the format HH:MM:SSampm to (h, m, s).
Seconds are optional.
For ampm, set use_ampm = True. For seconds, use_seconds = True.
Use 'optional' for either of these to make them optional.
Examples::
>>> tim = TimeConverter()
>>> tim.to_python('8:30')
(8, 30)
>>> tim.to_python('20:30')
(20, 30)
>>> tim.to_python('30:00')
Traceback (most recent call last):
...
Invalid: You must enter an hour in the range 0-23
>>> tim.to_python('13:00pm')
Traceback (most recent call last):
...
Invalid: You must enter an hour in the range 1-12
>>> tim.to_python('12:-1')
Traceback (most recent call last):
...
Invalid: You must enter a minute in the range 0-59
>>> tim.to_python('12:02pm')
(12, 2)
>>> tim.to_python('12:02am')
(0, 2)
>>> tim.to_python('1:00PM')
(13, 0)
>>> tim.from_python((13, 0))
'13:00:00'
>>> tim2 = tim(use_ampm=True, use_seconds=False)
>>> tim2.from_python((13, 0))
'1:00pm'
>>> tim2.from_python((0, 0))
'12:00am'
>>> tim2.from_python((12, 0))
'12:00pm'
"""
use_ampm = 'optional'
prefer_ampm = False
use_seconds = 'optional'
messages = {
'noAMPM': 'You must indicate AM or PM',
        'tooManyColon': 'There are too many :\'s',
'noSeconds': 'You may not enter seconds',
'secondsRequired': 'You must enter seconds',
'minutesRequired': 'You must enter minutes (after a :)',
'badNumber': 'The %(part)s value you gave is not a number: %(number)r',
'badHour': 'You must enter an hour in the range %(range)s',
'badMinute': 'You must enter a minute in the range 0-59',
'badSecond': 'You must enter a second in the range 0-59',
}
def _to_python(self, value, state):
time = value.strip()
explicit_ampm = False
if self.use_ampm:
last_two = time[-2:].lower()
if last_two not in ('am', 'pm'):
if self.use_ampm != 'optional':
raise Invalid(
self.message('noAMPM', state),
value, state)
else:
offset = 0
else:
explicit_ampm = True
if last_two == 'pm':
offset = 12
else:
offset = 0
time = time[:-2]
else:
offset = 0
parts = time.split(':')
if len(parts) > 3:
raise Invalid(
self.message('tooManyColon', state),
value, state)
if len(parts) == 3 and not self.use_seconds:
raise Invalid(
self.message('noSeconds', state),
value, state)
if (len(parts) == 2
and self.use_seconds
and self.use_seconds != 'optional'):
raise Invalid(
self.message('secondsRequired', state),
value, state)
if len(parts) == 1:
raise Invalid(
self.message('minutesRequired', state),
value, state)
try:
hour = int(parts[0])
except ValueError:
raise Invalid(
self.message('badNumber', state, number=parts[0], part='hour'),
value, state)
if explicit_ampm:
if hour > 12 or hour < 1:
raise Invalid(
self.message('badHour', state, number=hour, range='1-12'),
value, state)
if hour == 12 and offset == 12:
# 12pm == 12
pass
elif hour == 12 and offset == 0:
# 12am == 0
hour = 0
else:
hour += offset
else:
if hour > 23 or hour < 0:
raise Invalid(
self.message('badHour', state,
number=hour, range='0-23'),
value, state)
try:
minute = int(parts[1])
except ValueError:
raise Invalid(
self.message('badNumber', state,
number=parts[1], part='minute'),
value, state)
if minute > 59 or minute < 0:
raise Invalid(
self.message('badMinute', state, number=minute),
value, state)
if len(parts) == 3:
try:
second = int(parts[2])
except ValueError:
raise Invalid(
self.message('badNumber', state,
number=parts[2], part='second'))
if second > 59 or second < 0:
raise Invalid(
self.message('badSecond', state, number=second),
value, state)
else:
second = None
if second is None:
return (hour, minute)
else:
return (hour, minute, second)
def _from_python(self, value, state):
if isinstance(value, (str, unicode)):
return value
        if hasattr(value, 'hour'):
            hour, minute = value.hour, value.minute
            second = getattr(value, 'second', 0)
elif len(value) == 3:
hour, minute, second = value
elif len(value) == 2:
hour, minute = value
second = 0
ampm = ''
if ((self.use_ampm == 'optional' and self.prefer_ampm)
or (self.use_ampm and self.use_ampm != 'optional')):
ampm = 'am'
if hour > 12:
hour -= 12
ampm = 'pm'
elif hour == 12:
ampm = 'pm'
elif hour == 0:
hour = 12
if self.use_seconds:
return '%i:%02i:%02i%s' % (hour, minute, second, ampm)
else:
return '%i:%02i%s' % (hour, minute, ampm)
class PostalCode(Regex):
"""
US Postal codes (aka Zip Codes).
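    Examples (an illustrative sketch)::
        >>> PostalCode.to_python('55555')
        '55555'
        >>> PostalCode.to_python('55555-5555')
        '55555-5555'
        >>> PostalCode.to_python('5555')
        Traceback (most recent call last):
            ...
        Invalid: Please enter a zip code (5 digits)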
"""
regex = r'^\d\d\d\d\d(?:-\d\d\d\d)?$'
strip = True
messages = {
'invalid': 'Please enter a zip code (5 digits)',
}
class StripField(FancyValidator):
"""
Take a field from a dictionary, removing the key from the
dictionary.
``name`` is the key. The field value and a new copy of the
dictionary with that field removed are returned.
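    Examples (an illustrative sketch; 'test' is a made-up key)::
        >>> StripField('test').to_python({'a': 1, 'test': 2})
        (2, {'a': 1})
        >>> StripField('test').to_python({'a': 1})
        Traceback (most recent call last):
            ...
        Invalid: The name 'test' is missing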
"""
__unpackargs__ = ('name',)
messages = {
'missing': 'The name %(name)s is missing',
}
def _to_python(self, valueDict, state):
v = valueDict.copy()
try:
field = v[self.name]
del v[self.name]
except KeyError:
raise Invalid(self.message('missing', state,
name=repr(self.name)),
valueDict, state)
return field, v
class StringBoolean(FancyValidator):
# Originally from TurboGears
"""
Converts a string to a boolean.
Values like 'true' and 'false' are considered True and False,
respectively; anything in ``true_values`` is true, anything in
``false_values`` is false, case-insensitive). The first item of
those lists is considered the preferred form.
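    Examples (an illustrative sketch)::
        >>> b = StringBoolean()
        >>> b.to_python('yes')
        True
        >>> b.to_python('n')
        False
        >>> b.to_python('maybe')
        Traceback (most recent call last):
            ...
        Invalid: Value should be 'true' or 'false'
        >>> b.from_python(True)
        'true'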
"""
true_values = ['true', 't', 'yes', 'y', 'on', '1']
false_values = ['false', 'f', 'no', 'n', 'off', '0']
messages = { "string" : "Value should be %(true)r or %(false)r" }
def _to_python(self, value, state):
if isinstance(value, (str, unicode)):
value = value.strip().lower()
if value in self.true_values:
return True
if not value or value in self.false_values:
return False
raise Invalid(self.message("string", state,
true=self.true_values[0],
false=self.false_values[0]),
value, state)
return bool(value)
def _from_python(self, value, state):
if value:
return self.true_values[0]
else:
return self.false_values[0]
class SignedString(FancyValidator):
"""
Encodes a string into a signed string, and base64 encodes both the
signature string and a random nonce.
It is up to you to provide a secret, and to keep the secret handy
and consistent.
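    Example (an illustrative round trip; 'my-secret' is a made-up
    secret and the signed output itself varies with the random nonce,
    so only the decoded value is shown)::
        >>> signer = SignedString(secret='my-secret')
        >>> signed = signer.from_python('value')
        >>> signer.to_python(signed)
        'value'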
"""
messages = {
'malformed': 'Value does not contain a signature',
'badsig': 'Signature is not correct',
}
secret = None
nonce_length = 4
def _to_python(self, value, state):
global sha
if not sha:
import sha
        assert self.secret, (
"You must give a secret")
parts = value.split(None, 1)
if not parts or len(parts) == 1:
raise Invalid(self.message('malformed', state),
value, state)
sig, rest = parts
sig = sig.decode('base64')
        rest = rest.decode('base64')
nonce = rest[:self.nonce_length]
rest = rest[self.nonce_length:]
digest = sha.new(self.secret+nonce+rest).digest()
if digest != sig:
raise Invalid(self.message('badsig', state),
value, state)
return rest
def _from_python(self, value, state):
global sha
if not sha:
import sha
nonce = self.make_nonce()
value = str(value)
        digest = sha.new(self.secret+nonce+value).digest()
return self.encode(digest)+' '+self.encode(nonce+value)
def encode(self, value):
return value.encode('base64').strip().replace('\n', '')
def make_nonce(self):
global random
if not random:
import random
return ''.join([
chr(random.randrange(256))
for i in range(self.nonce_length)])
class FormValidator(FancyValidator):
"""
A FormValidator is something that can be chained with a
Schema. Unlike normal chaining the FormValidator can
validate forms that aren't entirely valid.
The important method is .validate(), of course. It gets passed a
dictionary of the (processed) values from the form. If you have
.validate_partial_form set to True, then it will get the incomplete
values as well -- use .has_key() to test if the field was able to
process any particular field.
Anyway, .validate() should return a string or a dictionary. If a
string, it's an error message that applies to the whole form. If
not, then it should be a dictionary of fieldName: errorMessage.
The special key "form" is the error message for the form as a whole
(i.e., a string is equivalent to {"form": string}).
Return None on no errors.
"""
validate_partial_form = False
validate_partial_python = None
validate_partial_other = None
class FieldsMatch(FormValidator):
"""
Tests that the given fields match, i.e., are identical. Useful
for password+confirmation fields. Pass the list of field names in
as `field_names`.
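    Example (an illustrative sketch; 'pass' and 'conf' are made-up
    field names)::
        >>> f = FieldsMatch('pass', 'conf')
        >>> result = f.to_python({'pass': 'xx', 'conf': 'xx'})
        >>> f.to_python({'pass': 'xx', 'conf': 'yy'})
        Traceback (most recent call last):
            ...
        Invalid: conf: Fields do not match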
"""
show_match = False
field_names = None
validate_partial_form = True
__unpackargs__ = ('*', 'field_names')
messages = {
'invalid': "Fields do not match (should be %(match)s)",
'invalidNoMatch': "Fields do not match",
}
def validate_partial(self, field_dict, state):
for name in self.field_names:
if not field_dict.has_key(name):
return
self.validate_python(field_dict, state)
def validate_python(self, field_dict, state):
ref = field_dict[self.field_names[0]]
errors = {}
for name in self.field_names[1:]:
if field_dict.get(name, '') != ref:
if self.show_match:
errors[name] = self.message('invalid', state,
match=ref)
else:
errors[name] = self.message('invalidNoMatch', state)
if errors:
error_list = errors.items()
error_list.sort()
error_message = '<br>\n'.join(
['%s: %s' % (name, value) for name, value in error_list])
raise Invalid(error_message,
field_dict, state,
error_dict=errors)
class CreditCardValidator(FormValidator):
"""
Checks that credit card numbers are valid (if not real).
You pass in the name of the field that has the credit card
type and the field with the credit card number. The credit
card type should be one of "visa", "mastercard", "amex",
"dinersclub", "discover", "jcb".
You must check the expiration date yourself (there is no
relation between CC number/types and expiration dates).
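    Example (an illustrative sketch, using the standard
    4111111111111111 Visa test number, which passes the mod-10
    check)::
        >>> ccv = CreditCardValidator()
        >>> ok = ccv.to_python({'ccType': 'visa', 'ccNumber': '4111111111111111'})
        >>> ccv.to_python({'ccType': 'visa', 'ccNumber': '411'})
        Traceback (most recent call last):
            ...
        Invalid: ccNumber: You did not enter a valid number of digits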
"""
validate_partial_form = True
cc_type_field = 'ccType'
cc_number_field = 'ccNumber'
__unpackargs__ = ('cc_type_field', 'cc_number_field')
messages = {
'notANumber': "Please enter only the number, no other characters",
'badLength': "You did not enter a valid number of digits",
'invalidNumber': "That number is not valid",
}
def validate_partial(self, field_dict, state):
if not field_dict.get(self.cc_type_field, None) \
or not field_dict.get(self.cc_number_field, None):
return None
self.validate_python(field_dict, state)
def validate_python(self, field_dict, state):
errors = self._validateReturn(field_dict, state)
if errors:
error_list = errors.items()
error_list.sort()
raise Invalid(
'<br>\n'.join(["%s: %s" % (name, value)
for name, value in error_list]),
field_dict, state, error_dict=errors)
def _validateReturn(self, field_dict, state):
ccType = field_dict[self.cc_type_field].lower().strip()
number = field_dict[self.cc_number_field].strip()
number = number.replace(' ', '')
number = number.replace('-', '')
try:
long(number)
except ValueError:
return {self.cc_number_field: self.message('notANumber', state)}
assert self._cardInfo.has_key(ccType), (
"I can't validate that type of credit card")
foundValid = False
validLength = False
for prefix, length in self._cardInfo[ccType]:
if len(number) == length:
validLength = True
if (len(number) == length
and number.startswith(prefix)):
foundValid = True
break
if not validLength:
return {self.cc_number_field: self.message('badLength', state)}
if not foundValid:
return {self.cc_number_field: self.message('invalidNumber', state)}
if not self._validateMod10(number):
return {self.cc_number_field: self.message('invalidNumber', state)}
return None
def _validateMod10(self, s):
"""
This code by Sean Reifschneider, of tummy.com
"""
double = 0
sum = 0
for i in range(len(s) - 1, -1, -1):
for c in str((double + 1) * int(s[i])):
sum = sum + int(c)
double = (double + 1) % 2
return((sum % 10) == 0)
_cardInfo = {
"visa": [('4', 16),
('4', 13)],
"mastercard": [('51', 16),
('52', 16),
('53', 16),
('54', 16),
('55', 16)],
"discover": [('6011', 16)],
"amex": [('34', 15),
('37', 15)],
"dinersclub": [('300', 14),
('301', 14),
('302', 14),
('303', 14),
('304', 14),
('305', 14),
('36', 14),
('38', 14)],
"jcb": [('3', 16),
('2131', 15),
('1800', 15)],
}
|
gpl-2.0
| 8,930,365,845,739,722,000
| 32.114525
| 226
| 0.525029
| false
| 4.149776
| false
| false
| false
|
TeamODrKnow/doctor-know
|
main.py
|
1
|
15028
|
__author__ = 'jml168@pitt.edu (J. Matthew Landis)'
import os
import logging
import pickle
import webapp2
import time
import httplib2
import json
import tweepy
import haigha
from collections import Counter
from haigha.connections.rabbit_connection import RabbitConnection
from apiclient import discovery
from oauth2client import appengine
from oauth2client import client
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
#######################################################################
PROJECTID = '934763316754'
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
<h1>Warning: Please configure OAuth 2.0</h1>
<p>
To make this sample run you will need to populate the client_secrets.json file
found at:
</p>
<p>
<code>%s</code>.
</p>
<p>with information found on the <a
href="https://code.google.com/apis/console">APIs Console</a>.
</p>
""" % CLIENT_SECRETS
http = httplib2.Http(memcache)
service = discovery.build("plus", "v1", http=http)
bigquery_service = discovery.build("bigquery","v2", http=http)
consumer_key = "9xNrmD6hE0xnRSYdZt5t0XT0B"
consumer_secret = "kperqjklvPhBCVvHI96aZIfJu5w1DHI2BZoNMdBEvBPfmuZIYG"
access_token = "46501499-cijYvv9ixtQKHLSiLt9QaRtcmWeEKvvGZK5s6ukw7"
access_token_secret = "D127XCAN02BPb0ZtcreCG6dpBJyiiLCeD6ckS2MgdHqwG"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
decorator = appengine.oauth2decorator_from_clientsecrets(
CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/plus.me',
message=MISSING_CLIENT_SECRETS_MESSAGE)
bq_decorator = appengine.oauth2decorator_from_clientsecrets(
CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/bigquery',
message=MISSING_CLIENT_SECRETS_MESSAGE)
## Function to retrieve and render a template
def render_template(handler, templatename, templatevalues):
path = os.path.join(os.path.dirname(__file__), 'templates/' + templatename)
html = template.render(path, templatevalues)
handler.response.out.write(html)
#######################################################################
## Handles and loads index page
class MainPage(webapp2.RequestHandler):
def get(self):
nickname = "null"
email = "null"
user = users.get_current_user()
if user:
res = UserModel.query(UserModel.uid == user.user_id()).fetch()
if res:
ui = res[0]
nickname = ui.fname+ " " +ui.lname
email = user.email()
login = users.create_login_url('/')
else:
nickname = user.nickname()
email = user.email()
login = '/createProfile'
else:
ui = None
login = users.create_login_url('/')
logout = users.create_logout_url('/')
os.system("python stream.py")
template_values = {
'login': login,
'logout': logout,
'user': user,
'nickname': nickname,
'email': email
}
render_template(self, 'index.html', template_values)
#######################################################################
## Handle user info and profile
class CreateProfile(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
res = UserModel.query(UserModel.uid == user.user_id()).fetch()
if res:
self.redirect('/profile')
else:
                template_data = {'logout':users.create_logout_url('/'), 'nickname': user.nickname()}
template_path = 'templates/createProfile.html'
self.response.out.write(template.render(template_path,template_data))
else:
            self.redirect(users.create_login_url('/'))
#######################################################################
## process user profile
## check for user signed in, if so, save the entered information, otherwise, redirect them to the login page
class ProcessUser(webapp2.RequestHandler) :
def post(self) :
user = users.get_current_user()
if user:
fname = self.request.get('fname')
lname = self.request.get('lname')
            fname = fname.replace(" ", "")
            lname = lname.replace(" ", "")
words = self.request.get_all('word')
            if fname and lname:
NewUser = UserModel()
NewUser.uid = user.user_id()
NewUser.fname = fname
NewUser.lname = lname
NewUser.words = []
for word in words:
                    word = word.replace(" ", "")
if word:
NewUser.words+=[word]
NewUser.put()
self.redirect('/profile')
else:
self.redirect('/createProfile')
else:
self.redirect(users.create_login_url('/'))
#######################################################################
## Model Data
class DataHandler(webapp2.RequestHandler) :
@bq_decorator.oauth_aware
def get(self) :
if bq_decorator.has_credentials():
http = bq_decorator.http()
inputData = self.request.get("inputData")
queryData = {'query':'SELECT SUM(word_count) as WCount,corpus_date,group_concat(corpus) as Work FROM '
'[publicdata:samples.shakespeare] WHERE word="'+inputData+'" and corpus_date>0 GROUP BY corpus_date ORDER BY WCount'}
tableData = bigquery_service.jobs()
dataList = tableData.query(projectId=PROJECTID,body=queryData).execute(http)
resp = []
if 'rows' in dataList:
#parse dataList
for row in dataList['rows']:
for key,dict_list in row.iteritems():
count = dict_list[0]
year = dict_list[1]
corpus = dict_list[2]
resp.append({'count': count['v'],'year':year['v'],'corpus':corpus['v']})
else:
resp.append({'count':'0','year':'0','corpus':'0'})
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(resp))
else:
self.response.write(json.dumps({'error':'No credentials'}))
#######################################################################
## Model Words
class WordsHandler(webapp2.RequestHandler) :
@bq_decorator.oauth_aware
def get(self) :
if bq_decorator.has_credentials():
http = bq_decorator.http()
inputData = self.request.get("inputData")
queryData = {'query':'SELECT text FROM '
                         '[doctor-know:rtda.tweets] WHERE Words CONTAINS "'+inputData+'" GROUP BY text ORDER BY text LIMIT 150'}
tableData = bigquery_service.jobs()
dataList = tableData.query(projectId=PROJECTID,body=queryData).execute(http)
            resp = []
            if 'rows' in dataList:
                # parse dataList; each row carries a single 'text' cell
                for row in dataList['rows']:
                    for key, dict_list in row.iteritems():
                        resp.append({'text': dict_list[0]['v']})
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(resp))
else:
self.response.write(json.dumps({'error':'No credentials'}))
#######################################################################
## Model Words
## NOTE: this second WordsHandler shadows the one above; webapp2 routes
## /getWords to this definition.
class WordsHandler(webapp2.RequestHandler) :
inputData = "yes"
@bq_decorator.oauth_aware
def get(self) :
if bq_decorator.has_credentials():
http = bq_decorator.http()
inputData = self.request.get("inputData")
queryData = {'query':'SELECT text FROM '
'[doctor-know:rtda.tweets] WHERE text CONTAINS "'+inputData+'" GROUP BY text ORDER BY text LIMIT 300'}
tableData = bigquery_service.jobs()
dataList = tableData.query(projectId=PROJECTID,body=queryData).execute(http)
tweets = []
if 'rows' in dataList:
#parse dataList
count = 0
for row in dataList['rows']:
for key,dict_list in row.iteritems():
                        tweet = dict_list[0]['v']
count += 1
tweets.append({'text': tweet})
if count == 300:
break
ignore_words = [ "fuck", "shit", "cock", "penis", "porn"]
words = []
for tweet in tweets:
tt = tweet.get('text', "")
for word in tt.split():
if "http" in word:
continue
if word not in ignore_words:
words.append(word)
            resp = Counter(words)
            self.response.headers['Content-Type'] = 'application/json'
            self.response.headers.add('Access-Control-Allow-Origin', '*')
            self.response.out.write(json.dumps(resp))
        else:
            self.response.write(json.dumps({'error': 'No credentials'}))
#######################################################################
## Profile Page
class ProfilePage(webapp2.RequestHandler) :
def get(self):
user = users.get_current_user()
if user:
res = UserModel.query(UserModel.uid == user.user_id()).fetch()
if res:
ui = res[0]
template_data = {'firstname': ui.fname, 'lastname': ui.lname, 'words': ui.words, 'nickname': ui.fname+ " " +ui.lname, 'logout': users.create_logout_url('/')}
template_path = 'templates/profile.html'
self.response.out.write(template.render(template_path,template_data))
else:
self.redirect('/createProfile')
else:
self.redirect(users.create_login_url('/'))
#######################################################################
## Artificial Creativity Engine
class DisplayEngine(webapp2.RequestHandler) :
def get(self):
user = users.get_current_user()
if user:
res = UserModel.query(UserModel.uid == user.user_id()).fetch()
if res:
ui = res[0]
template_data = {'nickname': ui.fname+ " " +ui.lname, 'logout': users.create_logout_url('/')}
template_path = 'templates/engine.html'
self.response.out.write(template.render(template_path,template_data))
else:
template_data = {'nickname': user.nickname(), 'logout': users.create_logout_url('/')}
template_path = 'templates/engine.html'
self.response.out.write(template.render(template_path,template_data))
else:
self.redirect(users.create_login_url('/'))
#######################################################################
## Data Analysis
class DisplayData(webapp2.RequestHandler) :
def get(self):
user = users.get_current_user()
if user:
res = UserModel.query(UserModel.uid == user.user_id()).fetch()
if res:
ui = res[0]
template_data = {'nickname': ui.fname+ " " +ui.lname, 'logout': users.create_logout_url('/')}
template_path = 'templates/data.html'
self.response.out.write(template.render(template_path,template_data))
else:
template_data = {'nickname': user.nickname(), 'logout': users.create_logout_url('/')}
template_path = 'templates/data.html'
self.response.out.write(template.render(template_path,template_data))
else:
self.redirect(users.create_login_url('/'))
#######################################################################
## Establish/Update User Profile
class UserModel(ndb.Model) :
uid = ndb.StringProperty(indexed=True)
fname = ndb.StringProperty(indexed = False)
lname = ndb.StringProperty(indexed = False)
words = ndb.StringProperty(indexed=False,repeated=True)
#######################################################################
## Establish/Update User Profile
# class CustomStreamListener(tweepy.StreamListener):
# def __init__(self, api):
# self.api = api
# super(tweepy.StreamListener, self).__init__()
# #setup rabbitMQ Connection
# self.connection = RabbitConnection(host='130.211.189.207', heartbeat=None, debug=True)
# self.channel = self.connection.channel()
# #set max queue size
# args = {"x-max-length": 2000}
# self.channel.queue.declare(queue='twitter_topic_feed', arguments=args)
# def on_status(self, status):
# print status.text, "\n"
# data = {}
# data['text'] = status.text
# data['created_at'] = time.mktime(status.created_at.timetuple())
# data['geo'] = status.geo
# data['source'] = status.source
# #queue the tweet
# self.channel.basic.publish(exchange='',
# routing_key='twitter_topic_feed',
# body=json.dumps(data))
# def on_error(self, status_code):
# print >> sys.stderr, 'Encountered error with status code:', status_code
# return True # Don't kill the stream
# def on_timeout(self):
# print >> sys.stderr, 'Timeout...'
# return True # Don't kill the stream
# sapi = tweepy.streaming.Stream(auth, CustomStreamListener(api))
# # my keyword today is chelsea as the team just had a big win
# sapi.filter(track=[self.request.get("inputData")])
app = webapp2.WSGIApplication( [
('/', MainPage),
('/profile', ProfilePage),
('/createProfile', CreateProfile),
('/userRegister', ProcessUser),
('/getData', DataHandler),
('/getWords', WordsHandler),
('/data', DisplayData),
('/engine', DisplayEngine),
(decorator.callback_path, decorator.callback_handler()),
(bq_decorator.callback_path, bq_decorator.callback_handler())
], debug=True)
|
mit
| -4,567,955,119,498,473,000
| 34.443396
| 161
| 0.5895
| false
| 3.643152
| false
| false
| false
|
Nuevosmedios/ADL_LRS
|
adl_lrs/settings.py
|
1
|
7317
|
# Django settings for adl_lrs project.
from unipath import Path
# Root of LRS
SETTINGS_PATH = Path(__file__)
PROJECT_ROOT = SETTINGS_PATH.ancestor(3)
# If you want to debug
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'lrs',
'USER': 'root',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# The ID, as an integer, of the current site in the django_site database table.
# This is used so that application data can hook into specific sites and a single database can manage
# content for multiple sites.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = PROJECT_ROOT.child('media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# Where to be redirected after logging in
LOGIN_REDIRECT_URL = '/XAPI/me'
# Me view has a tab of user's statements
STMTS_PER_PAGE = 10
# Whether HTTP auth or OAuth is enabled
HTTP_AUTH_ENABLED = True
OAUTH_ENABLED = False
# OAuth callback views
OAUTH_AUTHORIZE_VIEW = 'oauth_provider.views.authorize_client'
OAUTH_CALLBACK_VIEW = 'oauth_provider.views.callback_view'
OAUTH_SIGNATURE_METHODS = ['plaintext','hmac-sha1','rsa-sha1']
OAUTH_REALM_KEY_NAME = 'http://localhost:8000/XAPI'
# Limit on number of statements the server will return
SERVER_STMT_LIMIT = 100
# ActivityID resolve timeout (seconds)
ACTIVITY_ID_RESOLVE_TIMEOUT = .2
# Caches for /more endpoint and attachments
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cache_statement_list',
'TIMEOUT': 86400,
},
'attachment_cache':{
'BACKEND':'django.core.cache.backends.db.DatabaseCache',
'LOCATION':'attachment_cache',
},
}
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'v+m%^r0x)$_x8i3trn*duc6vd-yju0kx2b#9lk0sn2k^7cgyp5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages"
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'lrs.util.AllowOriginMiddleware.AllowOriginMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Main url router
ROOT_URLCONF = 'adl_lrs.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'adl_lrs.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'lrs',
'gunicorn',
'oauth_provider',
'django.contrib.admin',
'django_extensions'
)
REQUEST_HANDLER_LOG_DIR = SETTINGS_PATH.ancestor(3) + '/logs/lrs.log'
DEFAULT_LOG_DIR = SETTINGS_PATH.ancestor(3) + '/logs/django_request.log'
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': u'%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'standard': {
'format': u'%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
'simple': {
'format': u'%(levelname)s %(message)s'
},
},
'handlers': {
'default': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': DEFAULT_LOG_DIR,
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': REQUEST_HANDLER_LOG_DIR,
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
},
'loggers': {
'lrs': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
}
}
|
apache-2.0
| -8,551,532,077,530,078,000
| 30.813043
| 101
| 0.66981
| false
| 3.616906
| false
| false
| false
|
tomasdubec/openstack-cinder
|
cinder/tests/scheduler/test_host_filters.py
|
1
|
6424
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import httplib
import stubout
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common.scheduler import filters
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.tests import utils as test_utils
from cinder import utils
DATA = ''
def stub_out_https_backend(stubs):
"""
Stubs out the httplib.HTTPRequest.getresponse to return
faked-out data instead of grabbing actual contents of a resource
    The faked-out response's read() returns the module-level ``DATA``
    string.
:param stubs: Set of stubout stubs
"""
class FakeHTTPResponse(object):
def read(self):
return DATA
def fake_do_request(self, *args, **kwargs):
return httplib.OK, FakeHTTPResponse()
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
stub_out_https_backend(self.stubs)
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_capacity_gb', 1024],
['>=', '$total_capacity_gb', 10 * 1024]])
# This has a side effect of testing 'get_filter_classes'
# when specifying a method (in this case, our standard filters)
filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
classes = filter_handler.get_all_classes()
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
def _stub_service_is_up(self, ret_value):
def fake_service_is_up(service):
return ret_value
self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 200,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_fails(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 120,
'reserved_percentage': 20,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes_infinite(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'infinite',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes_unknown(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'unknown',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_disabled(self):
# Test case where retry/re-scheduling is disabled.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_pass(self):
# Node not previously tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', {})
retry = dict(num_attempts=2, hosts=['host2'])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_fail(self):
# Node was already tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', {})
retry = dict(num_attempts=1, hosts=['host1'])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
|
apache-2.0
| -3,891,531,009,832,143,000
| 39.402516
| 78
| 0.603674
| false
| 4.144516
| true
| false
| false
|
mikalstill/ostrich
|
ostrich/stages/stage_40_enable_proxies.py
|
1
|
3000
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ostrich import steps
def get_steps(r):
"""Configure proxies."""
nextsteps = []
if r.complete['osa-branch'] == 'stable/mitaka':
if r.complete['http-proxy'] and r.complete['http-proxy'] != 'none':
local_servers = 'localhost,127.0.0.1'
if r.complete['local-cache'] != 'none':
local_servers += ',%s' % r.complete['local-cache']
r.kwargs['env'].update({'http_proxy': r.complete['http-proxy'],
'https_proxy': r.complete['http-proxy'],
'no_proxy': local_servers})
# This entry will only last until it is clobbered by ansible
nextsteps.append(
steps.FileAppendStep(
'proxy-environment',
'/etc/environment',
(('\n\nexport http_proxy="%(proxy)s"\n'
'export HTTP_PROXY="%(proxy)s"\n'
'export https_proxy="%(proxy)s"\n'
'export HTTPS_PROXY="%(proxy)s"\n'
'export ftp_proxy="%(proxy)s"\n'
'export FTP_PROXY="%(proxy)s"\n'
'export no_proxy=%(local)s\n'
                  'export NO_PROXY=%(local)s\n')
% {'proxy': r.complete['http-proxy'],
'local': local_servers}),
**r.kwargs)
)
replacements = [
('(http|https|git)://github.com',
r.complete['git-mirror-github']),
('(http|https|git)://git.openstack.org',
r.complete['git-mirror-openstack']),
]
if r.complete['local-cache'] != 'none':
replacements.append(
('https://rpc-repo.rackspace.com',
'http://%s/rpc-repo.rackspace.com' % r.complete['local-cache'])
)
nextsteps.append(
steps.BulkRegexpEditorStep(
'bulk-edit-osa',
'/opt/openstack-ansible',
'.*\.(ini|yml|sh)$',
replacements,
**r.kwargs)
)
nextsteps.append(
steps.BulkRegexpEditorStep(
'unapply-git-mirrors-for-cgit',
'/opt/openstack-ansible',
'.*\.(ini|yml|sh)$',
[
('%s/cgit' % r.complete['git-mirror-openstack'],
'https://git.openstack.org/cgit')
],
**r.kwargs)
)
return nextsteps
|
apache-2.0
| -6,931,821,039,428,020,000
| 35.144578
| 76
| 0.515333
| false
| 4.137931
| false
| false
| false
|
pozetroninc/micropython
|
tests/float/float1.py
|
1
|
2194
|
# test basic float capabilities
# literals
print(.12)
print(1.)
print(1.2)
print(0e0)
print(0e+0)
print(0e-0)
# float construction
print(float(1.2))
print(float("1.2"))
print(float("+1"))
print(float("1e1"))
print(float("1e+1"))
print(float("1e-1"))
print(float("inf"))
print(float("-inf"))
print(float("INF"))
print(float("infinity"))
print(float("INFINITY"))
print(float("nan"))
print(float("-nan"))
print(float("NaN"))
try:
float("")
except ValueError:
print("ValueError")
try:
float("1e+")
except ValueError:
print("ValueError")
try:
float("1z")
except ValueError:
print("ValueError")
# construct from something with the buffer protocol
print(float(b"1.2"))
print(float(bytearray(b"3.4")))
# unary operators
print(bool(0.0))
print(bool(1.2))
print(+(1.2))
print(-(1.2))
# division of integers
x = 1 / 2
print(x)
# /= operator
a = 1
a /= 2
print(a)
# floor division
print(1.0 // 2)
print(2.0 // 2)
# comparison
print(1.2 <= 3.4)
print(1.2 <= -3.4)
print(1.2 >= 3.4)
print(1.2 >= -3.4)
print(0.0 == False, 1.0 == True)
print(False == 0.0, True == 1.0)
# comparison of nan is special
nan = float('nan')
print(nan == 1.2)
print(nan == nan)
try:
1.0 / 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1.0 // 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1.2 % 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
0.0 ** -1
except ZeroDivisionError:
print("ZeroDivisionError")
# unsupported unary ops
try:
~1.2
except TypeError:
print("TypeError")
try:
1.2 in 3.4
except TypeError:
print("TypeError")
# small int on LHS, float on RHS, unsupported op
try:
print(1 | 1.0)
except TypeError:
print('TypeError')
# can't convert list to float
try:
float([])
except TypeError:
print("TypeError")
# test constant float with more than 255 chars
x = 1.84728699436059052516398251149631771898472869943605905251639825114963177189847286994360590525163982511496317718984728699436059052516398251149631771898472869943605905251639825114963177189847286994360590525163982511496317718984728699436059052516398251149631771898472869943605905251639825114963177189
print("%.5f" % x)
|
mit
| 6,278,333,151,221,868,000
| 17.132231
| 302
| 0.691431
| false
| 2.752823
| false
| true
| false
|
ChaoticEvil/django_base
|
src/apps/pages/admin.py
|
1
|
4774
|
from django.contrib import admin
from .models import Page, Menu
from django.shortcuts import render
from django.http import HttpResponse
from mptt.exceptions import InvalidMove
from apps.feincms.admin import tree_editor
from django_base.settings import SITE_NAME
from django.utils.translation import ugettext_lazy as _
from .forms import MenuParentForm
class PageAdmin(admin.ModelAdmin):
fieldsets = [
        (_('Meta information'), {
'classes': ('collapse',),
'fields': [
'mdesc', 'mkeys', 'mtitle'
],
}),
        (_('Page content'), {
'fields': [
'title', 'body'
],
}),
        (_('Template and status'), {
'fields': [
('template', 'is_public', 'is_index')
]
}),
        (_('Gallery'), {
'fields': [
'gallery'
]
}),
]
raw_id_fields = ('gallery',)
autocomplete_lookup_fields = {'fk': ['gallery']}
list_display = ('id', 'title', 'is_public', 'last_update')
list_display_links = ('id', 'title')
list_editable = ('is_public',)
list_per_page = 100
sortable_field_name = 'title'
search_fields = ['title', 'body']
list_filter = ['last_update', 'is_public', 'is_index']
class MenuAdmin(tree_editor.TreeEditor):
fieldsets = [
        (_('Menu item'), {
'fields': [
('title', 'slug'), 'parent', 'external_link', 'page',
('is_visible', 'is_separate')
]
}),
]
prepopulated_fields = {'slug': ('title',)}
raw_id_fields = ('parent', 'page')
autocomplete_lookup_fields = {'fk': ['page']}
list_display = ('title', 'get_link', 'position', 'is_visible', 'is_separate')
list_display_links = ('title',)
list_editable = ('position', 'is_visible', 'is_separate')
list_per_page = 100
sortable_field_name = 'position'
search_fields = ['title', 'slug']
list_filter = ['is_visible']
actions = ['inc_position_action', 'dec_position_action', 'set_parent_action']
def get_title(self, obj):
return '%s (%s)' % (obj.title, obj.slug)
    get_title.short_description = _('Title (link)')
def get_link(self, obj):
return obj.external_link if obj.external_link else '%s/pages/%s/' % (SITE_NAME, obj.slug)
    get_link.short_description = _('Link')
def inc_position_action(self, request, queryset):
for q in queryset:
q.inc_position()
        self.message_user(request, _('The position of the selected pages was increased by 1'))
    inc_position_action.short_description = _('Increase the ordering of the selected items')
def dec_position_action(self, request, queryset):
for q in queryset:
q.dec_position()
        self.message_user(request, _('The position of the selected pages was decreased by 1'))
    dec_position_action.short_description = _('Decrease the ordering of the selected items')
def set_parent_action(self, request, queryset):
if 'do_action' in request.POST:
form = MenuParentForm(request.POST)
if form.is_valid():
for q in queryset:
try:
q.move_to(form.cleaned_data['page'])
except InvalidMove as e:
return HttpResponse(
_(
                                '''Error!<br>
                                %s<br><br>
                                <a href='/admin/'>Back to the admin</a>'''
) % e,
content_type='text/html'
)
Menu.objects.rebuild()
                return  # Return nothing; this takes us back to the change list
else:
form = MenuParentForm()
return render(
request,
'admin/set_parent.html',
{
                'title': _('Select the parent menu item under which the selected pages should be moved'),
'objects': queryset, 'form': form
}
)
    set_parent_action.short_description = _('Move the selected pages to a parent category')
admin.site.register(Page, PageAdmin)
admin.site.register(Menu, MenuAdmin)
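# For reference, a plausible shape for the imported MenuParentForm used by
# set_parent_action above (an assumption; the real form lives in .forms):
#
#     class MenuParentForm(forms.Form):
#         page = forms.ModelChoiceField(queryset=Menu.objects.all(),
#                                       label=_('Parent menu item'))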
|
gpl-3.0
| -8,807,315,346,340,670,000
| 33.935484
| 112
| 0.537627
| false
| 3.164354
| false
| false
| false
|
skylina/test
|
public/BasePage.py
|
1
|
6311
|
# -*- coding: utf-8 -*-
from selenium.webdriver.support.wait import WebDriverWait
from selenium import webdriver
__author__ = 'lina'
import time
import sys
import xlrd.sheet
import os
class Action:
"""
    BasePage wraps methods shared by all pages, e.g. driver and url
"""
driver = None
    # Initialize driver, url, etc.
def __init__(self, base_url=None, pagetitle=None):
self.base_url = base_url
self.pagetitle = pagetitle
# self.driver = webdriver.Firefox()
# self.driver.implicitly_wait(30)
# self.driver = driver
"""
        Select which browser to launch via a parameter
# self.browser = "Firefox" #传入浏览器对象
# if Action.driver == None:
# if self.browser.upper() == 'IE': Action.driver = webdriver.Ie()
# elif self.browser.upper() == 'CHROME': Action.driver = webdriver.Chrome()
# elif self.browser.upper() == 'FIREFOX': Action.driver = webdriver.Firefox()
# elif self.browser.upper() == 'SAFARI': Action.driver = webdriver.Safari()
# else: Action.driver = webdriver.Ie()
# Action.driver.maximize_window()
# #pass
# #print u"加载浏览器驱动失败!"
# self.driver = Action.driver
self.verificationErrors = []
"""
    # Open the page and verify that it loaded correctly
    def _open(self, url, pagetitle):
        # Open the target URL with get
        self.driver.get(url)
        self.driver.maximize_window()
        # Assert that the opened page matches the configured title via on_page()
        assert self.on_page(pagetitle), u"Failed to open page %s" % url
    # Overridden single-element locator
def find_element(self, *loc):
# return self.driver.find_element(*loc)
try:
WebDriverWait(self.driver, 10).until(lambda driver: driver.find_element(*loc).is_displayed())
return self.driver.find_element(*loc)
except:
print (u"%s 页面中未能找到 %s 元素" % (self, loc))
# 重写一组元素定位方法
def find_elements(self, *loc):
# return self.driver.find_element(*loc)
try:
if len(self.driver.find_elements(*loc)):
return self.driver.find_elements(*loc)
except:
print (u"%s 页面中未能找到 %s 元素" % (self, loc))
# 定位一组元素中索引为第i个的元素 i从0开始
def find_elements_i(self, loc, index=None):
# return self.driver.find_element(*loc)
try:
if len(self.driver.find_elements(*loc)):
return self.driver.find_elements(*loc)[index]
except:
print (u"%s 页面中未能找到%s的第 %s 个元素 " % (self, loc, index))
# 重写switch_frame方法
def switch_frame(self, loc):
return self.driver.switch_to_frame(loc)
    # open() delegates to _open() to load the configured URL
def open(self):
self._open(self.base_url, self.pagetitle)
    # Check whether the configured title matches the current window title and return the result (True/False)
def on_page(self, pagetitle):
return pagetitle in self.driver.title
    # script() executes a JS snippet and returns the result
def script(self, src):
self.driver.execute_script(src)
    # Overridden send_keys method
    def send_keys(self, loc, value, clear_first=True, click_first=True):
        try:
            if click_first:
                self.find_element(*loc).click()
            if clear_first:
                self.find_element(*loc).clear()
            self.find_element(*loc).send_keys(value)
        except AttributeError:
            print (u"Page %s: could not find element %s" % (self, loc))
def checkTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
self.saveScreenshot(self.driver, "Error")
            raise AssertionError(msg)
else:
return False
    # Read a sheet (table) from an Excel file
def setTable(self, filepath, sheetname):
"""
        filepath: file path
        sheetname: sheet name
"""
data = xlrd.open_workbook(filepath)
        # Get the Excel sheet by name
table = data.sheet_by_name(sheetname)
return table
    # Read the xls sheet, yielding rows one by one via a generator
def getTabledata(self, filepath, sheetname):
"""
        filepath: file path
        sheetname: sheet name
"""
table = self.setTable(filepath, sheetname)
for args in range(1, table.nrows):
            # yield rows from a generator
yield table.row_values(args)
    # Get a single cell's value
    def getcelldata(self, filepath, sheetname, RowNum, ColNum):
"""
        filepath: file path
        sheetname: sheet name
        RowNum: row number, starting from 0
        ColNum: column number, starting from 0
        """
        table = self.setTable(filepath, sheetname)
celldata = table.cell_value(RowNum, ColNum)
return celldata
    # Read an element's locator strategy and its unique identifier
def locate(self, index, filepath="dataEngine\\data.xls", sheetname="element"):
"""
        filepath: file path
        sheetname: sheet name
        index: element key
        The return value has the form ("id","inputid") or ("xpath","/html/body/header/div[1]/nav")
"""
table = self.setTable(filepath, sheetname)
for i in range(1, table.nrows):
if index in table.row_values(i):
return table.row_values(i)[1:3]
    # savePngName: build the screenshot file name
def savePngName(self, name):
"""
        name: custom name for the image
"""
day = time.strftime('%Y-%m-%d', time.localtime(time.time()))
fp = "result\\" + day + "\\image"
tm = self.saveTime()
type = ".png"
        # If the screenshot directory exists, print and return the file name; otherwise create the directory first, then return it
if os.path.exists(fp):
filename = str(fp) + "\\" + str(tm) + str(" ") + str(name) + str(type)
print (filename)
# print "True"
return filename
else:
os.makedirs(fp)
filename = str(fp) + "\\" + str(tm) + str(" ") + str(name) + str(type)
print (filename)
# print "False"
return filename
    # Get the current system time
def saveTime(self):
"""
        Return the current system time, formatted like (2015-11-25 15_21_55)
"""
return time.strftime('%Y-%m-%d %H_%M_%S', time.localtime(time.time()))
    # saveScreenshot: take a screenshot and save it under the given name
def saveScreenshot(self, driver , name):
"""
        Take a snapshot screenshot
        name: image name
"""
        # Get the current working directory
# print os.getcwd()
image = self.driver.save_screenshot(self.savePngName(name))
return image
def save_img(self, img_name):
pic_name = self.savePngName(img_name)
print(pic_name)
#filepath = path.join(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))), pic_name)
#print(filepath)
self.driver.save_screenshot(pic_name)
time.sleep(5)
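# A minimal usage sketch (illustrative; URL, title and locator below are
# placeholders): subclassing Action as a page object.
#
#     from selenium.webdriver.common.by import By
#     class LoginPage(Action):
#         loc_username = (By.ID, 'username')
#         def login(self, name):
#             self.send_keys(self.loc_username, name)
#     page = LoginPage(base_url='http://example.com', pagetitle='Example')
#     page.driver = webdriver.Firefox()
#     page.open()
#     page.login('tester')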
|
gpl-2.0
| -5,157,220,809,121,450,000
| 25.131707
| 99
| 0.673138
| false
| 2.106567
| false
| false
| false
|
matrix-org/synapse
|
synapse/metrics/background_process_metrics.py
|
1
|
10401
|
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from functools import wraps
from typing import TYPE_CHECKING, Dict, Optional, Set, Union
from prometheus_client.core import REGISTRY, Counter, Gauge
from twisted.internet import defer
from synapse.logging.context import LoggingContext, PreserveLoggingContext
from synapse.logging.opentracing import (
SynapseTags,
noop_context_manager,
start_active_span,
)
from synapse.util.async_helpers import maybe_awaitable
if TYPE_CHECKING:
import resource
logger = logging.getLogger(__name__)
_background_process_start_count = Counter(
"synapse_background_process_start_count",
"Number of background processes started",
["name"],
)
_background_process_in_flight_count = Gauge(
"synapse_background_process_in_flight_count",
"Number of background processes in flight",
labelnames=["name"],
)
# we set registry=None in all of these to stop them getting registered with
# the default registry. Instead we collect them all via the CustomCollector,
# which ensures that we can update them before they are collected.
#
_background_process_ru_utime = Counter(
"synapse_background_process_ru_utime_seconds",
"User CPU time used by background processes, in seconds",
["name"],
registry=None,
)
_background_process_ru_stime = Counter(
"synapse_background_process_ru_stime_seconds",
"System CPU time used by background processes, in seconds",
["name"],
registry=None,
)
_background_process_db_txn_count = Counter(
"synapse_background_process_db_txn_count",
"Number of database transactions done by background processes",
["name"],
registry=None,
)
_background_process_db_txn_duration = Counter(
"synapse_background_process_db_txn_duration_seconds",
(
"Seconds spent by background processes waiting for database "
"transactions, excluding scheduling time"
),
["name"],
registry=None,
)
_background_process_db_sched_duration = Counter(
"synapse_background_process_db_sched_duration_seconds",
"Seconds spent by background processes waiting for database connections",
["name"],
registry=None,
)
# map from description to a counter, so that we can name our logcontexts
# incrementally. (It actually duplicates _background_process_start_count, but
# it's much simpler to do so than to try to combine them.)
_background_process_counts = {} # type: Dict[str, int]
# Set of all running background processes that became active since the
# last time metrics were scraped (i.e. background processes that performed some
# work since the last scrape.)
#
# We do it like this to handle the case where we have a large number of
# background processes stacking up behind a lock or linearizer, where we then
# only need to iterate over and update metrics for the process that have
# actually been active and can ignore the idle ones.
_background_processes_active_since_last_scrape = set() # type: Set[_BackgroundProcess]
# A lock that covers the above set and dict
_bg_metrics_lock = threading.Lock()
class _Collector:
"""A custom metrics collector for the background process metrics.
Ensures that all of the metrics are up-to-date with any in-flight processes
before they are returned.
"""
def collect(self):
global _background_processes_active_since_last_scrape
# We swap out the _background_processes set with an empty one so that
# we can safely iterate over the set without holding the lock.
with _bg_metrics_lock:
_background_processes_copy = _background_processes_active_since_last_scrape
_background_processes_active_since_last_scrape = set()
for process in _background_processes_copy:
process.update_metrics()
# now we need to run collect() over each of the static Counters, and
# yield each metric they return.
for m in (
_background_process_ru_utime,
_background_process_ru_stime,
_background_process_db_txn_count,
_background_process_db_txn_duration,
_background_process_db_sched_duration,
):
for r in m.collect():
yield r
REGISTRY.register(_Collector())
class _BackgroundProcess:
def __init__(self, desc, ctx):
self.desc = desc
self._context = ctx
self._reported_stats = None
def update_metrics(self):
"""Updates the metrics with values from this process."""
new_stats = self._context.get_resource_usage()
if self._reported_stats is None:
diff = new_stats
else:
diff = new_stats - self._reported_stats
self._reported_stats = new_stats
_background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
_background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
_background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
_background_process_db_txn_duration.labels(self.desc).inc(
diff.db_txn_duration_sec
)
_background_process_db_sched_duration.labels(self.desc).inc(
diff.db_sched_duration_sec
)
def run_as_background_process(desc: str, func, *args, bg_start_span=True, **kwargs):
"""Run the given function in its own logcontext, with resource metrics
This should be used to wrap processes which are fired off to run in the
background, instead of being associated with a particular request.
It returns a Deferred which completes when the function completes, but it doesn't
follow the synapse logcontext rules, which makes it appropriate for passing to
clock.looping_call and friends (or for firing-and-forgetting in the middle of a
normal synapse async function).
Args:
desc: a description for this background process type
func: a function, which may return a Deferred or a coroutine
bg_start_span: Whether to start an opentracing span. Defaults to True.
Should only be disabled for processes that will not log to or tag
a span.
args: positional args for func
kwargs: keyword args for func
Returns: Deferred which returns the result of func, but note that it does not
follow the synapse logcontext rules.
"""
async def run():
with _bg_metrics_lock:
count = _background_process_counts.get(desc, 0)
_background_process_counts[desc] = count + 1
_background_process_start_count.labels(desc).inc()
_background_process_in_flight_count.labels(desc).inc()
with BackgroundProcessLoggingContext(desc, count) as context:
try:
if bg_start_span:
ctx = start_active_span(
f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
)
else:
ctx = noop_context_manager()
with ctx:
return await maybe_awaitable(func(*args, **kwargs))
except Exception:
logger.exception(
"Background process '%s' threw an exception",
desc,
)
finally:
_background_process_in_flight_count.labels(desc).dec()
with PreserveLoggingContext():
# Note that we return a Deferred here so that it can be used in a
# looping_call and other places that expect a Deferred.
return defer.ensureDeferred(run())
def wrap_as_background_process(desc):
"""Decorator that wraps a function that gets called as a background
process.
Equivalent of calling the function with `run_as_background_process`
"""
def wrap_as_background_process_inner(func):
@wraps(func)
def wrap_as_background_process_inner_2(*args, **kwargs):
return run_as_background_process(desc, func, *args, **kwargs)
return wrap_as_background_process_inner_2
return wrap_as_background_process_inner
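# Example (illustrative; the function name is a placeholder): wrapping a
# periodic task so its CPU and DB usage are reported under one time series.
#
#     @wrap_as_background_process("update_caches")
#     async def update_caches():
#         ...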
class BackgroundProcessLoggingContext(LoggingContext):
"""A logging context that tracks in flight metrics for background
processes.
"""
__slots__ = ["_proc"]
def __init__(self, name: str, instance_id: Optional[Union[int, str]] = None):
"""
Args:
name: The name of the background process. Each distinct `name` gets a
separate prometheus time series.
            instance_id: an identifier to add to `name` to distinguish this instance of
the named background process in the logs. If this is `None`, one is
made up based on id(self).
"""
if instance_id is None:
instance_id = id(self)
super().__init__("%s-%s" % (name, instance_id))
self._proc = _BackgroundProcess(name, self)
def start(self, rusage: "Optional[resource._RUsage]"):
"""Log context has started running (again)."""
super().start(rusage)
# We've become active again so we make sure we're in the list of active
# procs. (Note that "start" here means we've become active, as opposed
# to starting for the first time.)
with _bg_metrics_lock:
_background_processes_active_since_last_scrape.add(self._proc)
def __exit__(self, type, value, traceback) -> None:
"""Log context has finished."""
super().__exit__(type, value, traceback)
# The background process has finished. We explicitly remove and manually
# update the metrics here so that if nothing is scraping metrics the set
# doesn't infinitely grow.
with _bg_metrics_lock:
_background_processes_active_since_last_scrape.discard(self._proc)
self._proc.update_metrics()
|
apache-2.0
| 8,190,291,921,315,463,000
| 34.742268
| 87
| 0.665128
| false
| 4.150439
| false
| false
| false
|
projectexpert/pmis
|
analytic_resource_plan_stock/models/analytic_resource_plan_line.py
|
1
|
5846
|
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
import odoo.addons.decimal_precision as dp
class AnalyticResourcePlanLine(models.Model):
_inherit = 'analytic.resource.plan.line'
@api.multi
def _compute_quantities(self):
for line in self:
stock = line.with_context(
analytic_account_id=line.account_id.id).product_id.\
_product_available()
if stock.get(line.product_id.id, False):
line.incoming_qty = stock[line.product_id.id]['incoming_qty']
line.outgoing_qty = stock[line.product_id.id]['outgoing_qty']
line.virtual_available = \
stock[line.product_id.id]['virtual_available']
line.qty_available = stock[line.product_id.id]['qty_available']
else:
line.incoming_qty = 0.0
line.outgoing_qty = 0.0
line.virtual_available = 0.0
line.qty_available = 0.0
@api.multi
def _compute_done_quantities(self):
for line in self:
stock = line.with_context(
analytic_account_id_out=line.account_id.id).product_id.\
_product_available()
if stock.get(line.product_id.id, False):
# available in customer means done
line.outgoing_done_qty = (
stock[line.product_id.id]['qty_available'])
else:
line.outgoing_done_qty = 0.0
line.incoming_done_qty = (line.qty_available - line.outgoing_qty
- line.outgoing_done_qty)
qty_available = fields.Float(
string='Qty Available',
digits=dp.get_precision('Product Unit of Measure'),
compute='_compute_quantities',
help="Current quantity of products. "
"In a context with a single Stock Location, this includes "
"goods stored at this Location, or any of its children. "
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, "
"or any of its children. "
"In a context with a single Shop, this includes goods "
"stored in the Stock Location of the Warehouse of this Shop, "
"or any of its children. "
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."
)
virtual_available = fields.Float(
string='Virtually available',
compute='_compute_quantities',
digits=dp.get_precision('Product Unit of Measure'),
help="Forecast quantity (computed as Quantity On Hand "
"- Outgoing + Incoming) "
"In a context with a single Stock Location, this includes "
"goods stored in this location, or any of its children. "
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, "
"or any of its children. "
"In a context with a single Shop, this includes goods "
"stored in the Stock Location of the Warehouse of this Shop, "
"or any of its children. "
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."
)
incoming_qty = fields.Float(
string='Qty Incoming',
digits=dp.get_precision('Product Unit of Measure'),
compute='_compute_quantities',
help="Quantity of products that are planned to arrive. "
"In a context with a single Stock Location, this includes "
"goods arriving to this Location, or any of its children. "
"In a context with a single Warehouse, this includes "
"goods arriving to the Stock Location of this Warehouse, or "
"any of its children. "
"In a context with a single Shop, this includes goods "
"arriving to the Stock Location of the Warehouse of this "
"Shop, or any of its children. "
"Otherwise, this includes goods arriving to any Stock "
"Location with 'internal' type."
)
outgoing_qty = fields.Float(
string='Outgoing quantity',
default=lambda self: self.unit_amount,
compute='_compute_quantities',
digits=dp.get_precision('Product Unit of Measure'),
help="Quantity of products that are planned to leave. "
"In a context with a single Stock Location, this includes "
"goods leaving this Location, or any of its children. "
"In a context with a single Warehouse, this includes "
"goods leaving the Stock Location of this Warehouse, or "
"any of its children. "
"In a context with a single Shop, this includes goods "
"leaving the Stock Location of the Warehouse of this "
"Shop, or any of its children. "
"Otherwise, this includes goods leaving any Stock "
"Location with 'internal' type."
)
incoming_done_qty = fields.Float(
string='Qty Incoming Done',
digits=dp.get_precision('Product Unit of Measure'),
compute='_compute_done_quantities',
help="Quantity of products that have been produced or have "
"arrived."
)
outgoing_done_qty = fields.Float(
string='Qty Outgoing Done',
default=lambda self: self.unit_amount,
compute='_compute_done_quantities',
digits=dp.get_precision('Product Unit of Measure'),
help="Quantity of products that have been consumed or delivered."
)
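# A minimal usage sketch (illustrative): the quantities above are computed in
# the context of the line's own analytic account, so reading them is enough:
#
#     line = env['analytic.resource.plan.line'].browse(line_id)
#     line.qty_available, line.virtual_available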
|
agpl-3.0
| 5,313,668,238,580,680,000
| 46.145161
| 79
| 0.594595
| false
| 4.320769
| false
| false
| false
|
silkentrance/django-db-mixins
|
django_mixins/base.py
|
1
|
1270
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Carsten Klein <trancesilken@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import six
import django.db.models.base as djbase
from .utils import InstanceDescriptorMixin
from .auth import CurrentUser
class ModelBase(djbase.Model):
class Meta:
abstract = True
class MixinMeta(djbase.ModelBase):
def __new__(cls, name, bases, attrs):
# all mixins are abstract
supernew = super(MixinMeta, cls).__new__
class Meta:
abstract = True
attrs['Meta'] = Meta
return supernew(cls, name, bases, attrs)
class MixinBase(djbase.Model, InstanceDescriptorMixin, six.with_metaclass(MixinMeta)):
class Meta:
abstract = True
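# A minimal usage sketch (illustrative; field and model names are
# placeholders): mixins derive from MixinBase and are combined with ModelBase
# in concrete models.
#
#     from django.db import models
#     class TimestampMixin(MixinBase):
#         created = models.DateTimeField(auto_now_add=True)
#     class Article(TimestampMixin, ModelBase):
#         title = models.CharField(max_length=100)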
|
apache-2.0
| 6,584,541,726,012,519,000
| 21.678571
| 86
| 0.692913
| false
| 3.825301
| false
| false
| false
|
ntt-sic/heat
|
heat/common/identifier.py
|
1
|
8227
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import collections
from heat.openstack.common import strutils
from heat.openstack.common.py3kcompat import urlutils
class HeatIdentifier(collections.Mapping):
FIELDS = (
TENANT, STACK_NAME, STACK_ID, PATH
) = (
'tenant', 'stack_name', 'stack_id', 'path'
)
path_re = re.compile(r'stacks/([^/]+)/([^/]+)(.*)')
def __init__(self, tenant, stack_name, stack_id, path=''):
'''
Initialise a HeatIdentifier from a Tenant ID, Stack name, Stack ID
and optional path. If a path is supplied and it does not begin with
"/", a "/" will be prepended.
'''
if path and not path.startswith('/'):
path = '/' + path
if '/' in stack_name:
raise ValueError(_('Stack name may not contain "/"'))
self.identity = {
self.TENANT: tenant,
self.STACK_NAME: stack_name,
self.STACK_ID: str(stack_id),
self.PATH: path,
}
@classmethod
def from_arn(cls, arn):
'''
Return a new HeatIdentifier generated by parsing the supplied ARN.
'''
fields = arn.split(':')
if len(fields) < 6 or fields[0].lower() != 'arn':
raise ValueError(_('"%s" is not a valid ARN') % arn)
id_fragment = ':'.join(fields[5:])
path = cls.path_re.match(id_fragment)
if fields[1] != 'openstack' or fields[2] != 'heat' or not path:
raise ValueError(_('"%s" is not a valid Heat ARN') % arn)
return cls(urlutils.unquote(fields[4]),
urlutils.unquote(path.group(1)),
urlutils.unquote(path.group(2)),
urlutils.unquote(path.group(3)))
@classmethod
def from_arn_url(cls, url):
'''
Return a new HeatIdentifier generated by parsing the supplied URL
The URL is expected to contain a valid arn as part of the path
'''
# Sanity check the URL
urlp = urlutils.urlparse(url)
if (urlp.scheme not in ('http', 'https') or
not urlp.netloc or not urlp.path):
raise ValueError(_('"%s" is not a valid URL') % url)
# Remove any query-string and extract the ARN
arn_url_prefix = '/arn%3Aopenstack%3Aheat%3A%3A'
match = re.search(arn_url_prefix, urlp.path, re.IGNORECASE)
if match is None:
raise ValueError(_('"%s" is not a valid ARN URL') % url)
# the +1 is to skip the leading /
url_arn = urlp.path[match.start() + 1:]
arn = urlutils.unquote(url_arn)
return cls.from_arn(arn)
def arn(self):
'''
Return an ARN of the form:
arn:openstack:heat::<tenant>:stacks/<stack_name>/<stack_id><path>
'''
return 'arn:openstack:heat::%s:%s' % (urlutils.quote(self.tenant, ''),
self._tenant_path())
def arn_url_path(self):
'''
Return an ARN quoted correctly for use in a URL
'''
return '/' + urlutils.quote(self.arn(), '')
def url_path(self):
'''
Return a URL-encoded path segment of a URL in the form:
<tenant>/stacks/<stack_name>/<stack_id><path>
'''
return '/'.join((urlutils.quote(self.tenant, ''), self._tenant_path()))
def _tenant_path(self):
'''
Return a URL-encoded path segment of a URL within a particular tenant,
in the form:
stacks/<stack_name>/<stack_id><path>
'''
return 'stacks/%s/%s%s' % (urlutils.quote(self.stack_name, ''),
urlutils.quote(self.stack_id, ''),
urlutils.quote(strutils.safe_encode(
self.path)))
def _path_components(self):
'''Return a list of the path components.'''
return self.path.lstrip('/').split('/')
def __getattr__(self, attr):
'''
Return one of the components of the identity when accessed as an
attribute.
'''
if attr not in self.FIELDS:
raise AttributeError(_('Unknown attribute "%s"') % attr)
return self.identity[attr]
def __getitem__(self, key):
'''Return one of the components of the identity.'''
if key not in self.FIELDS:
raise KeyError(_('Unknown attribute "%s"') % key)
return self.identity[key]
def __len__(self):
'''Return the number of components in an identity.'''
return len(self.FIELDS)
def __contains__(self, key):
return key in self.FIELDS
def __iter__(self):
return iter(self.FIELDS)
def __repr__(self):
return repr(dict(self))
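# Example (illustrative) of the ARN round-trip implemented above:
#
#     hi = HeatIdentifier('t', 'wordpress', '0123-abcd', '/resources/web')
#     hi.arn()
#     # -> 'arn:openstack:heat::t:stacks/wordpress/0123-abcd/resources/web'
#     HeatIdentifier.from_arn(hi.arn()) == hi   # True, via Mapping equality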
class ResourceIdentifier(HeatIdentifier):
'''An identifier for a resource.'''
RESOURCE_NAME = 'resource_name'
def __init__(self, tenant, stack_name, stack_id, path,
resource_name=None):
'''
Return a new Resource identifier based on the identifier components of
the owning stack and the resource name.
'''
if resource_name is not None:
if '/' in resource_name:
raise ValueError(_('Resource name may not contain "/"'))
path = '/'.join([path.rstrip('/'), 'resources', resource_name])
super(ResourceIdentifier, self).__init__(tenant,
stack_name,
stack_id,
path)
def __getattr__(self, attr):
'''
Return one of the components of the identity when accessed as an
attribute.
'''
if attr == self.RESOURCE_NAME:
return self._path_components()[-1]
return HeatIdentifier.__getattr__(self, attr)
def stack(self):
'''
Return a HeatIdentifier for the owning stack
'''
return HeatIdentifier(self.tenant, self.stack_name, self.stack_id,
'/'.join(self._path_components()[:-2]))
class EventIdentifier(HeatIdentifier):
'''An identifier for an event.'''
(RESOURCE_NAME, EVENT_ID) = (ResourceIdentifier.RESOURCE_NAME, 'event_id')
def __init__(self, tenant, stack_name, stack_id, path,
event_id=None):
'''
Return a new Event identifier based on the identifier components of
the associated resource and the event ID.
'''
if event_id is not None:
path = '/'.join([path.rstrip('/'), 'events', event_id])
super(EventIdentifier, self).__init__(tenant,
stack_name,
stack_id,
path)
def __getattr__(self, attr):
'''
Return one of the components of the identity when accessed as an
attribute.
'''
if attr == self.RESOURCE_NAME:
return getattr(self.resource(), attr)
if attr == self.EVENT_ID:
return self._path_components()[-1]
return HeatIdentifier.__getattr__(self, attr)
def resource(self):
'''
Return a HeatIdentifier for the owning resource
'''
return ResourceIdentifier(self.tenant, self.stack_name, self.stack_id,
'/'.join(self._path_components()[:-2]))
def stack(self):
'''
Return a HeatIdentifier for the owning stack
'''
return self.resource().stack()
|
apache-2.0
| 7,868,432,910,404,716,000
| 33.279167
| 79
| 0.54467
| false
| 4.287129
| false
| false
| false
|
kensho-technologies/graphql-compiler
|
graphql_compiler/backend.py
|
1
|
1810
|
# Copyright 2019-present Kensho Technologies, LLC.
from collections import namedtuple
from .compiler import (
emit_cypher,
emit_gremlin,
emit_match,
emit_sql,
ir_lowering_cypher,
ir_lowering_gremlin,
ir_lowering_match,
ir_lowering_sql,
)
from .schema import schema_info
# A backend is a compilation target (a language we can compile to)
#
# This class defines all the necessary and sufficient functionality a backend should implement
# in order to fit into our generic testing framework.
Backend = namedtuple(
"Backend",
(
# String, the internal name of this language.
"language",
# The subclass of SchemaInfo appropriate for this backend.
"SchemaInfoClass",
# Given a SchemaInfoClass and an IR that respects its schema, return a lowered IR with
# the same semantics.
"lower_func",
# Given a SchemaInfoClass and a lowered IR that respects its schema, emit a query
# in this language with the same semantics.
"emit_func",
),
)
gremlin_backend = Backend(
language="Gremlin",
SchemaInfoClass=schema_info.CommonSchemaInfo,
lower_func=ir_lowering_gremlin.lower_ir,
emit_func=emit_gremlin.emit_code_from_ir,
)
match_backend = Backend(
language="MATCH",
SchemaInfoClass=schema_info.CommonSchemaInfo,
lower_func=ir_lowering_match.lower_ir,
emit_func=emit_match.emit_code_from_ir,
)
cypher_backend = Backend(
language="Cypher",
SchemaInfoClass=schema_info.CommonSchemaInfo,
lower_func=ir_lowering_cypher.lower_ir,
emit_func=emit_cypher.emit_code_from_ir,
)
sql_backend = Backend(
language="SQL",
SchemaInfoClass=schema_info.SQLAlchemySchemaInfo,
lower_func=ir_lowering_sql.lower_ir,
emit_func=emit_sql.emit_code_from_ir,
)
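# A minimal usage sketch (illustrative; `schema_info` and `ir` are assumed
# inputs): every backend is driven the same way, per the field comments above.
#
#     backend = gremlin_backend
#     lowered_ir = backend.lower_func(schema_info, ir)
#     query = backend.emit_func(schema_info, lowered_ir)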
|
apache-2.0
| -8,970,847,914,919,115,000
| 27.28125
| 94
| 0.699448
| false
| 3.494208
| false
| false
| false
|
JYamihud/blender-organizer
|
py_data/modules/itemselector.py
|
1
|
11370
|
# -*- coding: utf-8 -*-
# system
import os
import sys
import socket
# graphics interface
import gtk
import pango
import cairo
import glib
import datetime
try:
import Image
except:
from PIL import Image
# calculational help
import datetime
# self made modules
import thumbnailer
import dialogs
import checklist
def select(pf, searchitem=""):
window = gtk.Dialog("Choose Item", None, 0, (gtk.STOCK_OK, gtk.RESPONSE_APPLY,
gtk.STOCK_CANCEL, gtk.RESPONSE_CLOSE))
box = window.get_child()
namebox = gtk.HBox(False)
box.pack_start(namebox, False)
namebox.pack_start(gtk.Label(" Search: "), False)
nameentry = gtk.Entry()
nameentry.set_text(searchitem)
namebox.pack_start(nameentry)
finalname = gtk.Entry()
finalname.set_text("")
#namebox.pack_end(finalname, False)
class draw:
def __init__(self, pf, box , win, search, finalname):
self.box = box
self.win = win
self.pf = pf
self.search = search
self.finalname = finalname
self.allowed = True
self.scroll = 0
self.dW = 0
self.DH = 0
self.mpx = 0
self.mpy = 0
self.mpf = ""
self.frame = 0
# LET'S PREPARE ALL THE ITEMS
self.listofitems = []
for CUR in ["chr", "veh", "loc", "obj"]:
print self.pf+"/dev/"+CUR
for i in os.walk(self.pf+"/dev/"+CUR).next()[1]:
self.listofitems.append([CUR,i])
self.objicon = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/obj_asset_undone.png")
self.chricon = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/chr_asset_undone.png")
self.vehicon = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/veh_asset_undone.png")
self.locicon = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/loc_asset_undone.png")
self.plus = gtk.gdk.pixbuf_new_from_file(self.pf+"/py_data/icons/plus.png")
def framegraph(widget, event):
self.frame = self.frame + 1
w, h = widget.window.get_size()
xgc = widget.window.new_gc()
mx, my, fx = widget.window.get_pointer()
# GETTING WHETHER THE WINDOW IS ACTIVE
self.winactive = self.win.is_active()
ctx = widget.window.cairo_create()
#ctx.select_font_face("Sawasdee", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
ctx.set_source_rgb(1,1,1)
xgc.line_width = 2
# BACKGROUND COLOR
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#2b2b2b")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, 0, 0, w, h) ## FILL FRAME
#IF WE SEARCH
showlist = self.listofitems
            if len(self.search.get_text()) > 0:
showlist = []
for i in self.listofitems:
if self.search.get_text().lower() in i[0].lower() or self.search.get_text().lower() in i[1].lower():
showlist.append(i)
# SCROLL SO I COULD DO THAT
S = self.scroll # the scroll value
# OUTPUTTING THEM TO THE SCREEN
n = 0
i = ["", ""]
sett = True
hoti = 0
al = 0
foundsearch = False
for n, i in enumerate(sorted(showlist)):
if self.search.get_text() in ["chr", "veh", "obj", "loc", i[1]]:
foundsearch = True
if self.search.get_text() == i[1] and self.frame < 3:
self.finalname.set_text("/dev/"+i[0]+"/"+i[1])
hoti = (20*n)+S # HEIGHT OF THIS ITEM
#every even darker
if (n % 2) == 0:
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#262626")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, 0, hoti+2, w, 20)
#mouse over
if my in range(hoti+2, hoti+22) and my in range(0, h) and mx in range(0,w):
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#3f3f3f")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, 0, hoti+2, w, 20)
if "GDK_BUTTON1" in str(fx) and self.allowed and "GDK_BUTTON1" not in str(self.mpf) and win.is_active() and sett: #IF CLICKED
self.finalname.set_text("/dev/"+i[0]+"/"+i[1])
sett = False
# if selected 395384
if "/dev/"+i[0]+"/"+i[1] == self.finalname.get_text():
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#395384")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, 0, hoti+2, w, 20)
ctx.set_font_size(15)
ctx.move_to( 30, hoti+17)
ctx.show_text(i[1])
#drawing icons
if i[0] == "chr":
widget.window.draw_pixbuf(None, self.chricon, 0, 0, 1, hoti+2, -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
elif i[0] == "veh":
widget.window.draw_pixbuf(None, self.vehicon, 0, 0, 1, hoti+2, -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
elif i[0] == "loc":
widget.window.draw_pixbuf(None, self.locicon, 0, 0, 1, hoti+2, -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
elif i[0] == "obj":
widget.window.draw_pixbuf(None, self.objicon, 0, 0, 1, hoti+2, -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
if n+1 == len(showlist):
hoti = hoti + 20
if len(self.search.get_text()) > 0 and foundsearch == False:
#mouse over
if my in range(hoti+2, hoti+22) and my in range(0, h) and mx in range(0,w):
xgc.set_rgb_fg_color(gtk.gdk.color_parse("#3f3f3f")) ## CHOSE COLOR
widget.window.draw_rectangle(xgc, True, 0, hoti+2, w, 20)
if "GDK_BUTTON1" in str(fx) and self.allowed and "GDK_BUTTON1" not in str(self.mpf) and win.is_active() and sett: #IF CLICKED
def ee():
self.addingnew = dialogs.AddAsset(self.pf, "chr", self.search.get_text())
path = self.addingnew.getpath()
if len(path) > 0:
print path, "PATH"
CUR = path[5:path.rfind("/")]
NAME = path[path.rfind("/")+1:]
print CUR, "CUR"
self.listofitems.append([CUR, NAME])
self.search.set_text(NAME)
self.finalname.set_text("/dev/"+CUR+"/"+NAME)
glib.timeout_add(10, ee)
al = al + 1
ctx.set_font_size(15)
ctx.move_to( 30, hoti+17)
ctx.show_text('Create item "'+self.search.get_text()+'"')
widget.window.draw_pixbuf(None, self.plus, 0, 0, 1, hoti+2, -1, -1, gtk.gdk.RGB_DITHER_NONE, 0, 0)
# SCROLLING IT SELF
# the scroll is done with the middle mouse button
if self.mpy > my and "GDK_BUTTON2" in str(fx) and "GDK_BUTTON2" in str(self.mpf):
self.scroll = self.scroll + (my-self.mpy)
if self.mpy < my and "GDK_BUTTON2" in str(fx) and "GDK_BUTTON2" in str(self.mpf):
self.scroll = self.scroll - (self.mpy-my)
if self.scroll < 0-((n+al)*20)+h-33:
self.scroll = 0-((n+al)*20)+h-33
if self.scroll > 0:
self.scroll = 0
# TESTING SOMETHING
ctx.set_font_size(20)
ctx.move_to( mx, my)
#ctx.show_text(str(mx)+":"+str(my)+" "+str(self.winactive)+" "+str(fx)+" "+self.search.get_text()+" "+self.finalname.get_text())
self.dW = w
self.DH = h
self.mpx = mx
self.mpy = my
self.mpf = fx
def callback():
if self.allowed == True:
widget.queue_draw()
glib.timeout_add(1, callback)
graph = gtk.DrawingArea()
graph.set_size_request(400,400)
self.box.pack_start(graph)
graph.show()
graph.connect("expose-event", framegraph)
drawer = draw(pf, box, window, nameentry, finalname)
box.show_all()
r = window.run()
ret = False
if r == gtk.RESPONSE_APPLY:
ret = finalname.get_text()
window.destroy()
return ret
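# A minimal usage sketch (illustrative; the project path is a placeholder):
# the dialog returns the chosen asset path, e.g. "/dev/chr/hero", or False if
# cancelled.
#
#     path = select("/path/to/project", searchitem="hero")
#     if path:
#         print path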
|
gpl-2.0
| 9,167,160,066,668,298,000
| 35.095238
| 151
| 0.385224
| false
| 4.282486
| false
| false
| false
|
NaN-tic/nereid
|
nereid/sessions.py
|
1
|
3596
|
#This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from datetime import datetime # noqa
from flask.sessions import SessionInterface, SessionMixin
from werkzeug.contrib.sessions import Session as SessionBase, SessionStore
from flask.globals import current_app
class Session(SessionBase, SessionMixin):
"Nereid Default Session Object"
class NullSession(Session):
"""
Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('the session is unavailable because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class MemcachedSessionStore(SessionStore):
"""
Session store that stores session on memcached
:param session_class: The session class to use.
Defaults to :class:`Session`.
"""
def __init__(self, session_class=Session):
SessionStore.__init__(self, session_class)
def save(self, session):
"""
Updates the session
"""
current_app.cache.set(
session.sid, dict(session), 30 * 24 * 60 * 60
)
def delete(self, session):
"""
Deletes the session
"""
current_app.cache.delete(session.sid)
def get(self, sid):
"""
Returns session
"""
if not self.is_valid_key(sid):
return self.new()
session_data = current_app.cache.get(sid)
if session_data is None:
session_data = {}
return self.session_class(session_data, sid, False)
def list(self):
"""
Lists all sessions in the store
"""
raise Exception("Not implemented yet")
class NereidSessionInterface(SessionInterface):
"""Session Management Class"""
session_store = MemcachedSessionStore()
null_session_class = NullSession
def open_session(self, app, request):
"""
Creates or opens a new session.
:param request: an instance of :attr:`request_class`.
"""
sid = request.cookies.get(app.session_cookie_name, None)
if sid:
return self.session_store.get(sid)
else:
return self.session_store.new()
def save_session(self, app, session, response):
"""
Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`.
:param session: the session to be saved
:param response: an instance of :attr:`response_class`
"""
if session.should_save:
self.session_store.save(session)
expires = self.get_expiration_time(app, session)
domain = self.get_cookie_domain(app)
from nereid.globals import request
sid = request.cookies.get(app.session_cookie_name, None)
if session.sid != sid:
# The only information in the session is the sid, and the
# only reason why a cookie should be set again is if that
# has changed
response.set_cookie(
app.session_cookie_name, session.sid,
expires=expires, httponly=False, domain=domain
)
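# A minimal usage sketch (illustrative): installing the interface on the
# application makes the memcached-backed store above serve `session`.
#
#     app.session_interface = NereidSessionInterface()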
|
gpl-3.0
| 3,200,084,190,623,582,000
| 31.107143
| 77
| 0.606229
| false
| 4.439506
| false
| false
| false
|
cortedeltimo/SickRage
|
sickbeard/clients/transmission_client.py
|
1
|
5187
|
# coding=utf-8
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
import re
from base64 import b64encode
import sickbeard
from sickbeard.clients.generic import GenericClient
class TransmissionAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(TransmissionAPI, self).__init__('Transmission', host, username, password)
self.url = '/'.join((self.host.rstrip('/'), sickbeard.TORRENT_RPCURL.strip('/'), 'rpc'))
def _get_auth(self):
post_data = json.dumps({'method': 'session-get', })
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), timeout=120,
verify=sickbeard.TORRENT_VERIFY_CERT)
self.auth = re.search(r'X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1)
except Exception:
return None
self.session.headers.update({'x-transmission-session-id': self.auth})
# Validating Transmission authorization
post_data = json.dumps({'arguments': {},
'method': 'session-get'})
self._request(method='post', data=post_data)
return self.auth
def _add_torrent_uri(self, result):
arguments = {
'filename': result.url,
'paused': int(sickbeard.TORRENT_PAUSED)
}
if sickbeard.TORRENT_PATH:
arguments['download-dir'] = sickbeard.TORRENT_PATH + "/" + result.show.name + "/"
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _add_torrent_file(self, result):
arguments = {
'metainfo': b64encode(result.content),
'paused': 1 if sickbeard.TORRENT_PAUSED else 0
}
if sickbeard.TORRENT_PATH:
arguments['download-dir'] = sickbeard.TORRENT_PATH + "/" + result.show.name + "/"
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_ratio(self, result):
ratio = None
if result.ratio:
ratio = result.ratio
mode = 0
if ratio:
if float(ratio) == -1:
ratio = 0
mode = 2
elif float(ratio) >= 0:
ratio = float(ratio)
mode = 1 # Stop seeding at seedRatioLimit
arguments = {'ids': [result.hash],
'seedRatioLimit': ratio,
'seedRatioMode': mode}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_seed_time(self, result):
if sickbeard.TORRENT_SEED_TIME and sickbeard.TORRENT_SEED_TIME != -1:
time = int(60 * float(sickbeard.TORRENT_SEED_TIME))
arguments = {'ids': [result.hash],
'seedIdleLimit': time,
'seedIdleMode': 1}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
else:
return True
def _set_torrent_priority(self, result):
arguments = {'ids': [result.hash]}
if result.priority == -1:
arguments['priority-low'] = []
elif result.priority == 1:
# set high priority for all files in torrent
arguments['priority-high'] = []
# move torrent to the top if the queue
arguments['queuePosition'] = 0
if sickbeard.TORRENT_HIGH_BANDWIDTH:
arguments['bandwidthPriority'] = 1
else:
arguments['priority-normal'] = []
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
api = TransmissionAPI()
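# Illustrative only: the module-level `api` above is what SickRage wires in;
# a direct session would look like this (host and credentials are
# placeholders).
#
#     client = TransmissionAPI('http://localhost:9091/', 'user', 'pass')
#     client._get_auth()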
|
gpl-3.0
| 5,219,753,062,150,899,000
| 31.622642
| 101
| 0.572393
| false
| 4.15625
| false
| false
| false
|
antoinearnoud/openfisca-france
|
openfisca_france/reforms/plfr2014.py
|
1
|
4011
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
from ..model.base import *
dir_path = os.path.join(os.path.dirname(__file__), 'parameters')
# TODO: the cuts in social contributions have not been coded, because they were
# cancelled (in whole or in part?) by the Conseil constitutionnel
class plfr2014(Reform):
name = u'Projet de Loi de Finances Rectificative 2014'
class reduction_impot_exceptionnelle(Variable):
definition_period = YEAR
def formula_2013_01_01(foyer_fiscal, period, parameters):
janvier = period.first_month
nb_adult = foyer_fiscal('nb_adult', period)
nb_parents = foyer_fiscal.declarant_principal.famille('nb_parents', period = janvier)
rfr = foyer_fiscal('rfr', period)
params = parameters(period).plfr2014.reduction_impot_exceptionnelle
plafond = params.seuil * nb_adult + (nb_parents - nb_adult) * 2 * params.majoration_seuil
montant = params.montant_plafond * nb_adult
return min_(max_(plafond + montant - rfr, 0), montant)
class reductions(Variable):
label = u"Somme des réductions d'impôt à intégrer pour l'année 2013"
definition_period = YEAR
def formula_2013_01_01(foyer_fiscal, period, parameters):
accult = foyer_fiscal('accult', period)
adhcga = foyer_fiscal('adhcga', period)
cappme = foyer_fiscal('cappme', period)
creaen = foyer_fiscal('creaen', period)
daepad = foyer_fiscal('daepad', period)
deffor = foyer_fiscal('deffor', period)
dfppce = foyer_fiscal('dfppce', period)
doment = foyer_fiscal('doment', period)
domlog = foyer_fiscal('domlog', period)
donapd = foyer_fiscal('donapd', period)
duflot = foyer_fiscal('duflot', period)
ecpess = foyer_fiscal('ecpess', period)
garext = foyer_fiscal('garext', period)
intagr = foyer_fiscal('intagr', period)
invfor = foyer_fiscal('invfor', period)
invlst = foyer_fiscal('invlst', period)
ip_net = foyer_fiscal('ip_net', period)
locmeu = foyer_fiscal('locmeu', period)
mecena = foyer_fiscal('mecena', period)
mohist = foyer_fiscal('mohist', period)
patnat = foyer_fiscal('patnat', period)
prcomp = foyer_fiscal('prcomp', period)
reduction_impot_exceptionnelle = foyer_fiscal('reduction_impot_exceptionnelle', period)
repsoc = foyer_fiscal('repsoc', period)
resimm = foyer_fiscal('resimm', period)
rsceha = foyer_fiscal('rsceha', period)
saldom = foyer_fiscal('saldom', period)
scelli = foyer_fiscal('scelli', period)
sofica = foyer_fiscal('sofica', period)
spfcpi = foyer_fiscal('spfcpi', period)
total_reductions = accult + adhcga + cappme + creaen + daepad + deffor + dfppce + doment + domlog + \
donapd + duflot + ecpess + garext + intagr + invfor + invlst + locmeu + mecena + mohist + patnat + \
prcomp + repsoc + resimm + rsceha + saldom + scelli + sofica + spfcpi + reduction_impot_exceptionnelle
return min_(ip_net, total_reductions)
def apply(self):
for variable in [self.reduction_impot_exceptionnelle, self.reductions]:
self.update_variable(variable)
self.modify_parameters(modifier_function = modify_parameters)
def modify_parameters(parameters):
file_path = os.path.join(dir_path, 'plfr2014.yaml')
plfr2014_parameters_subtree = load_parameter_file(name='plfr2014', file_path=file_path)
file_path = os.path.join(dir_path, 'plfrss2014.yaml')
plfrss2014_parameters_subtree = load_parameter_file(name='plfrss2014', file_path=file_path)
parameters.add_child('plfr2014', plfr2014_parameters_subtree)
parameters.add_child('plfrss2014', plfrss2014_parameters_subtree)
return parameters
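# A minimal usage sketch (illustrative): OpenFisca reforms are applied by
# wrapping a baseline tax-benefit system.
#
#     from openfisca_france import FranceTaxBenefitSystem
#     reformed_system = plfr2014(FranceTaxBenefitSystem())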
|
agpl-3.0
| -1,933,542,702,604,743,700
| 44.477273
| 118
| 0.628186
| false
| 3.280328
| false
| false
| false
|
jelovirt/dita-generator
|
src/ditagen/web.py
|
1
|
5467
|
#!/usr/bin/env python
# -*- coding: UTF-8; indent-tabs-mode:nil; tab-width:4 -*-
# This file is part of DITA DTD Generator.
#
# Copyright 2009 Jarno Elovirta <http://www.elovirta.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import cgi
import cgitb; cgitb.enable()
import ditagen.dita
import ditagen.dtdgen
import ditagen.dita.v1_1
import ditagen.dita.v1_2
import ditagen.generator
def print_error(__msg):
print_response_headers(None, 500, __msg)
print __msg
sys.exit()
def print_response_headers(__file_name, __code=200, __msg="Ok"):
print u"Status: %d %s" % (__code, __msg)
print u"Content-Type: text/plain; charset=UTF-8"
# print u"Content-disposition: attachment; file_name=%s.%s" % (__root, __f)
#print u"Content-disposition: file_name=%s" % __file_name #__dita.getfileName(__type, __root, __f)
print
def main(form):
"""Main method."""
__topic_type = None
__output_type = None
__id = None
__root = None
__owner = None
__nested = None
#__remove = {}
#__global_atts = None
__format = None
__domains = []
#__types = []
__version = "1.1"
    __plugin_name = None
    __plugin_version = None
__stylesheet = None
__title = None
__file = None
try:
# read arguments
if u"version" in form:
__version = form.getfirst(u"version")
if __version not in ("1.1", "1.2"):
raise ValueError()
else:
print_error("version missing")
# get domains
for __d in form.getlist(u"domain"):
if __d in ditagen.DOMAIN_MAP[__version]:
__domains.append(ditagen.DOMAIN_MAP[__version][__d]())
# get type
__t = form.getfirst(u"type")
if __t in ditagen.TOPIC_MAP[__version]:
__topic_type = ditagen.TOPIC_MAP[__version][__t]()
__o = form.getfirst(u"output")
if __o in ditagen.OUTPUT_MAP:
__output_type = ditagen.OUTPUT_MAP[__o]
# get arguments
if u"id" in form:
__id = form.getfirst(u"id")
else:
print_error("id missing")
if u"root" in form:
__root = form.getfirst(u"root")
if u"owner" in form:
__owner = form.getfirst(u"owner")
else:
print_error("owner missing")
if u"title" in form:
__title = form.getfirst(u"title")
else:
print_error("title missing")
#if not __title:
# __title = __id.capitalize()
__nested = u"nested" in form
#__remove = dict([(n, True) for n in form.getlist("remove")])
#__global_atts = None#form.getfirst(u"attribute")
if u"file" in form:
__format = form.getfirst(u"file")
else:
print_error("file missing")
__stylesheet = form.getfirst(u"stylesheet")
__file = __id
#if __id is not None:
__topic_type = __output_type(__id, __title, __topic_type,
__owner, __file)#__root
if __topic_type == ditagen.dita.SpecializationType:
__topic_type.root = ditagen.dita.create_element(__topic_type, __root, __id)
# else would be reshelling
except:
#print u"HTTP/1.1 400 Invalid arguments"
#print
raise
# run generator
if __format== u"plugin" or not __format:
__dita_gen = ditagen.generator.PluginGenerator()
__dita_gen.out = sys.stdout
__dita_gen.topic_type = __topic_type
if not len(__domains) == 0:
__dita_gen.domains = __domains
__dita_gen.nested = __nested
__dita_gen.version = __version
#__dita_gen.set_title(__title)
if __stylesheet:
__dita_gen.set_stylesheet(__stylesheet)
if __plugin_name != None:
__dita_gen.plugin_name = __plugin_name
if __plugin_version != None:
__dita_gen.plugin_version = __plugin_version
__file_name = __dita_gen.get_file_name(__topic_type, __file, "zip")
print u"Status: 200 Ok"
#print u"Content-type: application/zip"
print u"Content-disposition: attachment; filename={0}".format(__file_name)
print
__dita_gen.generate_plugin()
else:
__dita_gen = ditagen.generator.DitaGenerator()
__dita_gen.out = sys.stdout
__dita_gen.topic_type = __topic_type
if not len(__domains) == 0:
__dita_gen.domains = __domains
__dita_gen.nested = __nested
__dita_gen.version = __version
__file_name = __dita_gen.get_file_name(__topic_type, __file, __format)
print_response_headers(__file_name)
if __format == u"dtd":
__dita_gen.generate_dtd()
elif __format == u"mod":
__dita_gen.generate_mod()
elif __format == u"ent":
__dita_gen.generate_ent()
if __name__ == "__main__":
main()
|
apache-2.0
| -8,890,596,476,996,714,000
| 33.16875
| 102
| 0.554966
| false
| 3.497761
| false
| false
| false
|
lukaslundgren/django-rest-framework-json-api
|
example/tests/test_model_viewsets.py
|
1
|
7677
|
import json
from django.contrib.auth import get_user_model
from django.utils import encoding
from django.core.urlresolvers import reverse
from django.conf import settings
from example.tests import TestBase
from example.tests.utils import dump_json, redump_json
from example.factories import CommentFactory
class ModelViewSetTests(TestBase):
"""
Test usage with ModelViewSets, also tests pluralization, camelization,
and underscore.
[<RegexURLPattern user-list ^identities/$>,
<RegexURLPattern user-detail ^identities/(?P<pk>[^/]+)/$>]
"""
list_url = reverse('user-list')
def setUp(self):
super(ModelViewSetTests, self).setUp()
self.detail_url = reverse('user-detail', kwargs={'pk': self.miles.pk})
setattr(settings, 'JSON_API_FORMAT_KEYS', 'dasherize')
def tearDown(self):
setattr(settings, 'JSON_API_FORMAT_KEYS', 'camelize')
def test_key_in_list_result(self):
"""
Ensure the result has a 'user' key since that is the name of the model
"""
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, 200)
user = get_user_model().objects.all()[0]
expected = {
'data': [
{
'type': 'users',
'id': encoding.force_text(user.pk),
'attributes': {
'first-name': user.first_name,
'last-name': user.last_name,
'email': user.email
},
}
],
'links': {
'first': 'http://testserver/identities?page=1',
'last': 'http://testserver/identities?page=2',
'next': 'http://testserver/identities?page=2',
'prev': None
},
'meta': {
'pagination': {
'page': 1,
'pages': 2,
'count': 2
}
}
}
content_dump = redump_json(response.content)
expected_dump = dump_json(expected)
assert expected_dump == content_dump
def test_page_two_in_list_result(self):
"""
Ensure that the second page is reachable and is the correct data.
"""
response = self.client.get(self.list_url, {'page': 2})
self.assertEqual(response.status_code, 200)
user = get_user_model().objects.all()[1]
expected = {
'data': [
{
'type': 'users',
'id': encoding.force_text(user.pk),
'attributes': {
'first-name': user.first_name,
'last-name': user.last_name,
'email': user.email
},
}
],
'links': {
'first': 'http://testserver/identities?page=1',
'last': 'http://testserver/identities?page=2',
'next': None,
'prev': 'http://testserver/identities?page=1',
},
'meta': {
'pagination': {
'page': 2,
'pages': 2,
'count': 2
}
}
}
content_dump = redump_json(response.content)
expected_dump = dump_json(expected)
assert expected_dump == content_dump
def test_page_range_in_list_result(self):
"""
        Ensure that the page size can be changed from the client.
        Also tests pluralization: returning two objects means ``user``
        is rendered as ``users``.
"""
response = self.client.get(self.list_url, {'page_size': 2})
self.assertEqual(response.status_code, 200)
users = get_user_model().objects.all()
expected = {
'data': [
{
'type': 'users',
'id': encoding.force_text(users[0].pk),
'attributes': {
'first-name': users[0].first_name,
'last-name': users[0].last_name,
'email': users[0].email
},
},
{
'type': 'users',
'id': encoding.force_text(users[1].pk),
'attributes': {
'first-name': users[1].first_name,
'last-name': users[1].last_name,
'email': users[1].email
},
}
],
'links': {
'first': 'http://testserver/identities?page=1&page_size=2',
'last': 'http://testserver/identities?page=1&page_size=2',
'next': None,
'prev': None
},
'meta': {
'pagination': {
'page': 1,
'pages': 1,
'count': 2
}
}
}
content_dump = redump_json(response.content)
expected_dump = dump_json(expected)
assert expected_dump == content_dump
def test_key_in_detail_result(self):
"""
Ensure the result has a 'user' key.
"""
response = self.client.get(self.detail_url)
self.assertEqual(response.status_code, 200)
expected = {
'data': {
'type': 'users',
'id': encoding.force_text(self.miles.pk),
'attributes': {
'first-name': self.miles.first_name,
'last-name': self.miles.last_name,
'email': self.miles.email
},
}
}
content_dump = redump_json(response.content)
expected_dump = dump_json(expected)
assert expected_dump == content_dump
def test_key_in_post(self):
"""
        Ensure a PUT returns the updated representation and that the
        change is persisted.
"""
self.client.login(username='miles', password='pw')
data = {
'data': {
'type': 'users',
'id': encoding.force_text(self.miles.pk),
'attributes': {
'first-name': self.miles.first_name,
'last-name': self.miles.last_name,
'email': 'miles@trumpet.org'
},
}
}
response = self.client.put(self.detail_url,
content_type='application/vnd.api+json',
data=dump_json(data))
content_dump = redump_json(response.content)
expected_dump = dump_json(data)
assert expected_dump == content_dump
# is it updated?
self.assertEqual(
get_user_model().objects.get(pk=self.miles.pk).email,
'miles@trumpet.org')
def test_required_relation(self):
"Author should be required on CommentSerializer"
comment = CommentFactory(author=None)
url = reverse('comment-detail', kwargs={'pk': comment.pk})
self.client.login(username='miles', password='pw')
data = {
'data': {
'type': 'comments',
'id': encoding.force_text(comment.pk),
'attributes': {},
'relationships': {'author': {'data': None}}
}
}
response = self.client.patch(url,
content_type='application/vnd.api+json',
data=dump_json(data))
self.assertEqual(response.status_code, 400)
|
bsd-2-clause
| 1,623,057,772,902,982,400
| 31.392405
| 78
| 0.466458
| false
| 4.364412
| true
| false
| false
|
angr/angr
|
angr/analyses/variable_recovery/engine_vex.py
|
1
|
11842
|
from typing import TYPE_CHECKING
import claripy
import pyvex
from ...engines.vex.claripy.datalayer import value as claripy_value
from ...engines.light import SimEngineLightVEXMixin
from ..typehoon import typevars, typeconsts
from .engine_base import SimEngineVRBase, RichR
if TYPE_CHECKING:
from .variable_recovery_base import VariableRecoveryStateBase
class SimEngineVRVEX(
SimEngineLightVEXMixin,
SimEngineVRBase,
):
state: 'VariableRecoveryStateBase'
# Statement handlers
def _handle_Put(self, stmt):
offset = stmt.offset
r = self._expr(stmt.data)
size = stmt.data.result_size(self.tyenv) // 8
if offset == self.arch.ip_offset:
return
self._assign_to_register(offset, r, size)
def _handle_Store(self, stmt):
addr_r = self._expr(stmt.addr)
size = stmt.data.result_size(self.tyenv) // 8
r = self._expr(stmt.data)
self._store(addr_r, r, size, stmt=stmt)
def _handle_StoreG(self, stmt):
guard = self._expr(stmt.guard)
if guard is True:
addr = self._expr(stmt.addr)
size = stmt.data.result_size(self.tyenv) // 8
data = self._expr(stmt.data)
self._store(addr, data, size, stmt=stmt)
def _handle_LoadG(self, stmt):
guard = self._expr(stmt.guard)
if guard is True:
addr = self._expr(stmt.addr)
if addr is not None:
self.tmps[stmt.dst] = self._load(addr, self.tyenv.sizeof(stmt.dst) // 8)
elif guard is False:
data = self._expr(stmt.alt)
self.tmps[stmt.dst] = data
else:
self.tmps[stmt.dst] = None
def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC):
if stmt.storedata is None:
# load-link
addr = self._expr(stmt.addr)
size = self.tyenv.sizeof(stmt.result) // self.arch.byte_width
data = self._load(addr, size)
self.tmps[stmt.result] = data
else:
# store-conditional
storedata = self._expr(stmt.storedata)
addr = self._expr(stmt.addr)
size = self.tyenv.sizeof(stmt.storedata.tmp) // self.arch.byte_width
self._store(addr, storedata, size)
self.tmps[stmt.result] = RichR(1)
def _handle_NoOp(self, stmt):
pass
# Expression handlers
def _expr(self, expr) -> RichR:
"""
:param expr:
:return:
:rtype: RichR
"""
r = super()._expr(expr)
if r is None:
bits = expr.result_size(self.tyenv)
return RichR(self.state.top(bits))
return r
def _handle_Get(self, expr):
reg_offset = expr.offset
reg_size = expr.result_size(self.tyenv) // 8
return self._read_from_register(reg_offset, reg_size, expr=expr)
def _handle_Load(self, expr: pyvex.IRExpr.Load) -> RichR:
addr = self._expr(expr.addr)
size = expr.result_size(self.tyenv) // 8
return self._load(addr, size)
def _handle_CCall(self, expr): # pylint:disable=useless-return
# ccalls don't matter
return RichR(self.state.top(expr.result_size(self.tyenv)))
def _handle_Conversion(self, expr: pyvex.IRExpr.Unop) -> RichR:
return RichR(self.state.top(expr.result_size(self.tyenv)))
# Function handlers
def _handle_function(self, func_addr): # pylint:disable=unused-argument,no-self-use,useless-return
return None
def _handle_Const(self, expr):
return RichR(claripy_value(expr.con.type, expr.con.value), typevar=typeconsts.int_type(expr.con.size))
def _handle_Add(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data + r1.data,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
typevar = None
if r0.typevar is not None and r1.data.concrete:
typevar = typevars.DerivedTypeVariable(r0.typevar, typevars.AddN(r1.data._model_concrete.value))
sum_ = r0.data + r1.data
return RichR(sum_,
typevar=typevar,
type_constraints={ typevars.Subtype(r0.typevar, r1.typevar) },
)
def _handle_Sub(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data - r1.data,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
typevar = None
if r0.typevar is not None and r1.data.concrete:
typevar = typevars.DerivedTypeVariable(r0.typevar, typevars.SubN(r1.data._model_concrete.value))
diff = r0.data - r1.data
return RichR(diff,
typevar=typevar,
)
def _handle_And(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data & r1.data)
if self.state.is_stack_address(r0.data):
r = r0.data
elif self.state.is_stack_address(r1.data):
r = r1.data
else:
r = self.state.top(result_size)
return RichR(r)
def _handle_Xor(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data ^ r1.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_Or(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data | r1.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_Not(self, expr):
arg = expr.args[0]
r0 = self._expr(arg)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete:
# constants
return RichR(~r0.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_Mul(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data * r1.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_DivMod(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
try:
signed = "U" in expr.op # Iop_DivModU64to32 vs Iop_DivMod
from_size = r0.data.size()
to_size = r1.data.size()
if signed:
quotient = (r0.data.SDiv(claripy.SignExt(from_size - to_size, r1.data)))
remainder = (r0.data.SMod(claripy.SignExt(from_size - to_size, r1.data)))
quotient_size = to_size
remainder_size = to_size
result = claripy.Concat(
claripy.Extract(remainder_size - 1, 0, remainder),
claripy.Extract(quotient_size - 1, 0, quotient)
)
else:
quotient = (r0.data // claripy.ZeroExt(from_size - to_size, r1.data))
remainder = (r0.data % claripy.ZeroExt(from_size - to_size, r1.data))
quotient_size = to_size
remainder_size = to_size
result = claripy.Concat(
claripy.Extract(remainder_size - 1, 0, remainder),
claripy.Extract(quotient_size - 1, 0, quotient)
)
return RichR(result)
except ZeroDivisionError:
pass
r = self.state.top(result_size)
return RichR(r)
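    # Layout note (added comment): the VEX DivMod operations place the
    # remainder in the high half and the quotient in the low half of the
    # result, which is why the Concat above is (remainder, quotient).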
def _handle_Div(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
try:
return RichR(r0.data / r1.data)
except ZeroDivisionError:
pass
r = self.state.top(result_size)
return RichR(r)
def _handle_Shr(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(claripy.LShR(r0.data, r1.data._model_concrete.value),
typevar=typeconsts.int_type(result_size),
type_constraints=None)
r = self.state.top(result_size)
return RichR(r,
typevar=r0.typevar,
)
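    # Note (added comment): claripy's ">>" on bitvectors is an arithmetic
    # (sign-extending) shift, so the logical variant claripy.LShR is used
    # above; e.g. claripy.LShR(claripy.BVV(0x80, 8), 4) yields 0x08 while
    # claripy.BVV(0x80, 8) >> 4 yields 0xf8.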
def _handle_Sar(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data >> r1.data._model_concrete.value,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
r = self.state.top(result_size)
return RichR(r,
typevar=r0.typevar,
)
def _handle_Shl(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data << r1.data._model_concrete.value,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
r = self.state.top(result_size)
return RichR(r,
typevar=r0.typevar,
)
def _handle_CmpEQ(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpNE(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpLE(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpLT(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpGE(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpGT(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
|
bsd-2-clause
| 2,412,318,608,170,448,400
| 30.163158
| 110
| 0.542476
| false
| 3.449461
| false
| false
| false
|
lesteve/sphinx-gallery
|
sphinx_gallery/py_source_parser.py
|
1
|
6344
|
# -*- coding: utf-8 -*-
r"""
Parser for python source files
==============================
"""
# Created Sun Nov 27 14:03:07 2016
# Author: Óscar Nájera
from __future__ import division, absolute_import, print_function
import ast
from distutils.version import LooseVersion
from io import BytesIO
import re
import sys
import tokenize
from textwrap import dedent
from .sphinx_compatibility import getLogger
logger = getLogger('sphinx-gallery')
SYNTAX_ERROR_DOCSTRING = """
SyntaxError
===========
Example script with invalid Python syntax
"""
def parse_source_file(filename):
"""Parse source file into AST node
Parameters
----------
filename : str
File path
Returns
-------
node : AST node
content : utf-8 encoded string
"""
    # can't use codecs.open(filename, 'r', 'utf-8') here b/c ast doesn't
    # work with unicode strings in Python2.7: "SyntaxError: encoding
    # declaration in Unicode string". In Python 2.7 a string cannot both
    # be decoded and carry information about its encoding, which is
    # particularly problematic since source files declare their encoding
    # in a header comment.
    # Minimal example to fail: ast.parse(u'# -*- coding: utf-8 -*-')
with open(filename, 'rb') as fid:
content = fid.read()
# change from Windows format to UNIX for uniformity
content = content.replace(b'\r\n', b'\n')
try:
node = ast.parse(content)
return node, content.decode('utf-8')
except SyntaxError:
return None, content.decode('utf-8')
def get_docstring_and_rest(filename):
"""Separate ``filename`` content between docstring and the rest
Strongly inspired from ast.get_docstring.
    Returns
    -------
    docstring : str
        docstring of ``filename``
    rest : str
        ``filename`` content without the docstring
    lineno : int
        the line number at which the content after the docstring starts
    """
node, content = parse_source_file(filename)
if node is None:
return SYNTAX_ERROR_DOCSTRING, content, 1
if not isinstance(node, ast.Module):
raise TypeError("This function only supports modules. "
"You provided {0}".format(node.__class__.__name__))
if not (node.body and isinstance(node.body[0], ast.Expr) and
isinstance(node.body[0].value, ast.Str)):
raise ValueError(('Could not find docstring in file "{0}". '
'A docstring is required by sphinx-gallery '
'unless the file is ignored by "ignore_pattern"')
.format(filename))
if LooseVersion(sys.version) >= LooseVersion('3.7'):
docstring = ast.get_docstring(node)
assert docstring is not None # should be guaranteed above
        # Keep a leading newline, purely for strict backward compatibility
        if len(node.body[0].value.s) and node.body[0].value.s[0] == '\n':
            docstring = '\n' + docstring
ts = tokenize.tokenize(BytesIO(content.encode()).readline)
# find the first string according to the tokenizer and get its end row
for tk in ts:
            if tk.exact_type == 3:  # 3 == tokenize.STRING
lineno, _ = tk.end
break
else:
lineno = 0
else:
# this block can be removed when python 3.6 support is dropped
docstring_node = node.body[0]
docstring = docstring_node.value.s
        # Python 2.7: the code was read as bytes and needs decoding to
        # utf-8, unless ``unicode_literals`` is imported in the source,
        # in which case ast already outputs unicode strings.
if hasattr(docstring, 'decode') and not isinstance(docstring, unicode):
docstring = docstring.decode('utf-8')
lineno = docstring_node.lineno # The last line of the string.
# This get the content of the file after the docstring last line
# Note: 'maxsplit' argument is not a keyword argument in python2
rest = '\n'.join(content.split('\n')[lineno:])
lineno += 1
return docstring, rest, lineno
def extract_file_config(content):
"""
Pull out the file-specific config specified in the docstring.
"""
prop_pat = re.compile(
r"^\s*#\s*sphinx_gallery_([A-Za-z0-9_]+)\s*=\s*(.+)\s*$",
re.MULTILINE)
file_conf = {}
for match in re.finditer(prop_pat, content):
name = match.group(1)
value = match.group(2)
try:
value = ast.literal_eval(value)
except (SyntaxError, ValueError):
logger.warning(
'Sphinx-gallery option %s was passed invalid value %s',
name, value)
else:
file_conf[name] = value
return file_conf
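# Illustration (added; the option names are only examples): a source file
# containing the comments
#     # sphinx_gallery_line_numbers = True
#     # sphinx_gallery_thumbnail_number = 2
# makes extract_file_config() return
#     {'line_numbers': True, 'thumbnail_number': 2}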
def split_code_and_text_blocks(source_file):
"""Return list with source file separated into code and text blocks.
Returns
-------
file_conf : dict
File-specific settings given in source file comments as:
``# sphinx_gallery_<name> = <value>``
blocks : list
(label, content, line_number)
List where each element is a tuple with the label ('text' or 'code'),
the corresponding content string of block and the leading line number
"""
docstring, rest_of_content, lineno = get_docstring_and_rest(source_file)
blocks = [('text', docstring, 1)]
file_conf = extract_file_config(rest_of_content)
pattern = re.compile(
r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
flags=re.M)
sub_pat = re.compile('^#', flags=re.M)
pos_so_far = 0
for match in re.finditer(pattern, rest_of_content):
code_block_content = rest_of_content[pos_so_far:match.start()]
if code_block_content.strip():
blocks.append(('code', code_block_content, lineno))
lineno += code_block_content.count('\n')
lineno += 1 # Ignored header line of hashes.
text_content = match.group('text_content')
text_block_content = dedent(re.sub(sub_pat, '', text_content)).lstrip()
if text_block_content.strip():
blocks.append(('text', text_block_content, lineno))
lineno += text_content.count('\n')
pos_so_far = match.end()
remaining_content = rest_of_content[pos_so_far:]
if remaining_content.strip():
blocks.append(('code', remaining_content, lineno))
return file_conf, blocks
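# Sketch of the expected output (added for illustration; the file content
# is hypothetical): for a source made of a docstring, one code line, a
# comment cell introduced by a line of 20+ "#" characters and a final code
# line, split_code_and_text_blocks() returns roughly
#     ({}, [('text', 'Docstring...', 1),
#           ('code', 'x = 1\n', 3),
#           ('text', 'A comment cell\n', 5),
#           ('code', 'y = 2\n', 6)])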
|
bsd-3-clause
| -6,063,947,884,348,162,000
| 32.378947
| 79
| 0.613213
| false
| 4.06278
| false
| false
| false
|
rjhd2/HadISD_v2
|
set_paths_and_vars.py
|
1
|
2534
|
#!/usr/local/sci/bin/python2.7
#------------------------------------------------------------
# SVN Info
#$Rev:: 84 $: Revision of last commit
#$Author:: rdunn $: Author of last commit
#$Date:: 2015-12-18 16:35:07 +0000 (Fri, 18 Dec 2015) $: Date of last commit
#------------------------------------------------------------
# START
#------------------------------------------------------------
"""
Sets a load of paths and defaults.
This should be the only place where these need editing for each release.
"""
import datetime as dt
import os
# File paths to read and use
HADISD_VERSION = "v201_2016p"
PREVIOUS_VERSION = "v200_2015p"
# For /project
ROOT_LOC = "/project/hadobs2/hadisd/{}".format(HADISD_VERSION)
INPUT_FILE_LOCS = "{}/code_{}/input_files/".format(ROOT_LOC, HADISD_VERSION)
# At the moment, code and input files are all stored on /project.
# For SPICE/Slurm
ROOT_LOC = "/scratch/hadobs/{}/".format(HADISD_VERSION)
IMAGE_LOCS = "{}/img_files_{}/".format(ROOT_LOC, HADISD_VERSION)
if not os.path.exists(IMAGE_LOCS): os.mkdir(IMAGE_LOCS)
NETCDF_DATA_LOCS = "{}/netcdf_files_{}/".format(ROOT_LOC, HADISD_VERSION)
if not os.path.exists(NETCDF_DATA_LOCS): os.mkdir(NETCDF_DATA_LOCS)
ISD_DATA_LOCS = "{}/isd_files_{}/".format(ROOT_LOC, HADISD_VERSION)
if not os.path.exists(ISD_DATA_LOCS): os.mkdir(ISD_DATA_LOCS)
LOG_OUTFILE_LOCS = "{}/suppl_files_{}/".format(ROOT_LOC, HADISD_VERSION)
if not os.path.exists(LOG_OUTFILE_LOCS): os.mkdir(LOG_OUTFILE_LOCS)
OLD_ISD_DATA_LOCS = "/project/hadobs2/hadisd/{}/isd_files_{}/".format(PREVIOUS_VERSION, PREVIOUS_VERSION)
OLD_INPUT_FILE_LOCS = "/project/hadobs2/hadisd/{}/code_{}/input_files/".format(PREVIOUS_VERSION, PREVIOUS_VERSION)
# Other settings
DATASTART = dt.datetime(1931,1,1,0,0)
DATAEND = dt.datetime(2017,1,1,0,0)
process_vars = ["temperatures","dewpoints","slp","windspeeds", "winddirs", "total_cloud_cover","low_cloud_cover","mid_cloud_cover","high_cloud_cover"]
carry_thru_vars = ["cloud_base","precip1_depth","precip1_period","wind_gust", "past_sigwx1"]
# print for information each time - enables clearer checking
print "HadISD version: {}".format(HADISD_VERSION)
print "Data location : {}".format(NETCDF_DATA_LOCS)
print "Data range : {} - {}\n".format(dt.datetime.strftime(DATASTART, "%Y-%m-%d"), dt.datetime.strftime(DATAEND, "%Y-%m-%d"))
#------------------------------------------------------------
# END
#------------------------------------------------------------
|
bsd-3-clause
| -5,510,080,318,746,911,000
| 40.540984
| 150
| 0.59116
| false
| 3.053012
| false
| false
| false
|
Mushiyo/isso
|
isso/wsgi.py
|
1
|
5736
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import sys
import socket
try:
from urllib.parse import quote, urlparse
from socketserver import ThreadingMixIn
from http.server import HTTPServer
except ImportError:
from urllib import quote
from urlparse import urlparse
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer
from werkzeug.serving import WSGIRequestHandler
from werkzeug.wrappers import Request as _Request
from werkzeug.datastructures import Headers
from isso.compat import string_types
def host(environ): # pragma: no cover
"""
Reconstruct host from environment. A modified version
of http://www.python.org/dev/peps/pep-0333/#url-reconstruction
"""
url = environ['wsgi.url_scheme']+'://'
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
return url + quote(environ.get('SCRIPT_NAME', ''))
def urlsplit(name):
"""
Parse :param:`name` into (netloc, port, ssl)
"""
    if not isinstance(name, string_types):
name = str(name)
if not name.startswith(('http://', 'https://')):
name = 'http://' + name
rv = urlparse(name)
if rv.scheme == 'https' and rv.port is None:
return rv.netloc, 443, True
return rv.netloc.rsplit(':')[0], rv.port or 80, rv.scheme == 'https'
def urljoin(netloc, port, ssl):
"""
Basically the counter-part of :func:`urlsplit`.
"""
rv = ("https" if ssl else "http") + "://" + netloc
    if (ssl and port != 443) or (not ssl and port != 80):
rv += ":%i" % port
return rv
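# Round-trip sketch (added; the values are illustrative):
#   urlsplit("example.tld:8080")        -> ("example.tld", 8080, False)
#   urlsplit("https://example.tld")     -> ("example.tld", 443, True)
#   urljoin("example.tld", 8080, False) -> "http://example.tld:8080"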
def origin(hosts):
"""
    Return a function that maps a WSGI environ to a valid HTTP origin:
    the matching configured host when the request's Origin/Referer
    matches one, otherwise the first configured host (or
    ``http://invalid.local`` when no hosts are configured).
"""
hosts = [urlsplit(h) for h in hosts]
def func(environ):
if not hosts:
return "http://invalid.local"
loc = environ.get("HTTP_ORIGIN", environ.get("HTTP_REFERER", None))
if loc is None:
return urljoin(*hosts[0])
for split in hosts:
if urlsplit(loc) == split:
return urljoin(*split)
else:
return urljoin(*hosts[0])
return func
class SubURI(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
return self.app(environ, start_response)
class CORSMiddleware(object):
"""Add Cross-origin resource sharing headers to every request."""
methods = ("HEAD", "GET", "POST", "PUT", "DELETE")
def __init__(self, app, origin, allowed=None, exposed=None):
self.app = app
self.origin = origin
self.allowed = allowed
self.exposed = exposed
def __call__(self, environ, start_response):
def add_cors_headers(status, headers, exc_info=None):
headers = Headers(headers)
headers.add("Access-Control-Allow-Origin", self.origin(environ))
headers.add("Access-Control-Allow-Credentials", "true")
headers.add("Access-Control-Allow-Methods", ", ".join(self.methods))
if self.allowed:
headers.add("Access-Control-Allow-Headers", ", ".join(self.allowed))
if self.exposed:
headers.add("Access-Control-Expose-Headers", ", ".join(self.exposed))
return start_response(status, headers.to_list(), exc_info)
if environ.get("REQUEST_METHOD") == "OPTIONS":
add_cors_headers("200 Ok", [("Content-Type", "text/plain")])
return []
return self.app(environ, add_cors_headers)
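# Usage sketch (added; the host list is hypothetical):
#   wrapped = CORSMiddleware(app, origin(["https://example.tld/"]),
#                            allowed=("Origin", "Content-Type"))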
class LegacyWerkzeugMiddleware(object):
# Add compatibility with werkzeug 0.8
# -- https://github.com/posativ/isso/pull/170
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def fix_headers(status, headers, exc_info=None):
headers = [(to_native(key), value) for key, value in headers]
return start_response(status, headers, exc_info)
return self.app(environ, fix_headers)
class Request(_Request):
# Assuming UTF-8, comments with 65536 characters would consume
# 128 kb memory. The remaining 128 kb cover additional parameters
# and WSGI headers.
max_content_length = 256 * 1024
class SocketWSGIRequestHandler(WSGIRequestHandler):
def run_wsgi(self):
self.client_address = ("<local>", 0)
super(SocketWSGIRequestHandler, self).run_wsgi()
class SocketHTTPServer(HTTPServer, ThreadingMixIn):
"""
    A simple SocketServer to serve werkzeug's WSGIRequestHandler.
"""
multithread = True
multiprocess = False
allow_reuse_address = 1
address_family = socket.AF_UNIX
request_queue_size = 128
def __init__(self, sock, app):
HTTPServer.__init__(self, sock, SocketWSGIRequestHandler)
self.app = app
self.ssl_context = None
self.shutdown_signal = False
|
mit
| -1,876,850,611,275,330,300
| 26.84466
| 85
| 0.607915
| false
| 4.030921
| false
| false
| false
|
lidatong/dataclasses-json
|
dataclasses_json/utils.py
|
1
|
4383
|
import inspect
import sys
from datetime import datetime, timezone
from typing import Collection, Mapping, Optional, TypeVar, Any
def _get_type_cons(type_):
    """More spaghetti logic for 3.6 vs. 3.7"""
    if sys.version_info.minor == 6:
        try:
            cons = type_.__extra__
        except AttributeError:
            try:
                cons = type_.__origin__
            except AttributeError:
                cons = type_
            else:
                cons = type_ if cons is None else cons
        else:
            try:
                cons = type_.__origin__ if cons is None else cons
            except AttributeError:
                cons = type_
    else:
        cons = type_.__origin__
    return cons
def _get_type_origin(type_):
"""Some spaghetti logic to accommodate differences between 3.6 and 3.7 in
the typing api"""
try:
origin = type_.__origin__
except AttributeError:
if sys.version_info.minor == 6:
try:
origin = type_.__extra__
except AttributeError:
origin = type_
else:
origin = type_ if origin is None else origin
else:
origin = type_
return origin
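# Illustrative behaviour (added comment; assumes Python 3.7+ semantics):
#   _get_type_origin(typing.List[int]) is list  -> True
#   _get_type_origin(int) is int                -> True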
def _hasargs(type_, *args):
try:
res = all(arg in type_.__args__ for arg in args)
except AttributeError:
return False
else:
return res
def _isinstance_safe(o, t):
try:
result = isinstance(o, t)
except Exception:
return False
else:
return result
def _issubclass_safe(cls, classinfo):
try:
return issubclass(cls, classinfo)
except Exception:
return (_is_new_type_subclass_safe(cls, classinfo)
if _is_new_type(cls)
else False)
def _is_new_type_subclass_safe(cls, classinfo):
super_type = getattr(cls, "__supertype__", None)
if super_type:
return _is_new_type_subclass_safe(super_type, classinfo)
try:
return issubclass(cls, classinfo)
except Exception:
return False
def _is_new_type(type_):
return inspect.isfunction(type_) and hasattr(type_, "__supertype__")
def _is_optional(type_):
return (_issubclass_safe(type_, Optional) or
_hasargs(type_, type(None)) or
type_ is Any)
def _is_mapping(type_):
return _issubclass_safe(_get_type_origin(type_), Mapping)
def _is_collection(type_):
return _issubclass_safe(_get_type_origin(type_), Collection)
def _is_nonstr_collection(type_):
return (_issubclass_safe(_get_type_origin(type_), Collection)
and not _issubclass_safe(type_, str))
def _timestamp_to_dt_aware(timestamp: float):
tz = datetime.now(timezone.utc).astimezone().tzinfo
dt = datetime.fromtimestamp(timestamp, tz=tz)
return dt
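# Example (added; the rendered offset depends on the local timezone):
#   _timestamp_to_dt_aware(0.0)
#   -> datetime.datetime(1970, 1, 1, ..., tzinfo=<local timezone>)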
def _undefined_parameter_action_safe(cls):
try:
if cls.dataclass_json_config is None:
return
action_enum = cls.dataclass_json_config['undefined']
except (AttributeError, KeyError):
return
if action_enum is None or action_enum.value is None:
return
return action_enum
def _handle_undefined_parameters_safe(cls, kvs, usage: str):
"""
    Check whether an undefined-parameters action is defined and perform
    the corresponding action.
"""
undefined_parameter_action = _undefined_parameter_action_safe(cls)
usage = usage.lower()
if undefined_parameter_action is None:
return kvs if usage != "init" else cls.__init__
if usage == "from":
return undefined_parameter_action.value.handle_from_dict(cls=cls,
kvs=kvs)
elif usage == "to":
return undefined_parameter_action.value.handle_to_dict(obj=cls,
kvs=kvs)
elif usage == "dump":
return undefined_parameter_action.value.handle_dump(obj=cls)
elif usage == "init":
return undefined_parameter_action.value.create_init(obj=cls)
else:
raise ValueError(
f"usage must be one of ['to', 'from', 'dump', 'init'], "
f"but is '{usage}'")
# Define a type for the CatchAll field
# https://stackoverflow.com/questions/59360567/define-a-custom-type-that-behaves-like-typing-any
CatchAllVar = TypeVar("CatchAllVar", bound=Mapping)
|
mit
| 3,251,248,470,436,851,000
| 27.096154
| 96
| 0.588182
| false
| 4.017415
| false
| false
| false
|
simonvh/fluff
|
tests/test_commands.py
|
1
|
4188
|
import pytest
import os
import urllib.request
import tarfile
from tempfile import NamedTemporaryFile
from sklearn.metrics import v_measure_score
from fluff.parse import parse_cmds
from fluff.commands.profile import profile
from fluff.commands.heatmap import heatmap
from fluff.commands.bandplot import bandplot
@pytest.fixture
def bamfile():
return "tests/data/H3K4me3.bam"
@pytest.fixture
def bwfile():
return "tests/data/profile.bw"
@pytest.fixture
def bedfile():
return "tests/data/profile.bed"
@pytest.fixture
def regionfile():
return "tests/data/profile_region.bed"
@pytest.fixture
def test_data_from_osf():
fnames = [
"tests/data/big/H1_H3K27ac.bam",
"tests/data/big/H1_H3K27ac.bam.bai",
"tests/data/big/mesenchymal_H3K27ac.bam",
"tests/data/big/mesenchymal_H3K27ac.bam.bai",
"tests/data/big/mesendoderm_H3K27ac.bam",
"tests/data/big/mesendoderm_H3K27ac.bam.bai",
"tests/data/big/neuronal_progenitor_H3K27ac.bam",
"tests/data/big/neuronal_progenitor_H3K27ac.bam.bai",
"tests/data/big/trophoblast_H3K27ac.bam",
"tests/data/big/trophoblast_H3K27ac.bam.bai",
"tests/data/big/peaks.bed",
]
download = False
for fname in fnames:
if not os.path.exists(fname):
download = True
break
if download:
# test data tarball on osf.io
url = "https://osf.io/6yftg/download"
tarball = "tests/data/big/test_data.tgz"
urllib.request.urlretrieve(url, tarball)
with tarfile.open(tarball) as tf:
tf.extractall(path="tests/data/big/")
os.unlink(tarball)
clusters = "tests/data/big/clusters.kmeans.euclidean.5.txt"
return fnames[-1], [f for f in fnames if f[-3:] == "bam"], clusters
def test_profile(bamfile):
    # Only tests that the command runs successfully;
    # doesn't check the image
with NamedTemporaryFile(prefix="fluff.", suffix=".png") as tmp:
args = ["profile",
"-i", "scaffold_1:44749422-44750067",
"-d", bamfile,
"-o", tmp.name]
args = parse_cmds().parse_args(args)
profile(args)
def test_heatmap(bamfile, bedfile, bwfile, regionfile):
    # Only tests that the command runs successfully;
    # doesn't check the image
with NamedTemporaryFile(prefix="fluff.", suffix=".png") as tmp:
args = ["heatmap",
"-f", regionfile,
"-d", bamfile, bwfile, bedfile,
"-o", tmp.name]
args = parse_cmds().parse_args(args)
heatmap(args)
def test_plots_big(test_data_from_osf):
peaks, bamfiles, clusters = test_data_from_osf
with NamedTemporaryFile(prefix="fluff.", suffix=".png") as f:
args = [
"heatmap",
'-f', peaks,
"-d", *bamfiles,
"-o", f.name,
"-C", "kmeans",
"-k", "5",
]
args = parse_cmds().parse_args(args)
heatmap(args)
# Reading current clusters
fcluster = f.name + "_clusters.bed"
pred_clusters = []
for line in open(fcluster):
vals = line.strip().split("\t")
pred_clusters.append([f"{vals[0]}:{vals[1]}-{vals[2]}", vals[4]])
# Reading reference clusters
cmp_clusters = []
for line in open(clusters):
vals = line.strip().split("\t")
cmp_clusters.append(vals)
# sort by region name
cmp_clusters = [x[1] for x in sorted(cmp_clusters)]
pred_clusters = [x[1] for x in sorted(pred_clusters)]
v = v_measure_score(cmp_clusters, pred_clusters)
assert v > 0.25
# Test bandplot
args = [
"bandplot",
'-f', fcluster,
"-d", *bamfiles,
"-o", f.name,
]
args = parse_cmds().parse_args(args)
bandplot(args)
tmpnames = [
f.name + "_clusters.bed",
f.name + "_readCounts.txt",
]
for name in tmpnames:
os.unlink(name)
|
mit
| -5,502,848,502,794,335,000
| 29.794118
| 77
| 0.567813
| false
| 3.46976
| true
| false
| false
|
tbursztyka/python-elf
|
setup.py
|
1
|
1070
|
##############
# Setup File #
##############
"""
python-elf - A python library to manipulate ELF format
Copyright (C) 2008 Tomasz Bursztyka
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from distutils.core import setup, Extension
elf_packages = ['elf', 'elf.core']
setup (name = 'elf',
version = '0.1',
description = 'python-elf',
packages = elf_packages,
author = "Tomasz Bursztyka"
)
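# Typical distutils invocation (added note):
#   python setup.py build
#   python setup.py install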
#######
# EOF #
#######
|
lgpl-3.0
| 4,951,030,473,274,327,000
| 28.722222
| 73
| 0.663551
| false
| 4.099617
| false
| false
| false
|
toabctl/contrail-sandesh
|
library/python/pysandesh/sandesh_stats.py
|
1
|
10781
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# sandesh_stats.py
#
from pysandesh.sandesh_base import Sandesh
from pysandesh.gen_py.sandesh_uve.ttypes import SandeshMessageStats
from pysandesh.gen_py.sandesh.ttypes import SandeshTxDropReason, \
SandeshRxDropReason
class SandeshMessageStatistics(object):
def __init__(self):
self._message_type_stats = {}
self._aggregate_stats = SandeshMessageStats()
# end __init__
def message_type_stats(self):
return self._message_type_stats
# end message_type_stats
def aggregate_stats(self):
return self._aggregate_stats
# end aggregate_stats
def update_tx_stats(self, message_type, nbytes,
drop_reason=SandeshTxDropReason.NoDrop):
if SandeshTxDropReason.MinDropReason < drop_reason < \
SandeshTxDropReason.MaxDropReason:
try:
message_stats = self._message_type_stats[message_type]
except KeyError:
message_stats = SandeshMessageStats()
self._message_type_stats[message_type] = message_stats
finally:
self._update_tx_stats_internal(message_stats, nbytes,
drop_reason)
self._update_tx_stats_internal(self._aggregate_stats, nbytes,
drop_reason)
return True
return False
# end update_tx_stats
def update_rx_stats(self, message_type, nbytes,
drop_reason=SandeshRxDropReason.NoDrop):
if SandeshRxDropReason.MinDropReason < drop_reason < \
SandeshRxDropReason.MaxDropReason:
try:
message_stats = self._message_type_stats[message_type]
except KeyError:
message_stats = SandeshMessageStats()
self._message_type_stats[message_type] = message_stats
finally:
self._update_rx_stats_internal(message_stats, nbytes,
drop_reason)
self._update_rx_stats_internal(self._aggregate_stats, nbytes,
drop_reason)
return True
return False
# end update_rx_stats
def _update_tx_stats_internal(self, msg_stats, nbytes, drop_reason):
if drop_reason is SandeshTxDropReason.NoDrop:
try:
msg_stats.messages_sent += 1
msg_stats.bytes_sent += nbytes
except TypeError:
msg_stats.messages_sent = 1
msg_stats.bytes_sent = nbytes
else:
if msg_stats.messages_sent_dropped:
msg_stats.messages_sent_dropped += 1
msg_stats.bytes_sent_dropped += nbytes
else:
msg_stats.messages_sent_dropped = 1
msg_stats.bytes_sent_dropped = nbytes
if drop_reason is SandeshTxDropReason.ValidationFailed:
if msg_stats.messages_sent_dropped_validation_failed:
msg_stats.messages_sent_dropped_validation_failed += 1
msg_stats.bytes_sent_dropped_validation_failed += nbytes
else:
msg_stats.messages_sent_dropped_validation_failed = 1
msg_stats.bytes_sent_dropped_validation_failed = nbytes
elif drop_reason is SandeshTxDropReason.QueueLevel:
if msg_stats.messages_sent_dropped_queue_level:
msg_stats.messages_sent_dropped_queue_level += 1
msg_stats.bytes_sent_dropped_queue_level += nbytes
else:
msg_stats.messages_sent_dropped_queue_level = 1
msg_stats.bytes_sent_dropped_queue_level = nbytes
elif drop_reason is SandeshTxDropReason.NoClient:
if msg_stats.messages_sent_dropped_no_client:
msg_stats.messages_sent_dropped_no_client += 1
msg_stats.bytes_sent_dropped_no_client += nbytes
else:
msg_stats.messages_sent_dropped_no_client = 1
msg_stats.bytes_sent_dropped_no_client = nbytes
elif drop_reason is SandeshTxDropReason.NoSession:
if msg_stats.messages_sent_dropped_no_session:
msg_stats.messages_sent_dropped_no_session += 1
msg_stats.bytes_sent_dropped_no_session += nbytes
else:
msg_stats.messages_sent_dropped_no_session = 1
msg_stats.bytes_sent_dropped_no_session = nbytes
elif drop_reason is SandeshTxDropReason.NoQueue:
if msg_stats.messages_sent_dropped_no_queue:
msg_stats.messages_sent_dropped_no_queue += 1
msg_stats.bytes_sent_dropped_no_queue += nbytes
else:
msg_stats.messages_sent_dropped_no_queue = 1
msg_stats.bytes_sent_dropped_no_queue = nbytes
elif drop_reason is SandeshTxDropReason.ClientSendFailed:
if msg_stats.messages_sent_dropped_client_send_failed:
msg_stats.messages_sent_dropped_client_send_failed += 1
msg_stats.bytes_sent_dropped_client_send_failed += nbytes
else:
msg_stats.messages_sent_dropped_client_send_failed = 1
msg_stats.bytes_sent_dropped_client_send_failed = nbytes
elif drop_reason is SandeshTxDropReason.HeaderWriteFailed:
if msg_stats.messages_sent_dropped_header_write_failed:
msg_stats.messages_sent_dropped_header_write_failed += 1
msg_stats.bytes_sent_dropped_header_write_failed += nbytes
else:
msg_stats.messages_sent_dropped_header_write_failed = 1
msg_stats.bytes_sent_dropped_header_write_failed = nbytes
elif drop_reason is SandeshTxDropReason.WriteFailed:
if msg_stats.messages_sent_dropped_write_failed:
msg_stats.messages_sent_dropped_write_failed += 1
msg_stats.bytes_sent_dropped_write_failed += nbytes
else:
msg_stats.messages_sent_dropped_write_failed = 1
msg_stats.bytes_sent_dropped_write_failed = nbytes
elif drop_reason is SandeshTxDropReason.SessionNotConnected:
if msg_stats.messages_sent_dropped_session_not_connected:
msg_stats.messages_sent_dropped_session_not_connected += 1
msg_stats.bytes_sent_dropped_session_not_connected += nbytes
else:
msg_stats.messages_sent_dropped_session_not_connected = 1
msg_stats.bytes_sent_dropped_session_not_connected = nbytes
elif drop_reason is SandeshTxDropReason.WrongClientSMState:
if msg_stats.messages_sent_dropped_wrong_client_sm_state:
msg_stats.messages_sent_dropped_wrong_client_sm_state += 1
msg_stats.bytes_sent_dropped_wrong_client_sm_state += nbytes
else:
msg_stats.messages_sent_dropped_wrong_client_sm_state = 1
msg_stats.bytes_sent_dropped_wrong_client_sm_state = nbytes
else:
assert 0, 'Unhandled Tx drop reason <%s>' % (str(drop_reason))
# end _update_tx_stats_internal
def _update_rx_stats_internal(self, msg_stats, nbytes, drop_reason):
if drop_reason is SandeshRxDropReason.NoDrop:
if msg_stats.messages_received:
msg_stats.messages_received += 1
msg_stats.bytes_received += nbytes
else:
msg_stats.messages_received = 1
msg_stats.bytes_received = nbytes
else:
if msg_stats.messages_received_dropped:
msg_stats.messages_received_dropped += 1
msg_stats.bytes_received_dropped += nbytes
else:
msg_stats.messages_received_dropped = 1
msg_stats.bytes_received_dropped = nbytes
if drop_reason is SandeshRxDropReason.QueueLevel:
if msg_stats.messages_received_dropped_queue_level:
msg_stats.messages_received_dropped_queue_level += 1
msg_stats.bytes_received_dropped_queue_level += nbytes
else:
msg_stats.messages_received_dropped_queue_level = 1
msg_stats.bytes_received_dropped_queue_level = nbytes
elif drop_reason is SandeshRxDropReason.NoQueue:
if msg_stats.messages_received_dropped_no_queue:
msg_stats.messages_received_dropped_no_queue += 1
msg_stats.bytes_received_dropped_no_queue += nbytes
else:
msg_stats.messages_received_dropped_no_queue = 1
msg_stats.bytes_received_dropped_no_queue = nbytes
elif drop_reason is SandeshRxDropReason.ControlMsgFailed:
if msg_stats.messages_received_dropped_control_msg_failed:
msg_stats.messages_received_dropped_control_msg_failed += 1
msg_stats.bytes_received_dropped_control_msg_failed += nbytes
else:
msg_stats.messages_received_dropped_control_msg_failed = 1
msg_stats.bytes_received_dropped_control_msg_failed = nbytes
elif drop_reason is SandeshRxDropReason.CreateFailed:
if msg_stats.messages_received_dropped_create_failed:
msg_stats.messages_received_dropped_create_failed += 1
msg_stats.bytes_received_dropped_create_failed += nbytes
else:
msg_stats.messages_received_dropped_create_failed = 1
msg_stats.bytes_received_dropped_create_failed = nbytes
elif drop_reason is SandeshRxDropReason.DecodingFailed:
if msg_stats.messages_received_dropped_decoding_failed:
msg_stats.messages_received_dropped_decoding_failed += 1
msg_stats.bytes_received_dropped_decoding_failed += nbytes
else:
msg_stats.messages_received_dropped_decoding_failed = 1
msg_stats.bytes_received_dropped_decoding_failed = nbytes
else:
assert 0, 'Unhandled Rx drop reason <%s>' % (str(drop_reason))
# end _update_rx_stats_internal
# end class SandeshMessageStatistics
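# Usage sketch (added; the message type and byte counts are hypothetical):
#   stats = SandeshMessageStatistics()
#   stats.update_tx_stats('ExampleUVETrace', 128)  # successful send
#   stats.update_tx_stats('ExampleUVETrace', 128,
#                         SandeshTxDropReason.QueueLevel)  # dropped send
#   stats.aggregate_stats()  # SandeshMessageStats holding the totals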
|
apache-2.0
| 7,344,674,149,269,573,000
| 50.831731
| 81
| 0.581208
| false
| 4.216269
| false
| false
| false
|
zasdfgbnm/qutip
|
qutip/control/grape.py
|
1
|
19982
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains functions that implement the GRAPE algorithm for
calculating pulse sequences for quantum systems.
"""
__all__ = ['plot_grape_control_fields',
'grape_unitary', 'cy_grape_unitary', 'grape_unitary_adaptive']
import warnings
import time
import numpy as np
from scipy.interpolate import interp1d
import scipy.sparse as sp
from qutip.qobj import Qobj
from qutip.ui.progressbar import BaseProgressBar
from qutip.control.cy_grape import cy_overlap, cy_grape_inner
from qutip.qip.gates import gate_sequence_product
import qutip.logging
logger = qutip.logging.get_logger()
class GRAPEResult:
"""
Class for representing the result of a GRAPE simulation.
Attributes
----------
u : array
GRAPE control pulse matrix.
H_t : time-dependent Hamiltonian
The time-dependent Hamiltonian that realize the GRAPE pulse sequence.
U_f : Qobj
The final unitary transformation that is realized by the evolution
of the system with the GRAPE generated pulse sequences.
"""
def __init__(self, u=None, H_t=None, U_f=None):
self.u = u
self.H_t = H_t
self.U_f = U_f
def plot_grape_control_fields(times, u, labels, uniform_axes=False):
"""
    Plot a series of plots showing the GRAPE control fields contained
    in the control pulse matrix u.
Parameters
----------
times : array
Time coordinate array.
u : array
Control pulse matrix.
labels : list
List of labels for each control pulse sequence in the control pulse
matrix.
uniform_axes : bool
Whether or not to plot all pulse sequences using the same y-axis scale.
"""
import matplotlib.pyplot as plt
R, J, M = u.shape
fig, axes = plt.subplots(J, 1, figsize=(8, 2 * J), squeeze=False)
y_max = abs(u).max()
for r in range(R):
for j in range(J):
if r == R - 1:
lw, lc, alpha = 2.0, 'k', 1.0
axes[j, 0].set_ylabel(labels[j], fontsize=18)
axes[j, 0].set_xlabel(r'$t$', fontsize=18)
axes[j, 0].set_xlim(0, times[-1])
else:
lw, lc, alpha = 0.5, 'b', 0.25
axes[j, 0].step(times, u[r, j, :], lw=lw, color=lc, alpha=alpha)
if uniform_axes:
axes[j, 0].set_ylim(-y_max, y_max)
fig.tight_layout()
return fig, axes
def _overlap(A, B):
return (A.dag() * B).tr() / A.shape[0]
# return cy_overlap(A.data, B.data)
def grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None,
u_limits=None, interp_kind='linear', use_interp=False,
alpha=None, beta=None, phase_sensitive=True,
progress_bar=BaseProgressBar()):
"""
Calculate control pulses for the Hamiltonian operators in H_ops so that the
unitary U is realized.
Experimental: Work in progress.
Parameters
----------
U : Qobj
Target unitary evolution operator.
H0 : Qobj
Static Hamiltonian (that cannot be tuned by the control fields).
H_ops: list of Qobj
A list of operators that can be tuned in the Hamiltonian via the
control fields.
R : int
Number of GRAPE iterations.
    times : array / list
        Array of time coordinates for control pulse evaluation.
u_start : array
Optional array with initial control pulse values.
Returns
-------
Instance of GRAPEResult, which contains the control pulses calculated
with GRAPE, a time-dependent Hamiltonian that is defined by the
control pulses, as well as the resulting propagator.
"""
if eps is None:
eps = 0.1 * (2 * np.pi) / (times[-1])
M = len(times)
J = len(H_ops)
u = np.zeros((R, J, M))
if u_limits and len(u_limits) != 2:
raise ValueError("u_limits must be a list with two values")
if u_limits:
warnings.warn("Caution: Using experimental feature u_limits")
if u_limits and u_start:
# make sure that no values in u0 violates the u_limits conditions
u_start = np.array(u_start)
u_start[u_start < u_limits[0]] = u_limits[0]
u_start[u_start > u_limits[1]] = u_limits[1]
if u_start is not None:
for idx, u0 in enumerate(u_start):
u[0, idx, :] = u0
if beta:
warnings.warn("Causion: Using experimental feature time-penalty")
progress_bar.start(R)
for r in range(R - 1):
progress_bar.update(r)
dt = times[1] - times[0]
if use_interp:
ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind,
bounds_error=False, fill_value=u[r, j, -1])
for j in range(J)]
def _H_t(t, args=None):
return H0 + sum([float(ip_funcs[j](t)) * H_ops[j]
for j in range(J)])
U_list = [(-1j * _H_t(times[idx]) * dt).expm()
for idx in range(M-1)]
else:
def _H_idx(idx):
return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)])
U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)]
U_f_list = []
U_b_list = []
U_f = 1
U_b = 1
for n in range(M - 1):
U_f = U_list[n] * U_f
U_f_list.append(U_f)
U_b_list.insert(0, U_b)
U_b = U_list[M - 2 - n].dag() * U_b
for j in range(J):
for m in range(M-1):
P = U_b_list[m] * U
Q = 1j * dt * H_ops[j] * U_f_list[m]
if phase_sensitive:
du = - _overlap(P, Q)
else:
du = - 2 * _overlap(P, Q) * _overlap(U_f_list[m], P)
if alpha:
# penalty term for high power control signals u
du += -2 * alpha * u[r, j, m] * dt
if beta:
# penalty term for late control signals u
du += -2 * beta * m * u[r, j, m] * dt
u[r + 1, j, m] = u[r, j, m] + eps * du.real
if u_limits:
if u[r + 1, j, m] < u_limits[0]:
u[r + 1, j, m] = u_limits[0]
elif u[r + 1, j, m] > u_limits[1]:
u[r + 1, j, m] = u_limits[1]
u[r + 1, j, -1] = u[r + 1, j, -2]
if use_interp:
ip_funcs = [interp1d(times, u[R - 1, j, :], kind=interp_kind,
bounds_error=False, fill_value=u[R - 1, j, -1])
for j in range(J)]
H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)]
for j in range(J)]
else:
H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)]
progress_bar.finished()
# return U_f_list[-1], H_td_func, u
return GRAPEResult(u=u, U_f=U_f_list[-1], H_t=H_td_func)
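# Minimal usage sketch (added for illustration; a single-qubit example with
# hypothetical parameters, not taken from the original module):
#   from qutip import sigmax, sigmay, sigmaz
#   times = np.linspace(0, 1, 100)
#   result = grape_unitary(U=sigmax(), H0=0 * sigmaz(),
#                          H_ops=[sigmax(), sigmay(), sigmaz()],
#                          R=50, times=times)
#   result.U_f  # propagator realized by the optimized pulses in result.u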
def cy_grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None,
u_limits=None, interp_kind='linear', use_interp=False,
alpha=None, beta=None, phase_sensitive=True,
progress_bar=BaseProgressBar()):
"""
    Calculate control pulses for the Hamiltonian operators in H_ops so that the
unitary U is realized.
Experimental: Work in progress.
Parameters
----------
U : Qobj
Target unitary evolution operator.
H0 : Qobj
Static Hamiltonian (that cannot be tuned by the control fields).
H_ops: list of Qobj
A list of operators that can be tuned in the Hamiltonian via the
control fields.
R : int
Number of GRAPE iterations.
    times : array / list
        Array of time coordinates for control pulse evaluation.
u_start : array
Optional array with initial control pulse values.
Returns
-------
Instance of GRAPEResult, which contains the control pulses calculated
with GRAPE, a time-dependent Hamiltonian that is defined by the
control pulses, as well as the resulting propagator.
"""
if eps is None:
eps = 0.1 * (2 * np.pi) / (times[-1])
M = len(times)
J = len(H_ops)
u = np.zeros((R, J, M))
H_ops_data = [H_op.data for H_op in H_ops]
if u_limits and len(u_limits) != 2:
raise ValueError("u_limits must be a list with two values")
if u_limits:
warnings.warn("Causion: Using experimental feature u_limits")
if u_limits and u_start:
# make sure that no values in u0 violates the u_limits conditions
u_start = np.array(u_start)
u_start[u_start < u_limits[0]] = u_limits[0]
u_start[u_start > u_limits[1]] = u_limits[1]
if u_limits:
use_u_limits = 1
u_min = u_limits[0]
u_max = u_limits[1]
else:
use_u_limits = 0
u_min = 0.0
u_max = 0.0
if u_start is not None:
for idx, u0 in enumerate(u_start):
u[0, idx, :] = u0
if beta:
warnings.warn("Causion: Using experimental feature time-penalty")
alpha_val = alpha if alpha else 0.0
beta_val = beta if beta else 0.0
progress_bar.start(R)
for r in range(R - 1):
progress_bar.update(r)
dt = times[1] - times[0]
if use_interp:
ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind,
bounds_error=False, fill_value=u[r, j, -1])
for j in range(J)]
def _H_t(t, args=None):
return H0 + sum([float(ip_funcs[j](t)) * H_ops[j]
for j in range(J)])
U_list = [(-1j * _H_t(times[idx]) * dt).expm().data
for idx in range(M-1)]
else:
def _H_idx(idx):
return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)])
U_list = [(-1j * _H_idx(idx) * dt).expm().data
for idx in range(M-1)]
U_f_list = []
U_b_list = []
U_f = 1
U_b = sp.eye(*(U.shape))
for n in range(M - 1):
U_f = U_list[n] * U_f
U_f_list.append(U_f)
U_b_list.insert(0, U_b)
U_b = U_list[M - 2 - n].T.conj().tocsr() * U_b
cy_grape_inner(U.data, u, r, J, M, U_b_list, U_f_list, H_ops_data,
dt, eps, alpha_val, beta_val, phase_sensitive,
use_u_limits, u_min, u_max)
if use_interp:
ip_funcs = [interp1d(times, u[R - 1, j, :], kind=interp_kind,
bounds_error=False, fill_value=u[R - 1, j, -1])
for j in range(J)]
H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)]
for j in range(J)]
else:
H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)]
progress_bar.finished()
return GRAPEResult(u=u, U_f=Qobj(U_f_list[-1], dims=U.dims),
H_t=H_td_func)
def grape_unitary_adaptive(U, H0, H_ops, R, times, eps=None, u_start=None,
u_limits=None, interp_kind='linear',
use_interp=False, alpha=None, beta=None,
phase_sensitive=False, overlap_terminate=1.0,
progress_bar=BaseProgressBar()):
"""
Calculate control pulses for the Hamiltonian operators in H_ops so that
the unitary U is realized.
Experimental: Work in progress.
Parameters
----------
U : Qobj
Target unitary evolution operator.
H0 : Qobj
Static Hamiltonian (that cannot be tuned by the control fields).
H_ops: list of Qobj
A list of operators that can be tuned in the Hamiltonian via the
control fields.
R : int
Number of GRAPE iterations.
    times : array / list
        Array of time coordinates for control pulse evaluation.
u_start : array
Optional array with initial control pulse values.
Returns
-------
Instance of GRAPEResult, which contains the control pulses calculated
with GRAPE, a time-dependent Hamiltonian that is defined by the
control pulses, as well as the resulting propagator.
"""
if eps is None:
eps = 0.1 * (2 * np.pi) / (times[-1])
eps_vec = np.array([eps / 2, eps, 2 * eps])
eps_log = np.zeros(R)
overlap_log = np.zeros(R)
best_k = 0
_k_overlap = np.array([0.0, 0.0, 0.0])
M = len(times)
J = len(H_ops)
K = len(eps_vec)
Uf = [None for _ in range(K)]
u = np.zeros((R, J, M, K))
if u_limits and len(u_limits) != 2:
raise ValueError("u_limits must be a list with two values")
if u_limits:
warnings.warn("Causion: Using experimental feature u_limits")
if u_limits and u_start:
# make sure that no values in u0 violates the u_limits conditions
u_start = np.array(u_start)
u_start[u_start < u_limits[0]] = u_limits[0]
u_start[u_start > u_limits[1]] = u_limits[1]
if u_start is not None:
for idx, u0 in enumerate(u_start):
for k in range(K):
u[0, idx, :, k] = u0
if beta:
warnings.warn("Causion: Using experimental feature time-penalty")
if phase_sensitive:
_fidelity_function = lambda x: x
else:
_fidelity_function = lambda x: abs(x) ** 2
best_k = 1
_r = 0
_prev_overlap = 0
progress_bar.start(R)
for r in range(R - 1):
progress_bar.update(r)
_r = r
eps_log[r] = eps_vec[best_k]
logger.debug("eps_vec: {}".format(eps_vec))
_t0 = time.time()
dt = times[1] - times[0]
if use_interp:
ip_funcs = [interp1d(times, u[r, j, :, best_k], kind=interp_kind,
bounds_error=False,
fill_value=u[r, j, -1, best_k])
for j in range(J)]
def _H_t(t, args=None):
return H0 + sum([float(ip_funcs[j](t)) * H_ops[j]
for j in range(J)])
U_list = [(-1j * _H_t(times[idx]) * dt).expm()
for idx in range(M-1)]
else:
def _H_idx(idx):
return H0 + sum([u[r, j, idx, best_k] * H_ops[j]
for j in range(J)])
U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)]
logger.debug("Time 1: %fs" % (time.time() - _t0))
_t0 = time.time()
U_f_list = []
U_b_list = []
U_f = 1
U_b = 1
for m in range(M - 1):
U_f = U_list[m] * U_f
U_f_list.append(U_f)
U_b_list.insert(0, U_b)
U_b = U_list[M - 2 - m].dag() * U_b
logger.debug("Time 2: %fs" % (time.time() - _t0))
_t0 = time.time()
for j in range(J):
for m in range(M-1):
P = U_b_list[m] * U
Q = 1j * dt * H_ops[j] * U_f_list[m]
if phase_sensitive:
du = - cy_overlap(P.data, Q.data)
else:
du = (- 2 * cy_overlap(P.data, Q.data) *
cy_overlap(U_f_list[m].data, P.data))
if alpha:
# penalty term for high power control signals u
du += -2 * alpha * u[r, j, m, best_k] * dt
if beta:
# penalty term for late control signals u
                    du += -2 * beta * m * u[r, j, m, best_k] * dt
for k, eps_val in enumerate(eps_vec):
u[r + 1, j, m, k] = u[r, j, m, k] + eps_val * du.real
if u_limits:
if u[r + 1, j, m, k] < u_limits[0]:
u[r + 1, j, m, k] = u_limits[0]
elif u[r + 1, j, m, k] > u_limits[1]:
u[r + 1, j, m, k] = u_limits[1]
u[r + 1, j, -1, :] = u[r + 1, j, -2, :]
logger.debug("Time 3: %fs" % (time.time() - _t0))
_t0 = time.time()
for k, eps_val in enumerate(eps_vec):
def _H_idx(idx):
return H0 + sum([u[r + 1, j, idx, k] * H_ops[j]
for j in range(J)])
U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)]
Uf[k] = gate_sequence_product(U_list)
_k_overlap[k] = _fidelity_function(cy_overlap(Uf[k].data,
U.data)).real
best_k = np.argmax(_k_overlap)
logger.debug("k_overlap: ", _k_overlap, best_k)
if _prev_overlap > _k_overlap[best_k]:
logger.debug("Regression, stepping back with smaller eps.")
u[r + 1, :, :, :] = u[r, :, :, :]
eps_vec /= 2
else:
if best_k == 0:
eps_vec /= 2
elif best_k == 2:
eps_vec *= 2
_prev_overlap = _k_overlap[best_k]
overlap_log[r] = _k_overlap[best_k]
if overlap_terminate < 1.0:
if _k_overlap[best_k] > overlap_terminate:
logger.info("Reached target fidelity, terminating.")
break
logger.debug("Time 4: %fs" % (time.time() - _t0))
_t0 = time.time()
if use_interp:
        ip_funcs = [interp1d(times, u[_r, j, :, best_k], kind=interp_kind,
                             bounds_error=False,
                             fill_value=u[_r, j, -1, best_k])
                    for j in range(J)]
H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)]
for j in range(J)]
else:
H_td_func = [H0] + [[H_ops[j], u[_r, j, :, best_k]] for j in range(J)]
progress_bar.finished()
result = GRAPEResult(u=u[:_r, :, :, best_k], U_f=Uf[best_k],
H_t=H_td_func)
result.eps = eps_log
result.overlap = overlap_log
return result
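# Hedged usage sketch (illustrative only; assumes this routine is exposed as
# grape_unitary_adaptive, as in QuTiP, and that qutip is importable):
#
#     from qutip import sigmax, sigmay, sigmaz
#     import numpy as np
#
#     U_target = (-1j * np.pi / 2 * sigmax()).expm()   # target: an X rotation
#     H0 = 0.0 * sigmaz()                              # trivial drift term
#     H_ops = [sigmax(), sigmay()]                     # tunable controls
#     times = np.linspace(0, 1, 100)
#     result = grape_unitary_adaptive(U_target, H0, H_ops, R=50, times=times)
#     # result.U_f approximates U_target; result.u holds the control pulses.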
|
bsd-3-clause
| 3,366,839,895,533,289,500
| 29.931889
| 79
| 0.517015
| false
| 3.389652
| false
| false
| false
|
maximx1/zfscan
|
zfscan/commands/CommandLineStatements.py
|
1
|
1075
|
class CommandLineStatements:
"""Batch of command line statements surrounding zfs"""
"""Checks the disks in the system"""
check_disks = "ls -la /dev/disk/by-id"
"""
zpool command to check status
{0} = pool
"""
zpool_status = "sudo zpool status {0}"
"""
zpool command to turn off drive access
{0} = pool
{1} = drive
"""
zpool_offline = "sudo zpool offline {0} {1}"
"""
zpool command to turn on drive access
{0} = pool
{1} = drive
"""
zpool_online = "sudo zpool online {0} {1}"
"""
zpool replace command
{0} = pool
{1} = old
{2} = new
"""
zpool_replace = "sudo zpool replace {0} {1} {2} -f"
"""zpool import command to show available imports"""
zpool_import_list = "sudo zpool import"
"""
zpool import command
{0} = importable pool
"""
zpool_import = "sudo zpool import {0}"
"""zfs list command"""
zfs_list = "sudo zfs list"
"""Disk usage"""
disk_usage = "df -h"
|
mit
| -2,060,873,528,293,840,100
| 21.416667
| 58
| 0.525581
| false
| 3.583333
| false
| false
| false
|
svp-dev/slcore
|
slc/tools/slc/mt/mtsparc/regdefs.py
|
1
|
15832
|
from ..common.regmagic import RegMagic
class RegDefs:
iargregs = 8
ilocalregs = 23
    # There is no FP support for now, but we define these to keep the
    # common implementation happy.
fargregs = 8
flocalregs = 24
regprefix = '%'
regprefix_fmt = '%%'
canon_is_numeric = False
comprefix = '!'
movinsn = 'mov'
fmovinsn = 'fmovs'
# offset in register window of
# first local register
mt_locals_offset = 1
mt_flocals_offset = 0
legacy_regs = {
# globals
'g1' : 1, # always tmp
'g2' : 2, # app reg
'g3' : 3, # app reg
'g4' : 4, # app reg
'g5' : 5, # OS reg
'g6' : 6, # OS reg
'g7' : 7, # OS reg
# output regs
'o5' : 13,
'o4' : 12,
'o3' : 11,
'o2' : 10,
'o1' : 9,
'o0' : 8,
# address of CALL (RA?)
'o7' : 15,
# locals
'l0' : 16,
'l1' : 17,
'l2' : 18,
'l3' : 19,
'l4' : 20,
'l5' : 21,
'l6' : 22,
'l7' : 23,
# inputs
'i5' : 29,
'i4' : 28,
'i3' : 27,
'i2' : 26,
'i1' : 25,
'i0' : 24,
# RA - 8
'i7' : 31,
# zero
'g0' : 0,
# stack pointer
'sp' : 14,
'o6' : 14,
# frame pointer
'fp' : 30,
'i6' : 30
}
legacy_fregs = dict((('f%d' % i, i) for i in xrange(0,32)))
######################
# GCC Allocation order
######################
# /* This is the order in which to allocate registers normally.
# We put %f0-%f7 last among the float registers, so as to make it more
# likely that a pseudo-register which dies in the float return register
# area will get allocated to the float return register, thus saving a move
# instruction at the end of the function.
# Similarly for integer return value registers.
# We know in this case that we will not end up with a leaf function.
# The register allocator is given the global and out registers first
# because these registers are call clobbered and thus less useful to
# global register allocation.
# Next we list the local and in registers. They are not call clobbered
# and thus very useful for global register allocation. We list the input
# registers before the locals so that it is more likely the incoming
# arguments received in those registers can just stay there and not be
# reloaded. */
# #define REG_ALLOC_ORDER \
# { 1, 2, 3, 4, 5, 6, 7, /* %g1-%g7 */ \
# 13, 12, 11, 10, 9, 8, /* %o5-%o0 */ \
# 15, /* %o7 */ \
# 16, 17, 18, 19, 20, 21, 22, 23, /* %l0-%l7 */ \
# 29, 28, 27, 26, 25, 24, 31, /* %i5-%i0,%i7 */\
# 40, 41, 42, 43, 44, 45, 46, 47, /* %f8-%f15 */ \
# 48, 49, 50, 51, 52, 53, 54, 55, /* %f16-%f23 */ \
# 56, 57, 58, 59, 60, 61, 62, 63, /* %f24-%f31 */ \
# 64, 65, 66, 67, 68, 69, 70, 71, /* %f32-%f39 */ \
# 72, 73, 74, 75, 76, 77, 78, 79, /* %f40-%f47 */ \
# 80, 81, 82, 83, 84, 85, 86, 87, /* %f48-%f55 */ \
# 88, 89, 90, 91, 92, 93, 94, 95, /* %f56-%f63 */ \
# 39, 38, 37, 36, 35, 34, 33, 32, /* %f7-%f0 */ \
# 96, 97, 98, 99, /* %fcc0-3 */ \
# 100, 0, 14, 30, 101} /* %icc, %g0, %o6, %i6, %sfp */
#
#
# Default reg usage:
# /* 1 for registers that have pervasive standard uses
# and are not available for the register allocator.
# On non-v9 systems:
# g1 is free to use as temporary.
# g2-g4 are reserved for applications. Gcc normally uses them as
# temporaries, but this can be disabled via the -mno-app-regs option.
# g5 through g7 are reserved for the operating system.
# On v9 systems:
# g1,g5 are free to use as temporaries, and are free to use between calls
# if the call is to an external function via the PLT.
# g4 is free to use as a temporary in the non-embedded case.
# g4 is reserved in the embedded case.
# g2-g3 are reserved for applications. Gcc normally uses them as
# temporaries, but this can be disabled via the -mno-app-regs option.
# g6-g7 are reserved for the operating system (or application in
# embedded case).
# ??? Register 1 is used as a temporary by the 64 bit sethi pattern, so must
# currently be a fixed register until this pattern is rewritten.
# Register 1 is also used when restoring call-preserved registers in large
# stack frames.
# */
# #define FIXED_REGISTERS \
# {1, 0, 2, 2, 2, 2, 1, 1, \
# 0, 0, 0, 0, 0, 0, 1, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 1, 1, \
# \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# \
# 0, 0, 0, 0, 0, 1}
# /* 1 for registers not available across function calls.
# These must include the FIXED_REGISTERS and also any
# registers that can be used without being saved.
# The latter must include the registers where values are returned
# and the register where structure-value addresses are passed.
# Aside from that, you can include as many other registers as you like. */
# #define CALL_USED_REGISTERS \
# {1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 0, 0, 0, 0, 0, 0, 0, 0, \
# 0, 0, 0, 0, 0, 0, 1, 1, \
# \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# 1, 1, 1, 1, 1, 1, 1, 1, \
# \
# 1, 1, 1, 1, 1, 1}
# REMINDER:
# SPARC reg sems
# %g0 (r00) always zero
# %g1 (r01) [1] temporary value
# %g2 (r02) [2] global 2
# global %g3 (r03) [2] global 3
# %g4 (r04) [2] global 4
# %g5 (r05) reserved for SPARC ABI
# %g6 (r06) reserved for SPARC ABI
# %g7 (r07) reserved for SPARC ABI
# %o0 (r08) [3] outgoing parameter 0 / return value from callee
# %o1 (r09) [1] outgoing parameter 1
# %o2 (r10) [1] outgoing parameter 2
# out %o3 (r11) [1] outgoing parameter 3
# %o4 (r12) [1] outgoing parameter 4
# %o5 (r13) [1] outgoing parameter 5
# %sp, %o6 (r14) [1] stack pointer
# %o7 (r15) [1] temporary value / address of CALL instruction
# %l0 (r16) [3] local 0
# %l1 (r17) [3] local 1
# %l2 (r18) [3] local 2
# local %l3 (r19) [3] local 3
# %l4 (r20) [3] local 4
# %l5 (r21) [3] local 5
# %l6 (r22) [3] local 6
# %l7 (r23) [3] local 7
# %i0 (r24) [3] incoming parameter 0 / return value to caller
# %i1 (r25) [3] incoming parameter 1
# %i2 (r26) [3] incoming parameter 2
# in %i3 (r27) [3] incoming parameter 3
# %i4 (r28) [3] incoming parameter 4
# %i5 (r29) [3] incoming parameter 5
# %fp, %i6 (r30) [3] frame pointer
# %i7 (r31) [3] return address - 8
# Notes:
# [1] assumed by caller to be destroyed (volatile) across a procedure call
# [2] should not be used by SPARC ABI library code
# [3] assumed by caller to be preserved across a procedure call
# /* This is the order in which to allocate registers for
# leaf functions. If all registers can fit in the global and
# output registers, then we have the possibility of having a leaf
# function.
# The macro actually mentioned the input registers first,
# because they get renumbered into the output registers once
# we know really do have a leaf function.
# To be more precise, this register allocation order is used
# when %o7 is found to not be clobbered right before register
# allocation. Normally, the reason %o7 would be clobbered is
# due to a call which could not be transformed into a sibling
# call.
# As a consequence, it is possible to use the leaf register
# allocation order and not end up with a leaf function. We will
# not get suboptimal register allocation in that case because by
# definition of being potentially leaf, there were no function
# calls. Therefore, allocation order within the local register
# window is not critical like it is when we do have function calls. */
# #define REG_LEAF_ALLOC_ORDER \
# { 1, 2, 3, 4, 5, 6, 7, /* %g1-%g7 */ \
# 29, 28, 27, 26, 25, 24, /* %i5-%i0 */ \
# 15, /* %o7 */ \
# 13, 12, 11, 10, 9, 8, /* %o5-%o0 */ \
# 16, 17, 18, 19, 20, 21, 22, 23, /* %l0-%l7 */ \
# 40, 41, 42, 43, 44, 45, 46, 47, /* %f8-%f15 */ \
# 48, 49, 50, 51, 52, 53, 54, 55, /* %f16-%f23 */ \
# 56, 57, 58, 59, 60, 61, 62, 63, /* %f24-%f31 */ \
# 64, 65, 66, 67, 68, 69, 70, 71, /* %f32-%f39 */ \
# 72, 73, 74, 75, 76, 77, 78, 79, /* %f40-%f47 */ \
# 80, 81, 82, 83, 84, 85, 86, 87, /* %f48-%f55 */ \
# 88, 89, 90, 91, 92, 93, 94, 95, /* %f56-%f63 */ \
# 39, 38, 37, 36, 35, 34, 33, 32, /* %f7-%f0 */ \
# 96, 97, 98, 99, /* %fcc0-3 */ \
# 100, 0, 14, 30, 31, 101} /* %icc, %g0, %o6, %i6, %i7, %sfp */
reg_mapping = {
# MT Globals
'g0' : 'l7',
'g1' : 'l6',
'g2' : 'l5',
'g3' : 'l4',
'g4' : 'l3',
'g5' : 'l2',
'g6' : 'l1',
'g7' : 'l0',
'gf0' : 'f31',
'gf1' : 'f30',
'gf2' : 'f29',
'gf3' : 'f28',
'gf4' : 'f27',
'gf5' : 'f26',
'gf6' : 'f25',
'gf7' : 'f24',
# MT Shareds
's0' : 'l0',
'd0' : 'l1',
's1' : 'l2',
'd1' : 'l3',
's2' : 'l4',
'd2' : 'l5',
's3' : 'l6',
'd3' : 'l7',
'sf0' : 'f24',
'df0' : 'f25',
'sf1' : 'f26',
'df1' : 'f27',
'sf2' : 'f28',
'df2' : 'f29',
'sf3' : 'f30',
'df3' : 'f31',
# Special locals
'l0' : 'g1', # temp phy 1
'l1' : 'g2', # app reg phy 2
'l2' : 'g3', # app reg phy 3
'l3' : 'g4', # app reg phy 4
'l4' : 'g5', # OS reg phy 5
'l5' : 'g6', # OS reg phy 6
'l6' : 'g7', # OS reg phy 7
'l7' : 'o0', # phy 8
'l8' : 'o1', # phy 9
'l9' : 'o2', # phy 10
'l10' : 'o3', # phy 11
'l11' : 'o4', # phy 12
'l12' : 'o5', # phy 13
'l13' : 'sp', # o6 phy 14
'l14' : 'o7', # CALL HWIRED phy 15
'l15' : 'i0', # phy 16
'l16' : 'i1', # phy 17
'l17' : 'i2', # phy 18
'l18' : 'i3', # phy 19
'l19' : 'i4', # phy 20
'l20' : 'i5', # phy 21
'l21' : 'fp', # i6 phy 22
'l22' : 'i7', # RA-8 phy 23
'l31' : 'g0', # ZERO
'lf0' : 'f0',
'lf1' : 'f1',
'lf2' : 'f2',
'lf3' : 'f3',
'lf4' : 'f4',
'lf5' : 'f5',
'lf6' : 'f6',
'lf7' : 'f7',
'lf8' : 'f8',
'lf9' : 'f9',
'lf10' : 'f10',
'lf11' : 'f11',
'lf12' : 'f12',
'lf13' : 'f13',
'lf14' : 'f14',
'lf15' : 'f15',
'lf16' : 'f16',
'lf17' : 'f17',
'lf18' : 'f18',
'lf19' : 'f19',
'lf20' : 'f20',
'lf21' : 'f21',
'lf22' : 'f22',
'lf23' : 'f23'
}
reg_aliases = {
'tlsp' : 'l13',
'fp' : 'l21',
'idx_init' : 'l0',
'extra_init' : 'l1',
'zero' : 'l31',
'ra' : 'l22',
'ra_leaf' : 'l14',
'callreg' : 'l14'
}
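    # Example of how these tables compose: the alias 'tlsp' resolves to the
    # virtual register 'l13', which reg_mapping maps to the legacy name 'sp'
    # (physical register r14 in legacy_regs).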
## FIRST IDEA FOR MAPPING
## BASED ON GCC ALLOC ORDER
## -> problem: save/restore uses std/ldd,
## requires "i" locals to lie next to each other
## at even-numbered register slots.
# 'l7' : 'o5',
# 'l8' : 'o4',
# 'l9' : 'o3',
# 'l10' : 'o2',
# 'l11' : 'o1',
# 'l12' : 'o0',
# 'l13' : 'i5',
# 'l14' : 'i4',
# 'l15' : 'o7', # HARD-WIRED INTO CALL: l15 must assemble to %r15 = %o7!
# 'l16' : 'i3',
# 'l17' : 'i2',
# 'l18' : 'i1',
# 'l19' : 'i0',
# 'l20' : 'i7', # RA - 8
# 'l21' : 'sp',
# 'l22' : 'fp',
# def __init__(self):
# import sys
# print >>sys.stderr, "---snip here---\n#! /usr/bin/sed -f"
# rm = self.reg_mapping
# leg = set([rm[k] for k in rm])
# m = {}
# for r in leg:
# m[r] = [k for k in rm if rm[k] == r]
# if m[r][0].startswith('l'):
# m[r][0] += '_' + r
# for k,v in m.items():
# print >>sys.stderr, 's/%%%s/%%t%s/g;' % (k,''.join(v))
# print >>sys.stderr,'s/%tl31/%g0/g;'
# print >>sys.stderr, "---snip here---"
def post_init_regmagic(self, rm):
# all virtual register names, except SP
rm.allregs = set(['$l%d' % x for x in xrange(31 - self.iargregs) if x not in (14,)] + \
['$lf%d' % x for x in xrange(31 - self.fargregs)] + \
['$g%d' % x for x in xrange(self.iargregs)] + \
['$gf%d' % x for x in xrange(self.fargregs)] + \
['$d%d' % x for x in xrange(self.iargregs / 2)] + \
['$df%d' % x for x in xrange(self.fargregs / 2)] + \
['$%d' % x for x in xrange(1,32)])
### Register lists for the compile commands ###
rm.fixed_registers = []
for (v, l) in [("f", rm._freg_inv), ("",rm._reg_inv)]:
fixed = set()
for rl in l:
for r in rl:
if r['cat'] in 'sdg':
fixed.add(r['legnr'])
for r in fixed:
rm.fixed_registers.append('%%%s%d' % (v, r))
### Retain the numeric aliases for all registers
for i in xrange(0, 32):
RegDefs.legacy_regs['r%d' % i] = i
RegDefs.legacy_regs['f%d' % i] = i
regmagic = RegMagic(RegDefs())
__all__ = ['regmagic']
|
gpl-3.0
| 2,093,495,678,223,844,000
| 34.981818
| 95
| 0.417256
| false
| 2.916191
| false
| false
| false
|
google/rekall
|
rekall-core/rekall/plugins/addrspaces/intel.py
|
1
|
25245
|
# Rekall Memory Forensics
#
# Copyright 2015 Google Inc. All Rights Reserved.
# Authors:
# Michael Cohen <scudette@google.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Implement the base translating address spaces.
This is a complete rewrite of the previous translating address spaces
implemented in Rekall. The goals are:
1) To make a system that is provable and traceable - i.e. It should be possible
to trace the address translation process step by step as it is performed by
Rekall so we can verify how it is implemented.
2) The system must be very fast at the same time. Address translation can be an
expensive operation so we need to ensure we are very quick.
3) The system must be extensible and modifiable. Address translation is a
complex algorithm and varies a lot between operating systems and
architectures. Therefore this implementation is generic and tries to
encapsulate all the nuances of address translation in the OS specific
implementation itself.
How does it work?
-----------------
There are a few main entry points into the translating Address Spaces:
1) vtop(): (Virtual to Physical) This method accepts a virtual address and
translates it to the physical address in the base address space. This is the
workhorse method. It is designed to be very fast but does not give too much
information about how the translation was performed.
2) describe_vtop(): This is the describing sister method of vtop(). It returns a
list of AddressTranslationDescriptor() objects. Each of these describes a
specific step in the translation process. If one was to render each step,
this outlines exactly what happened in each step and how the address is
derived. If the address space translation process succeeds the last
descriptor will be a PhysicalAddressDescriptor() instance which describes the
final physical address. Note that the translation process may request files
to be mapped into the physical address space, so the
PhysicalAddressDescriptor() will point at mapped files (i.e. it may not
actually refer to the physical memory image).
3) get_mappings(): This method generates Run instances which encapsulate each
region available in the virtual address space.
The vtop() method and the describe_vtop() method are very similar since they
implement the same algorithms. However, we do not want to implement the same
thing twice because that leads to maintenance problems and subtle
bugs. Therefore vtop() is simply a wrapper around describe_vtop(). To achieve
the required performance vtop() simply looks for the PhysicalAddressDescriptor()
and returns it. This is essentially a noop for any of the other descriptors and
therefore maintains the same speed benefits.
"""
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.builtins import basestring
from builtins import object
import io
import struct
from rekall import addrspace
from rekall import config
from rekall import obj
from rekall.ui import text as text_renderer
from rekall_lib import utils
config.DeclareOption(
"dtb", group="Autodetection Overrides",
type="IntParser", help="The DTB physical address.")
config.DeclareOption(
"kernel_slide", group="Autodetection Overrides",
type="IntParser", help="Shift for Linux KASLR (see find_kaslr plugin).")
PAGE_SHIFT = 12
PAGE_MASK = ~ 0xFFF
class AddressTranslationDescriptor(object):
"""A descriptor of a step in the translation process.
This is a class because there may be OS specific steps in the address
translation.
"""
object_name = None
def __init__(self, object_name=None, object_value=None, object_address=None,
session=None):
if object_name:
self.object_name = object_name
self.object_value = object_value
self.object_address = object_address
self.session = session
def render(self, renderer):
"""Render this step."""
if self.object_address is not None:
# Properly format physical addresses.
renderer.format(
"{0}@ {1} = {2:addr}\n",
self.object_name,
self.session.physical_address_space.describe(
self.object_address),
self.object_value or 0)
elif self.object_value:
renderer.format("{0} {1}\n",
self.object_name,
self.session.physical_address_space.describe(
self.object_value))
else:
renderer.format("{0}\n", self.object_name)
class CommentDescriptor(object):
def __init__(self, comment, *args, **kwargs):
self.session = kwargs.pop("session", None)
self.comment = comment
self.args = args
def render(self, renderer):
renderer.format(self.comment, *self.args)
class InvalidAddress(CommentDescriptor):
"""Mark an invalid address.
This should be the last descriptor in the collection sequence.
"""
class DescriptorCollection(object):
def __init__(self, session):
self.session = session
self.descriptors = []
def add(self, descriptor_cls, *args, **kwargs):
self.descriptors.append((descriptor_cls, args, kwargs))
def __iter__(self):
for cls, args, kwargs in self.descriptors:
kwargs["session"] = self.session
yield cls(*args, **kwargs)
def __getitem__(self, item):
"""Get a particular descriptor.
Descriptors can be requested by name (e.g. VirtualAddressDescriptor) or
index (e.g. -1).
"""
if isinstance(item, basestring):
for descriptor_cls, args, kwargs in self.descriptors:
if descriptor_cls.__name__ == item:
kwargs["session"] = self.session
return descriptor_cls(*args, **kwargs)
return obj.NoneObject("No descriptor found.")
try:
cls, args, kwargs = self.descriptors[item]
kwargs["session"] = self.session
return cls(*args, **kwargs)
except KeyError:
return obj.NoneObject("No descriptor found.")
def __str__(self):
"""Render ourselves into a string."""
fd = io.StringIO()
ui_renderer = text_renderer.TextRenderer(
session=self.session, fd=fd)
with ui_renderer.start():
for descriptor in self:
descriptor.render(ui_renderer)
return fd.getvalue()
class PhysicalAddressDescriptorCollector(DescriptorCollection):
"""A descriptor collector which only cares about PhysicalAddressDescriptor.
This allows us to reuse all the code in describing the address space
resolution and cheaply implement the standard vtop() method.
"""
physical_address = None
def add(self, descriptor_cls, *_, **kwargs):
if descriptor_cls is PhysicalAddressDescriptor:
address = kwargs.pop("address")
self.physical_address = address
class PhysicalAddressDescriptor(AddressTranslationDescriptor):
"""A descriptor to mark the final physical address resolution."""
def __init__(self, address=0, session=None):
super(PhysicalAddressDescriptor, self).__init__(session=session)
self.address = address
def render(self, renderer):
renderer.format(
"Physical Address {0}\n",
self.session.physical_address_space.describe(self.address))
class VirtualAddressDescriptor(AddressTranslationDescriptor):
"""Mark a virtual address."""
def __init__(self, address=0, dtb=0, session=None):
super(VirtualAddressDescriptor, self).__init__(session=session)
self.dtb = dtb
self.address = address
def render(self, renderer):
renderer.format(
"Virtual Address {0:style=address} (DTB {1:style=address})\n",
self.address, self.dtb)
class IA32PagedMemory(addrspace.PagedReader):
"""Standard x86 32 bit non PAE address space.
Provides an address space for IA32 paged memory, aka the x86
architecture, without Physical Address Extensions (PAE). Allows
callers to map virtual address to offsets in physical memory.
Create a new IA32 address space without PAE to sit on top of
the base address space and a Directory Table Base (CR3 value)
of 'dtb'.
Comments in this class mostly come from the Intel(R) 64 and IA-32
Architectures Software Developer's Manual Volume 3A: System Programming
Guide, Part 1, revision 031, pages 4-8 to 4-15. This book is available
for free at http://www.intel.com/products/processor/manuals/index.htm.
Similar information is also available from Advanced Micro Devices (AMD)
at http://support.amd.com/us/Processor_TechDocs/24593.pdf.
This address space implements paging as described in section "4.3 32-BIT
PAGING" of the above book.
This is simplified from previous versions of rekall, by removing caching
and automated DTB searching (which is now performed by specific plugins in
an OS specific way).
"""
order = 70
valid_mask = 1
def __init__(self, name=None, dtb=None, **kwargs):
"""Instantiate an Intel 32 bit Address space over the layered AS.
Args:
dtb: The dtb address.
"""
super(IA32PagedMemory, self).__init__(**kwargs)
# We must be stacked on someone else:
        if self.base is None:
raise TypeError("No base Address Space")
# If the underlying address space already knows about the dtb we use it.
# Allow the dtb to be specified in the session.
self.dtb = dtb or self.session.GetParameter("dtb")
        if self.dtb is None:
raise TypeError("No valid DTB specified. Try the find_dtb"
" plugin to search for the dtb.")
self.name = (name or 'Kernel AS') + "@%#x" % self.dtb
# Use a TLB to make this faster.
self._tlb = addrspace.TranslationLookasideBuffer(1000)
self._cache = utils.FastStore(100)
# Some important masks we can use.
# Is the pagesize flags on?
self.page_size_mask = (1 << 7)
def vtop(self, vaddr):
"""Translates virtual addresses into physical offsets.
The function should return either None (no valid mapping)
or the offset in physical memory where the address maps.
This function is simply a wrapper around describe_vtop() which does all
the hard work. You probably never need to override it.
"""
vaddr = int(vaddr)
try:
return self._tlb.Get(vaddr)
except KeyError:
# The TLB accepts only page aligned virtual addresses.
aligned_vaddr = vaddr & self.PAGE_MASK
collection = self.describe_vtop(
aligned_vaddr, PhysicalAddressDescriptorCollector(self.session))
self._tlb.Put(aligned_vaddr, collection.physical_address)
return self._tlb.Get(vaddr)
def vtop_run(self, addr):
phys_addr = self.vtop(addr)
if phys_addr is not None:
return addrspace.Run(
start=addr,
end=addr,
file_offset=phys_addr,
address_space=self.base)
def describe_vtop(self, vaddr, collection=None):
"""A generator of descriptive statements about stages in translation.
While the regular vtop is called very frequently and therefore must be
fast, this variation is used to examine the translation process in
detail. We therefore emit data about each step of the way - potentially
re-implementing the vtop() method above, but yielding intermediate
results.
Args:
vaddr: The address to translate.
collection: An instance of DescriptorCollection() which will receive
the address descriptors. If not provided we create a new collection.
        Returns:
          A list of AddressTranslationDescriptor() instances.
"""
if collection is None:
collection = DescriptorCollection(self.session)
# Bits 31:12 are from CR3.
# Bits 11:2 are bits 31:22 of the linear address.
pde_addr = ((self.dtb & 0xfffff000) |
((vaddr & 0xffc00000) >> 20))
pde_value = self.read_pte(pde_addr, collection=collection)
collection.add(AddressTranslationDescriptor,
object_name="pde", object_value=pde_value,
object_address=pde_addr)
if not pde_value & self.valid_mask:
collection.add(InvalidAddress, "Invalid PDE")
return collection
# Large page PDE.
if pde_value & self.page_size_mask:
# Bits 31:22 are bits 31:22 of the PDE
# Bits 21:0 are from the original linear address
physical_address = (pde_value & 0xffc00000) | (vaddr & 0x3fffff)
collection.add(CommentDescriptor, "Large page mapped\n")
collection.add(PhysicalAddressDescriptor, address=physical_address)
return collection
# Bits 31:12 are from the PDE
# Bits 11:2 are bits 21:12 of the linear address
pte_addr = (pde_value & 0xfffff000) | ((vaddr & 0x3ff000) >> 10)
pte_value = self.read_pte(pte_addr, collection=collection)
self.describe_pte(collection, pte_addr, pte_value, vaddr)
return collection
def describe_pte(self, collection, pte_addr, pte_value, vaddr):
collection.add(AddressTranslationDescriptor,
object_name="pte", object_value=pte_value,
object_address=pte_addr)
if pte_value & self.valid_mask:
# Bits 31:12 are from the PTE
# Bits 11:0 are from the original linear address
phys_addr = ((pte_value & 0xfffff000) |
(vaddr & 0xfff))
collection.add(PhysicalAddressDescriptor, address=phys_addr)
else:
collection.add(InvalidAddress, "Invalid PTE")
return collection
def read_pte(self, addr, collection=None):
"""Read an unsigned 32-bit integer from physical memory.
Note this always succeeds - reads outside mapped addresses in the image
will simply return 0.
"""
_ = collection
string = self.base.read(addr, 4)
return struct.unpack('<I', string)[0]
def get_mappings(self, start=0, end=2**64):
"""Enumerate all valid memory ranges.
Yields:
tuples of (starting virtual address, size) for valid the memory
ranges.
"""
# Pages that hold PDEs and PTEs are 0x1000 bytes each.
# Each PDE and PTE is four bytes. Thus there are 0x1000 / 4 = 0x400
# PDEs and PTEs we must test
for pde in range(0, 0x400):
vaddr = pde << 22
if vaddr > end:
return
next_vaddr = (pde + 1) << 22
if start > next_vaddr:
continue
pde_addr = ((self.dtb & 0xfffff000) |
(vaddr & 0xffc00000) >> 20)
pde_value = self.read_pte(pde_addr)
if not pde_value & self.valid_mask:
continue
# PDE is for a large page.
if pde_value & self.page_size_mask:
yield addrspace.Run(
start=vaddr,
end=vaddr + 0x400000,
file_offset=(pde_value & 0xffc00000) | (vaddr & 0x3fffff),
address_space=self.base)
continue
            # This reads the entire PTE table at once - On
            # windows where IO is extremely expensive, it's
            # about 10 times more efficient than reading it
            # one value at a time - and this loop is HOT!
pte_table_addr = ((pde_value & 0xfffff000) |
((vaddr & 0x3ff000) >> 10))
data = self.base.read(pte_table_addr, 4 * 0x400)
pte_table = struct.unpack("<" + "I" * 0x400, data)
tmp1 = vaddr
for i, pte_value in enumerate(pte_table):
vaddr = tmp1 | i << 12
if vaddr > end:
return
next_vaddr = tmp1 | ((i + 1) << 12)
if start > next_vaddr:
continue
if pte_value & self.valid_mask:
yield addrspace.Run(
start=vaddr,
end=vaddr + 0x1000,
file_offset=(pte_value & 0xfffff000) | (vaddr & 0xfff),
address_space=self.base)
def __str__(self):
return u"%s@0x%08X (%s)" % (self.__class__.__name__, self.dtb, self.name)
def __eq__(self, other):
return (super(IA32PagedMemory, self).__eq__(other) and
self.dtb == other.dtb and self.base == other.base)
def end(self):
return (2 ** 32) - 1
class IA32PagedMemoryPae(IA32PagedMemory):
"""Standard x86 32 bit PAE address space.
Provides an address space for IA32 paged memory, aka the x86
architecture, with Physical Address Extensions (PAE) enabled. Allows
callers to map virtual address to offsets in physical memory.
Comments in this class mostly come from the Intel(R) 64 and IA-32
Architectures Software Developer's Manual Volume 3A: System Programming
Guide, Part 1, revision 031, pages 4-15 to 4-23. This book is available
for free at http://www.intel.com/products/processor/manuals/index.htm.
Similar information is also available from Advanced Micro Devices (AMD)
at http://support.amd.com/us/Processor_TechDocs/24593.pdf.
This implements the translation described in Section "4.4.2 Linear-Address
Translation with PAE Paging".
"""
order = 80
__pae = True
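    # With PAE the linear address splits differently (illustrative):
    #   bits 31:30 -> PDPTE index (2 bits), bits 29:21 -> PDE index (9 bits),
    #   bits 20:12 -> PTE index (9 bits), bits 11:0 -> page offset.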
def describe_vtop(self, vaddr, collection=None):
"""Explain how a specific address was translated.
Returns:
a list of AddressTranslationDescriptor() instances.
"""
if collection is None:
collection = DescriptorCollection(self.session)
# Bits 31:5 come from CR3
# Bits 4:3 come from bits 31:30 of the original linear address
pdpte_addr = ((self.dtb & 0xffffffe0) |
((vaddr & 0xC0000000) >> 27))
pdpte_value = self.read_pte(pdpte_addr)
collection.add(AddressTranslationDescriptor,
object_name="pdpte", object_value=pdpte_value,
object_address=pdpte_addr)
if not pdpte_value & self.valid_mask:
collection.add(InvalidAddress, "Invalid PDPTE")
return collection
# Bits 51:12 are from the PDPTE
# Bits 11:3 are bits 29:21 of the linear address
pde_addr = (pdpte_value & 0xfffff000) | ((vaddr & 0x3fe00000) >> 18)
self._describe_pde(collection, pde_addr, vaddr)
return collection
def _describe_pde(self, collection, pde_addr, vaddr):
pde_value = self.read_pte(pde_addr)
collection.add(AddressTranslationDescriptor,
object_name="pde", object_value=pde_value,
object_address=pde_addr)
if not pde_value & self.valid_mask:
collection.add(InvalidAddress, "Invalid PDE")
# Large page PDE accesses 2mb region.
elif pde_value & self.page_size_mask:
# Bits 51:21 are from the PDE
# Bits 20:0 are from the original linear address
physical_address = ((pde_value & 0xfffffffe00000) |
(vaddr & 0x1fffff))
collection.add(CommentDescriptor, "Large page mapped\n")
collection.add(PhysicalAddressDescriptor, address=physical_address)
else:
# Bits 51:12 are from the PDE
# Bits 11:3 are bits 20:12 of the original linear address
pte_addr = (pde_value & 0xffffffffff000) | ((vaddr & 0x1ff000) >> 9)
pte_value = self.read_pte(pte_addr)
self.describe_pte(collection, pte_addr, pte_value, vaddr)
def describe_pte(self, collection, pte_addr, pte_value, vaddr):
collection.add(AddressTranslationDescriptor,
object_name="pte", object_value=pte_value,
object_address=pte_addr)
if pte_value & self.valid_mask:
# Bits 51:12 are from the PTE
# Bits 11:0 are from the original linear address
physical_address = (pte_value & 0xffffffffff000) | (vaddr & 0xfff)
collection.add(PhysicalAddressDescriptor, address=physical_address)
else:
collection.add(InvalidAddress, "Invalid PTE\n")
return collection
def read_pte(self, addr, collection=None):
'''
Returns an unsigned 64-bit integer from the address addr in
physical memory. If unable to read from that location, returns None.
'''
try:
return self._cache.Get(addr)
except KeyError:
string = self.base.read(addr, 8)
result = struct.unpack('<Q', string)[0]
self._cache.Put(addr, result)
return result
def get_mappings(self, start=0, end=2**64):
"""A generator of address, length tuple for all valid memory regions."""
# Pages that hold PDEs and PTEs are 0x1000 bytes each.
# Each PDE and PTE is eight bytes. Thus there are 0x1000 / 8 = 0x200
# PDEs and PTEs we must test.
for pdpte_index in range(0, 4):
vaddr = pdpte_index << 30
if vaddr > end:
return
next_vaddr = (pdpte_index + 1) << 30
if start >= next_vaddr:
continue
# Bits 31:5 come from CR3
# Bits 4:3 come from bits 31:30 of the original linear address
pdpte_addr = (self.dtb & 0xffffffe0) | ((vaddr & 0xc0000000) >> 27)
pdpte_value = self.read_pte(pdpte_addr)
if not pdpte_value & self.valid_mask:
continue
tmp1 = vaddr
for pde_index in range(0, 0x200):
vaddr = tmp1 | (pde_index << 21)
if vaddr > end:
return
next_vaddr = tmp1 | ((pde_index + 1) << 21)
if start >= next_vaddr:
continue
# Bits 51:12 are from the PDPTE
# Bits 11:3 are bits 29:21 of the linear address
pde_addr = ((pdpte_value & 0xffffffffff000) |
((vaddr & 0x3fe00000) >> 18))
pde_value = self.read_pte(pde_addr)
if not pde_value & self.valid_mask:
continue
if pde_value & self.page_size_mask:
yield addrspace.Run(
start=vaddr,
end=vaddr+0x200000,
file_offset=(pde_value & 0xfffffffe00000) | (
vaddr & 0x1fffff),
address_space=self.base)
continue
                # This reads the entire PTE table at once - On
                # windows where IO is extremely expensive, it's
                # about 10 times more efficient than reading it
                # one value at a time - and this loop is HOT!
pte_table_addr = ((pde_value & 0xffffffffff000) |
((vaddr & 0x1ff000) >> 9))
data = self.base.read(pte_table_addr, 8 * 0x200)
pte_table = struct.unpack("<" + "Q" * 0x200, data)
tmp2 = vaddr
for i, pte_value in enumerate(pte_table):
if pte_value & self.valid_mask:
vaddr = tmp2 | i << 12
if vaddr > end:
return
next_vaddr = tmp2 | (i + 1) << 12
if start >= next_vaddr:
continue
yield addrspace.Run(
start=vaddr,
end=vaddr+0x1000,
file_offset=((pte_value & 0xffffffffff000) |
(vaddr & 0xfff)),
address_space=self.base)
|
gpl-2.0
| 3,458,658,243,991,293,000
| 36.905405
| 81
| 0.605942
| false
| 4.16378
| false
| false
| false
|
MeGotsThis/BotGotsThis
|
pkg/channel/library.py
|
1
|
2842
|
import asyncio
import bot
from bot import utils
from typing import List, Optional, Union # noqa: F401
from lib.data import Send
from lib.data.message import Message
from lib.database import DatabaseMain
async def come(channel: str,
send: Send) -> bool:
bannedWithReason: Optional[str]
priority: Union[float, int]
db: DatabaseMain
async with DatabaseMain.acquire() as db:
bannedWithReason = await db.isChannelBannedReason(channel)
if bannedWithReason is not None:
send(f'Chat {channel} is banned from joining')
return True
priority = await db.getAutoJoinsPriority(channel)
joinResult: bool = utils.joinChannel(channel, priority)
if joinResult:
send(f'Joining {channel}')
else:
send(f'I am already in {channel}')
return True
async def leave(channel: str,
send: Send) -> bool:
if channel == bot.config.botnick:
return False
send(f'Bye {channel}')
await asyncio.sleep(1.0)
utils.partChannel(channel)
return True
async def auto_join(channel: str,
send: Send,
message: Message) -> bool:
db: DatabaseMain
async with DatabaseMain.acquire() as db:
bannedWithReason: Optional[str]
bannedWithReason = await db.isChannelBannedReason(channel)
if bannedWithReason is not None:
send(f'Chat {channel} is banned from joining')
return True
if len(message) >= 2:
removeMsgs: List[str] = ['0', 'false', 'no', 'remove', 'rem',
'delete', 'del', 'leave', 'part']
if message.lower[1] in removeMsgs:
return await auto_join_delete(db, channel, send)
return await auto_join_add(db, channel, send)
async def auto_join_add(db: DatabaseMain,
channel: str,
send: Send) -> bool:
result: bool = await db.saveAutoJoin(channel, 0)
priority: Union[int, float] = await db.getAutoJoinsPriority(channel)
wasInChat: bool = not utils.joinChannel(channel, priority)
if result and not wasInChat:
send(f'''\
Auto join for {channel} is now enabled and joined {channel} chat''')
elif not wasInChat:
send(f'''\
Auto join for {channel} is already enabled but now joined {channel} chat''')
else:
send(f'''\
Auto join for {channel} is already enabled and already in chat''')
return True
async def auto_join_delete(db: DatabaseMain,
channel: str,
send: Send) -> bool:
result: bool = await db.discardAutoJoin(channel)
if result:
send(f'Auto join for {channel} is now disabled')
else:
send(f'Auto join for {channel} was never enabled')
return True
|
gpl-3.0
| -3,594,922,672,161,938,000
| 32.023256
| 76
| 0.611268
| false
| 3.955432
| false
| false
| false
|
gnina/scripts
|
generate_counterexample_typeslines.py
|
1
|
11114
|
#!/usr/bin/env python3
'''
This script will generate the lines for a new types file with the iterative poses generated from counterexample_generation_jobs.py
!!WARNING!!
Part of this process is to determine which newly generated poses are NOT REDUNDANT with the previously generated ones.
This requires an O(n^2) calculation to calculate the RMSD between every pose...
Ergo, depending on the number of poses in a given pocket, this calculation could take a very long time.
This script also works on all ligands present in the pocket, so there is the potential for multiple O(n^2) calculations to take place.
We have done our best to avoid needless calculations, but this is why we generate the lines for each pocket independently
ASSUMPTIONS:
i) Poses with <2 RMSD to the crystal pose will be labeled as positive poses
ii) you have obrms installed, and can run it from your commandline
iii) the jobfile provided as input contains the full PATH to the files specified.
iv) the gninatypes files (generated by gninatyper) for the poses in args.input have ALREADY BEEN generated.
v) The crystal ligand files are formatted PDBid_LignameLIGSUFFIX
vi) The OLD sdf file with the unique poses is named LignameOLDUNIQUESUFFIX
INPUT:
i) The path to the pocket you are working on
ii) the threshold RMSD to determine if they are the same pose
iii) the name for the txt file that contains the lines to write (will be written in the POCKET DIRECTORY)
iv) the suffix of the NEW sdf file that contains all of the unique poses
v) the commands file generated from counterexample_generation_jobs.py
vi) --OPTIONAL-- the suffix of the OLD sdf file that contains all of the unique poses
OUTPUT:
==Normal==
i) the typesfile lines to add to generate the new types file
ii) A SDF file containing all of the unique poses for a given ligand -- named LignameUNIQUE_SUFFIX
iii) a ___.sdf file which will be the working file for obrms.
'''
import argparse, re, subprocess, os, sys
import pandas as pd
from rdkit.Chem import AllChem as Chem
def check_exists(filename):
if os.path.isfile(filename) and os.path.getsize(filename)>0:
return True
else:
return False
def get_pocket_lines(filename,pocket):
'''
This function reads the lines from filename, and returns only the lines which contain pocket in them.
'''
all_lines=open(filename).readlines()
lines=[x for x in all_lines if pocket in x]
return lines
def calc_ligand_dic(lines,ligand_suffix):
'''
This function will parse the input list of lines and construct 2 dictionaries
1) ligand name -> [docked files with that ligand]
2) docked_filename -> crystal_file for that pose
'''
data={}
docked_lookup={}
for line in lines:
#1) Getting the crystal ligand file
ligfile=re.split('--autobox_ligand ',line)[1].split()[0]
#2) Getting the name of the ligand ** here we assume the ligfile is PATH/<PDBid>_<ligname><LIGSUFFIX>
ligname=ligfile.split('/')[-1].split(ligand_suffix)[0].split('_')[1]
#3) Check if ligname in data
if ligname not in data:
data[ligname]=[]
#4) grabbing the docked files
outfile=re.split('-o ',line)[1].split()[0]
#5) Adding these files to their corresponding places in the dictionary
data[ligname].append(outfile)
docked_lookup[outfile]=ligfile
return data, docked_lookup
def run_obrms(ligand_file,crystal_file):
'''
This function returns a list of rmsds of the docked ligand file to the crystal file. The list is in the order of the poses.
'''
rmsds=subprocess.check_output(f'obrms {ligand_file} {crystal_file}',shell=True)
rmsds=str(rmsds,'utf-8').rstrip().split('\n')
rmsds=[float(x.split()[-1]) for x in rmsds]
return rmsds
def get_lines_towrite(crystal_lookup,list_of_docked,affinity_lookup,crystal_suffix):
'''
This function will calculate the RMSD of every input pose, to the provided crystal pose.
returns a dictionary of lines --> 'docked pose filename':[lines to write]
'''
lines={}
for docked in list_of_docked:
#Figure out affinity.
affinity=0.0
crystal=crystal_lookup[docked]
        # basename strips the directory so the key matches the "PDBid_ligname"
        # keys built from the affinity file
        cr_lookup=os.path.basename(crystal).split(crystal_suffix)[0]
if cr_lookup in affinity_lookup:
            affinity=affinity_lookup[cr_lookup]
print(docked,crystal)
rmsds=run_obrms(docked,crystal)
counter=0
lines[docked]=[]
for r in rmsds:
if r < 2:
label='1'
neg_aff=''
else:
label='0'
neg_aff='-'
rec_gninatypes=docked.split('rec')[0]+'rec_0.gninatypes'
lig_gninatypes=docked.replace('.sdf','_'+str(counter)+'.gninatypes')
lines[docked].append(f'{label} {neg_aff}{affinity} {r} {rec_gninatypes} {lig_gninatypes}\n')
counter+=1
return lines
def run_obrms_cross(filename):
'''
This function returns a pandas dataframe of the RMSD between every pose and every other pose, which is generated using obrms -x
'''
csv=subprocess.check_output('obrms -x '+filename,shell=True)
csv=str(csv,'utf-8').rstrip().split('\n')
data=pd.DataFrame([x.split(',')[1:] for x in csv],dtype=float)
return data
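# For reference: in the frame returned above, data.iloc[i, j] is the RMSD
# between pose i and pose j of the input sdf file, so rows with many small
# entries flag redundant poses.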
parser=argparse.ArgumentParser(description='Create lines to add to types files from counterexample generation. Assumes data file structure is ROOT/POCKET/FILES.')
parser.add_argument('-p','--pocket',type=str,required=True,help='Name of the pocket that you will be generating the lines for.')
parser.add_argument('-r','--root',type=str,required=True,help='PATH to the ROOT of the pockets.')
parser.add_argument('-i','--input',type=str,required=True,help='File that is output from counterexample_generation_jobs.py')
parser.add_argument('-cs','--crystal_suffix',default='_lig.pdb',help='Expression to glob the crystal ligand PDB. Defaults to _lig.pdb. Needs to match what was used with counterexample_generation_jobs.py')
parser.add_argument('--old_unique_suffix',type=str,default=None,help='Suffix for the unique ligand sdf file from a previous run. If set we will load that in and add to it. Default behavior is to generate it from provided input file.')
parser.add_argument('-us','--unique_suffix',type=str,default='_it1___.sdf',help='Suffix for the unique ligand sdf file for this run. Defaults to _it1___.sdf. One will be created for each ligand in the pocket.')
parser.add_argument('--unique_threshold',type=float,default=0.25,help='RMSD threshold for unique poses. IE poses with RMSD > thresh are considered unique. Defaults to 0.25.')
parser.add_argument('--lower_confusing_threshold',type=float,default=0.5,help='CNNscore threshold for identifying confusing good poses. Score < thresh & under 2RMSD is kept and labelled 1. 0<thresh<1. Default 0.5')
parser.add_argument('--upper_confusing_threshold',type=float,default=0.9,help='CNNscore threshold for identifying confusing poor poses. If CNNscore > thresh & over 2RMSD pose is kept and labelled 0. lower<thresh<1. Default 0.9')
parser.add_argument('-o','--outname',type=str,required=True,help='Name of the text file to write the new lines in. DO NOT WRITE THE FULL PATH!')
parser.add_argument('-a','--affinity_lookup',default='pdbbind2017_affs.txt',help='File mapping the PDBid and ligname of the ligand to its pK value. Assumes space delimited "PDBid ligname pK". Defaults to pdbbind2017_affs.txt')
args=parser.parse_args()
#Setting the myroot and root remove variable for use in the script
myroot=os.path.join(args.root,args.pocket,'')
root_remove=os.path.join(args.root,'')
#sanity check threshold
assert args.unique_threshold > 0, "Unique RMSD threshold needs to be positive"
assert 0<args.lower_confusing_threshold <1, "Lower_confusing_threshold needs to be in (0,1)"
assert args.lower_confusing_threshold<args.upper_confusing_threshold<1, "Upper_confusing_threshold needs to be in (lower_confusing_threshold,1)"
#generating our affinity lookup dictionary
affinity_lookup={}
with open(args.affinity_lookup) as infile:
for line in infile:
items=line.split()
key=items[0]+'_'+items[1]
val=items[2]
affinity_lookup[key]=val
#first we will generate the dictionary for the ligand - poses we will use.
tocheck=get_pocket_lines(args.input, args.pocket)
datadic, docked_to_crystal_lookup=calc_ligand_dic(tocheck,args.crystal_suffix)
#main loop of the script
with open(myroot+args.outname,'w') as outfile:
#loop over the ligands
for cr_name, list_o_ligs in datadic.items():
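        # NOTE: this hard-coded filter restricts processing to the 'iqz'
        # ligand only; every other ligand in the pocket is skipped.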
if cr_name!='iqz':
continue
#0) Make sure that the working sdf is free.
sdf_name=myroot+'___.sdf'
sdf_tmp=myroot+'___tmp.sdf'
#if this "___sdf" file already exists, we need to delete it and make a new one.
if check_exists(sdf_name):
os.remove(sdf_name)
#1) Figure out ALL of the lines to write
line_dic=get_lines_towrite(crystal_lookup=docked_to_crystal_lookup,list_of_docked=list_o_ligs,affinity_lookup=affinity_lookup,crystal_suffix=args.crystal_suffix)
#2) Set up the 'working sdf' for the obrms -x calculations, consisting of the confusing examples + any possible previously generated examples
# i) iterate over the possible lines for this ligand, keep only the confusing ones,
# and write the confusing poses into the working sdf file.
w=Chem.SDWriter(sdf_name)
keys=list(line_dic.keys())
for key in keys:
kept_lines=[]
supply=Chem.SDMolSupplier(key,sanitize=False)
for i,mol in enumerate(supply):
curr_line=line_dic[key][i]
score=mol.GetProp('CNNscore')
label=curr_line.split()[0]
#if scored "well", but was a bad pose
if float(score) > args.upper_confusing_threshold and label=='0':
kept_lines.append(curr_line)
w.write(mol)
#or if scored "poor", but was a good pose
elif float(score) < args.lower_confusing_threshold and label=='1':
kept_lines.append(curr_line)
w.write(mol)
#after the lines have been checked, we overwrite and only store the lines we kept.
line_dic[key]=kept_lines
w=None
# ii) Prepend ___.sdf with the previously existing unique poses sdf
offset=0
if args.old_unique_suffix:
print('Prepending existing similarity sdf to working sdf file')
old_sdfname=myroot+cr_name+args.old_unique_suffix
supply=Chem.SDMolSupplier(old_sdfname,sanitize=False)
offset=len(supply)
subprocess.check_call('mv %s %s'%(sdf_name,sdf_tmp),shell=True)
subprocess.check_call('cat %s %s > %s'%(old_sdfname,sdf_tmp,sdf_name),shell=True)
#3) run obrms -x working_sdf to calculate the rmsd between each pose. This is the O(n^2) calculation
unique_data=run_obrms_cross(sdf_name)
#4) determine the newly found "unique" poses
assignments={}
for (r,row) in unique_data.iterrows():
if r not in assignments:
for simi in row[row<args.unique_threshold].index:
if simi not in assignments:
assignments[simi]=r
to_remove=set([k for (k,v) in assignments.items() if k!=v])
#5) write the remaining lines for the newly found "unique" poses.
counter=offset
for key in keys:
for line in line_dic[key]:
if counter not in to_remove:
outfile.write(line.replace(root_remove,''))
counter+=1
#6) Write out the new "uniques" sdf file to allow for easier future generation
new_unique_sdfname=myroot+cr_name+args.unique_suffix
w=Chem.SDWriter(new_unique_sdfname)
supply=Chem.SDMolSupplier(sdf_name,sanitize=False)
for i,mol in enumerate(supply):
if i not in to_remove:
w.write(mol)
|
bsd-3-clause
| -4,488,924,076,670,344,700
| 43.103175
| 234
| 0.735739
| false
| 3.09496
| false
| false
| false
|
eschendel/gnum
|
gnlib/scripts/generate_nt_ascii_to_ctable.py
|
1
|
2926
|
#!/usr/bin/env python
## Copyright (c) 2015, Eric R. Schendel.
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
##
## - Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## - Neither the name of gnum nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from string import lower
output_type = 'const gn_nucleotide_t'
array_name = 'five_bit_ascii_to_nucleotide'
default_nucleotide = 'A'
four_nucleotides_to_tag = {
'A': 'GN_NT_A',
'C': 'GN_NT_C',
'G': 'GN_NT_G',
'T': 'GN_NT_T',
}
# every listed encoding generates a unique ID with their 5 least significant bits
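# e.g. ord('A') & 0b11111 == 1 and ord('a') & 0b11111 == 1, so upper- and
# lower-case ASCII letters collapse to the same 5-bit ID.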
four_nucleotides_to_upper_ascii_encoding = {
'A': {'A', 'W', 'M', 'R', 'D', 'H', 'V', 'N'},
'C': {'C', 'S', 'Y', 'B'},
'G': {'G', 'K'},
'T': {'T', 'U'},
}
five_bit_ascii_to_nucleotide = {}
for nucleotide, ascii_set in four_nucleotides_to_upper_ascii_encoding.iteritems():
for ascii in ascii_set:
five_bit_ascii = ord(ascii) & 0b11111
print ascii, five_bit_ascii, nucleotide
five_bit_ascii_to_nucleotide[five_bit_ascii] = nucleotide
array_values = []
for index in range(32):
nucleotide = default_nucleotide
if index in five_bit_ascii_to_nucleotide:
nucleotide = five_bit_ascii_to_nucleotide[index]
array_values.append(four_nucleotides_to_tag[nucleotide])
print output_type, array_name+'[32] = {'
count = 0
for value in array_values:
if (count % 8) == 0:
if count != 0:
print ''
print ' ',
if (count+1) == len(array_values):
print value
else:
print str(value)+',',
count += 1
print '};'
|
bsd-3-clause
| -1,499,942,304,325,796,600
| 34.682927
| 82
| 0.685578
| false
| 3.414236
| false
| false
| false
|
andgoldschmidt/iEBE
|
check_prerequisites.py
|
1
|
8145
|
#! /usr/bin/env python
"""
Print a list of tests to see whether all required tools for Ebe calculations
are present.
"""
from os import getcwd, unlink, path
from subprocess import call
numberOfSpaces = 5
def printWarning(warningString):
print("-"*(numberOfSpaces-2) + "> " + warningString)
def printMsg(message):
print(" "*numberOfSpaces + message)
def checkCommand(cmdString, utilityName=None):
"""
Try to execute "cmdString", then use "utilityName" to echo messages.
"""
tempfile = open("response.txt", 'w')
if not utilityName: utilityName=cmdString
call("%s " % cmdString, shell=True, cwd=getcwd(), stdout = tempfile, stderr = tempfile)
tempfile.close()
if "command not found" in open("response.txt").readline():
printWarning("%s *NOT* installed." % utilityName)
unlink("response.txt")
return False
else:
printMsg("%s installed." % utilityName)
unlink("response.txt")
return True
def checkModule(moduleName):
"""
Try to import "moduleName", then echo messages.
"""
try:
__import__(moduleName)
printMsg("python %s module installed." % moduleName)
return True
except:
printWarning("python %s module *NOT* installed." % moduleName)
return False
def checkEnvironment():
"""
Check if the required compiler and running environment are complete.
Return True if the environment is complete, otherwise return False.
"""
finalMsgs = []
print("Start checking...")
print("-"*80)
# check g++ and icpc
if not checkCommand("g++") and not checkCommand("icpc"):
finalMsgs.append("You need to install icpc or g++.")
# check gfortran and ifort
if not checkCommand("gfortran") and not checkCommand("ifort"):
finalMsgs.append("You need to install ifort or gfortran.")
# check make utility
if not checkCommand("make"):
finalMsgs.append("You need to install the make utility.")
# check gsl
if not checkCommand("gsl-config", "gsl"):
finalMsgs.append("You need to install gsl library.")
# check zip and unzip
if not checkCommand("zip --help", "zip") or not checkCommand("unzip --help", "unzip"):
finalMsgs.append("You need both zip and unzip utilities.")
# check numpy
if not checkModule("numpy"):
finalMsgs.append("You need to install python numpy package.")
# print final messages
print("-"*80)
if not finalMsgs:
print("All essential packages installed. Test passed.")
return True
else:
for msg in finalMsgs: print(msg)
return False
def checkExecutables():
"""
Check if all the executables are present, and compile them if not all of
them are. Return True if all the executables can be successfully
generated.
"""
ebeNodeFolder = "EBE-Node"
executables = (
path.join("superMC", "superMC.e"),
path.join("VISHNew", "VISHNew.e"),
path.join("iSS", "iSS.e"),
path.join("iS", "iS.e"),
path.join("iS", "resonance.e"),
path.join("iS", "iInteSp.e"),
path.join("osc2u", "osc2u.e"),
path.join("urqmd", "urqmd.e"),
path.join("trento","src", "trento.e")
)
# check for existence of all executables
existenceFlag = True
print("Checking existence of executables.")
for exe in executables:
if not path.exists(path.join(ebeNodeFolder, exe)):
print("Executable %s not found." % exe)
existenceFlag = False
break
else:
print("Executable %s found." % exe)
# compile if necessary and check again
tempfile = open(path.join("utilities", "CompileRecord.txt"), "w")
if not existenceFlag:
print("Start building executables...")
call("./compile_all.sh", shell=True, cwd="utilities", stdout = tempfile, stderr = tempfile)
tempfile.close()
unlink(path.join("utilities", "CompileRecord.txt"))
# check for existence of all executables again
existenceFlag = True
print("Checking again existence of executables.")
for exe in executables:
if not path.exists(path.join(ebeNodeFolder, exe)):
print("Executable %s still not found." % exe)
existenceFlag = False
return False
print("All executables found.")
return True
def greetings(selection):
if selection==1:
print(r"""
_______ _________ _ _______
|\ /|( ____ \\__ __/( ( /|/ ___ )
| ) ( || ( \/ ) ( | \ ( |\/ ) |
| (___) || (__ | | | \ | | / )
| ___ || __) | | | (\ \) | / /
| ( ) || ( | | | | \ | / /
| ) ( || (____/\___) (___| ) \ | / (_/\
|/ \|(_______/\_______/|/ )_)(_______/
_______ _______ _______ _______
( ____ \( ____ )( ___ )|\ /|( ____ )
| ( \/| ( )|| ( ) || ) ( || ( )|
| | | (____)|| | | || | | || (____)|
| | ____ | __)| | | || | | || _____)
| | \_ )| (\ ( | | | || | | || (
| (___) || ) \ \__| (___) || (___) || )
(_______)|/ \__/(_______)(_______)|/
""")
elif selection==2:
print(r"""
_ _ _ _ _ _
/ /\ / /\ /\ \ /\ \ /\ \ _ /\ \
/ / / / / // \ \ \ \ \ / \ \ /\_\ / \ \
/ /_/ / / // /\ \ \ /\ \_\ / /\ \ \_/ / /__/ /\ \ \
/ /\ \__/ / // / /\ \_\ / /\/_/ / / /\ \___/ //___/ /\ \ \
/ /\ \___\/ // /_/_ \/_/ / / / / / / \/____/ \___\/ / / /
/ / /\/___/ // /____/\ / / / / / / / / / / / /
/ / / / / // /\____\/ / / / / / / / / / / / / _
/ / / / / // / /______ ___/ / /__ / / / / / / \ \ \__/\_\
/ / / / / // / /_______\/\__\/_/___\/ / / / / / \ \___\/ /
\/_/ \/_/ \/__________/\/_________/\/_/ \/_/ \/___/_/
_ _ _ _ _
/\ \ /\ \ /\ \ /\_\ /\ \
/ \ \ / \ \ / \ \ / / / _ / \ \
/ /\ \_\ / /\ \ \ / /\ \ \\ \ \__ /\_\ / /\ \ \
/ / /\/_/ / / /\ \_\ / / /\ \ \\ \___\ / / // / /\ \_\
/ / / ______ / / /_/ / / / / / \ \_\\__ / / / // / /_/ / /
/ / / /\_____\ / / /__\/ / / / / / / // / / / / // / /__\/ /
/ / / \/____ // / /_____/ / / / / / // / / / / // / /_____/
/ / /_____/ / // / /\ \ \ / / /___/ / // / /___/ / // / /
/ / /______\/ // / / \ \ \/ / /____\/ // / /____\/ // / /
\/___________/ \/_/ \_\/\/_________/ \/_________/ \/_/
""")
elif selection==3:
print(r"""
. __.....__ .--. _..._
.'| .-'' '. |__| .' '.
< | / .-''"'-. `. .--.. .-. .
| | / /________\ \| || ' ' |
| | .'''-. | || || | | |.--------.
| |/.'''. \\ .-------------'| || | | ||____ |
| / | | \ '-.____...---.| || | | | / /
| | | | `. .' |__|| | | | .' /
| | | | `''-...... -' | | | | / /___
| '. | '. .-'''-. | | | || |
'---' '---' ' _ \ '--' '--'|_________|
/ /` '. \ _________ _...._
.--./) . | \ ' \ |.' '-.
/.''\\ .-,.--. | ' | ' \ .'```'. '.
| | | | | .-. |\ \ / / \ | \ \
\`-' / | | | | `. ` ..' /_ _ | | | |
/("'` | | | | '-...-'`| ' / | | \ / .
\ '---. | | '- .' | .' | | |\`'-.-' .'
/'""'.\ | | / | / | | | '-....-'`
|| ||| | | `'. | .' '.
\'. __// |_| ' .'| '/'-----------'
`'---' `-' `--'
""")
if __name__ == '__main__':
checkEnvironment()
|
gpl-3.0
| 6,418,206,754,668,124,000
| 36.362385
| 99
| 0.352363
| false
| 3.165565
| false
| false
| false
|
madscatt/zazzie_1.5
|
trunk/sassie/simulate/complex_monte_carlo/nmer_dihedral.py
|
1
|
34612
|
'''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,string,locale,bisect,random,time,platform
import numpy
import random
try:
import Gnuplot,Gnuplot.PlotItems, Gnuplot.funcutils
except:
pass
import sasmol.sasmol as sasmol
import sassie.simulate.constraints.constraints as constraints
import sassie.simulate.monomer_monte_carlo.dihedral_monte_carlo as dihedral
import sassie.simulate.monomer_monte_carlo.dihedral_rotate as dihedral_rotate
import sassie.simulate.energy.dihedral_energy as energy
import sassie.simulate.monomer_monte_carlo.pairs as pairs
import sassie.simulate.monomer_monte_carlo.step as step
import nmer_overlap_check
import nmer_nrotate
# NMER_DIHEDRAL
#
# 09/26/05 -- gag-dihedral search : jc
# 11/19/05 -- gag-dimer dihedral search : jc
# 06/29/09 -- generalized to nmer : jc/sr
# 11/17/11 -- added sasmol support : jc
#
#LC 1 2 3 4 5 6 7
#LC4567890123456789012345678901234567890123456789012345678901234567890123456789
# * **
'''
	NMER_DIHEDRAL contains the functions used to generate ensembles of
	structures by varying protein dihedral angles. This particular version
	allows multiple flexible proteins in the presence of non-flexible
	proteins and nucleic acids.
	This module is called from Protein Complex Dihedral Generation in
	the main GUI through the graphical_complex_generate.py script.
	It calls C / Python extension modules to speed up calculations.
'''
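# A minimal invocation sketch (mirroring the __main__ block at the bottom of
# this file; the variable dictionaries shown there are illustrative inputs):
#
#     import multiprocessing
#     txtQueue = multiprocessing.JoinableQueue()
#     dihedralgenerate(variables, psegvariables, txtQueue)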
def unpack_variables(variables):
runname = variables['runname'][0]
dcdfile = variables['dcdfile'][0]
path = variables['path'][0]
pdbfile = variables['pdbfile'][0]
trials = variables['trials'][0]
goback = variables['goback'][0]
temp = variables['temp'][0]
nsegments = variables['nsegments'][0]
segbasis = variables['segbasis'][0]
npsegments = variables['npsegments'][0]
flpsegname = variables['flpsegname'][0]
sseglow = variables['seglow'][0]
sseghigh = variables['seghigh'][0]
#cutoff = variables['cutoff'][0]
lowrg = variables['lowrg'][0]
highrg = variables['highrg'][0]
zflag = variables['zflag'][0]
zcutoff = variables['zcutoff'][0]
cflag = variables['cflag'][0]
confile = variables['confile'][0]
plotflag = variables['plotflag'][0]
directedmc = variables['directedmc'][0]
seed = variables['seed'][0]
return runname,dcdfile,path,pdbfile,trials,goback,temp,nsegments,segbasis,npsegments,flpsegname,sseglow,sseghigh,lowrg,highrg,zflag,zcutoff,cflag,confile,plotflag,directedmc,seed
def print_failure(message,txtOutput):
txtOutput.put("\n\n>>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n\n")
txtOutput.put(message)
return
def wait(sti=None, prompt='Plot will clear in 2 seconds ...\n'):
'''
    WAIT prompts the user before a plot is cleared from the screen
'''
if sti is not None:
print sti
try:
if(platform.system() == "Linux"):
import curses
stdscr = curses.initscr()
stdscr.addstr('press a key to continue')
c = stdscr.getch()
curses.endwin()
except:
time.sleep(1)
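# For each flexible segment, pick the alignment basis atom (CA for protein,
# P for RNA/DNA), build a mask over the seglow..seghigh resid range, and
# pre-compute the centered reference coordinates and center of mass used to
# re-align trial structures after each move.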
def alignment_initialization(all_segment_mol,asegs,abasis,flexible_segments,seglow,seghigh):
all_flexible_align_mask = []
all_flexible_coor_sub_m1 = []
all_flexible_com_sub_m1 = []
all_flexible_sub_m2 = []
for i in xrange(len(flexible_segments)):
this_segment = flexible_segments[i]
idx = asegs.index(this_segment)
m1 = all_segment_mol[idx]
if(m1.moltype()[0] == 'protein'):
this_basis = 'CA'
elif(m1.moltype()[0] == 'rna' or m1.moltype()[0] == 'dna'):
this_basis = 'P'
else:
print 'NO ALIGNMENT BASIS ATOM DEFINED FOR SEGNAME'
sys.exit()
### TODO need to handle the exception in complex_filter.py
### ONLY protein and RNA need this alignment
# get alignment sub molecule
align_filter = 'name[i] == "'+this_basis+'" and (segname[i] == "'+this_segment+'") and (resid[i] >= '+str(seglow[i])+' and resid[i] <= '+str(seghigh[i])+')'
error,align_mask = m1.get_subset_mask(align_filter)
all_flexible_align_mask.append(align_mask)
sub_m1=sasmol.SasMol(2)
error = m1.copy_molecule_using_mask(sub_m1,align_mask,0)
com_sub_m1 = sub_m1.calccom(0)
sub_m1.center(0)
coor_sub_m1 = sub_m1.coor()[0]
all_flexible_coor_sub_m1.append(coor_sub_m1)
all_flexible_com_sub_m1.append(com_sub_m1)
sub_m2 = sasmol.SasMol(4)
error = m1.copy_molecule_using_mask(sub_m2,align_mask,0)
all_flexible_sub_m2.append(sub_m2)
return all_flexible_align_mask,all_flexible_coor_sub_m1,all_flexible_com_sub_m1,all_flexible_sub_m2
def run_file_utilities(runname,pdbpath,pdbfile,dcdfile):
direxist=os.path.exists(runname)
if(direxist==0):
os.system('mkdir -p '+runname+'/')
#
# global run administration
#
genpath=runname+'/complex_monte_carlo'
genpaths=genpath+'/'
direxist=os.path.exists(genpath)
if(direxist==0):
os.system('mkdir -p '+genpath)
cpst='cp '+pdbpath+'/'+pdbfile+' '+genpaths
os.system(cpst)
#
# write global run name, pdb, and dcd filenames to .last_sas
#
fileexist=os.path.exists('.last_sas')
if(fileexist==1):
os.system('mv -f .last_sas .last_sas_bu')
lastsasfile=open('./.last_sas','w')
lastsasfile.write('run_name\t'+runname+'\n')
lastsasfile.write('pdb_name\t'+pdbfile+'\n')
lastsasfile.write('dcd_name\t'+dcdfile+'\n')
return lastsasfile,genpaths
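# Parse the per-segment GUI variables from their comma-separated string form:
# dihedral step sizes (clamped to [0, 180] degrees), number of flexible
# ranges, low residues and residue counts per range, molecule types, the
# basis string, and the list of flexible segment names.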
def process_input_variables(psegvariables,segbasis,sseglow,sseghigh,flpsegname):
allsith=[]
allsnumranges=[]
allsrlow=[]
allsrnum=[]
allmoltype=[]
for i in range(len(psegvariables)):
allsnumranges.append(psegvariables[i][0])
allsith.append(psegvariables[i][1])
allsrlow.append(psegvariables[i][2])
allsrnum.append(psegvariables[i][3])
allmoltype.append(psegvariables[i][4])
#abasis=string.split(segbasis,',')
abasis=[item.strip() for item in string.split(segbasis,',')]
#seglow=string.split(sseglow,',')
#seghigh=string.split(sseghigh,',')
aith=[] ; anumranges=[] ; arlow=[] ; arnum=[]; amoltype=[]
for i in range(len(allsith)):
linith=string.split(allsith[i],',')
locith=[]
for i in range(len(linith)):
tith=linith[i]
fith=locale.atof(tith)
if(fith>180.0):
fith=180.0
elif(fith<0.0):
fith=0.0
locith.append(fith)
aith.append(locith)
for i in range(len(allsnumranges)):
nr=locale.atoi(allsnumranges[i])
anumranges.append(nr)
for i in range(len(allsrlow)):
linrlow=string.split(allsrlow[i],',')
linrnum=string.split(allsrnum[i],',')
rlow=[] ; rnum=[]
for k in range(len(linrlow)):
trlow=locale.atoi(linrlow[k])
trnum=locale.atoi(linrnum[k])
rlow.append(trlow)
rnum.append(trnum)
#print 'rlow = ',rlow
#print 'rnum = ',rnum
arlow.append(rlow)
arnum.append(rnum)
for i in range(len(psegvariables)):
moltype=allmoltype[i].strip()
amoltype.append(moltype)
'''
print 'anumranges = ',anumranges
print 'aith = ',aith
print 'arlow = ',arlow
print 'arnum = ',arnum
'''
raw_flexible_segments = string.split(flpsegname,",")
flexible_segments = []
for fp in raw_flexible_segments:
flexible_segments.append(fp.strip())
#print 'flexible_segments = ',flexible_segments
return amoltype,allsith,allsnumranges,allsrlow,allsrnum,abasis,sseglow,sseghigh,anumranges,aith,arlow,arnum,flexible_segments
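# Discover the unique segment names in the starting structure and build the
# per-segment molecules and masks (full, basis, and a heavy-atom mask used
# for the z-cutoff check), along with first/last resid bookkeeping.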
def initialize_segments(m1,flexible_segments,nsegments,abasis):
segname = m1.segname()
asegs=[]
for tseg in segname:
if(tseg not in asegs):
asegs.append(tseg)
numsegs=len(asegs)
print 'found ',numsegs,' segment names'
first_last_resid = []
all_segment_mask = []
all_segment_full_mask = []
all_segment_basis_full_mask = []
all_segment_mol = []
tmask = ''
keyword_basis = False
if(len(abasis) == 1):
basis = abasis[0].strip()
if(basis.lower() == 'all' or basis.lower() == 'heavy' or basis.lower() == 'backbone'):
keyword_basis = True
for i in xrange(numsegs):
segmol = sasmol.SasMol(0)
error,segment_full_mask = m1.get_subset_mask('segname[i] == "'+asegs[i]+'"')
m1.copy_molecule_using_mask(segmol,segment_full_mask,0)
this_resid = segmol.resid()
first_last_resid.append([this_resid[0],this_resid[-1]])
all_segment_full_mask.append(segment_full_mask)
all_segment_mol.append(segmol)
### this is where abasis is used --> and this is where it matters!
if keyword_basis:
if(basis.lower() == 'all'):
#print 'setting up all atom overlap arrays'
segmol.set_average_vdw()
npairs = segmol.natoms()*(segmol.natoms() - 1)/2
cutoff_array = numpy.zeros(npairs,numpy.float)
pairs.pairs(segmol.atom_vdw(),cutoff_array)
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (not name[i] == "None") '
elif(basis.lower() == 'backbone'):
this_moltype = segmol.moltype()[0]
#print 'this_moltype = ',this_moltype ### check this
if(segmol.moltype()[0] == 'protein'):
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (name[i] == "N" or name[i] == "CA" or name[i] == "C") '
elif(segmol.moltype()[0] == 'rna' or segmol.moltype()[0] == 'dna'):
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (name[i] == "P" or name[i] == "O5\'" or name[i] == "C5\'" or name[i] == "C4\'" or name[i] == "C3\'" or name[i] == "O3\'") '
else:
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (not name[i][0] == "H") '
### TODO --> add to complex_filter so the following hack is not needed
elif(basis.lower() == 'heavy'):
keyword_basis_filter = 'segname[i] == "'+asegs[i]+'" and (not name[i][0] == "H") '
error,segment_basis_mask = m1.get_subset_mask(keyword_basis_filter)
else:
error,segment_basis_mask = m1.get_subset_mask('segname[i] == "'+asegs[i]+'" and name[i] =="'+abasis[i].strip()+'"')
all_segment_basis_full_mask.append(segment_basis_mask)
error,segment_mask = all_segment_mol[i].get_subset_mask('segname[i] == "'+asegs[i]+'"')
all_segment_mask.append(segment_mask)
### TODO ... this is probably why flexible segments need to be first!!
### should just take the NAMES of the flexible segnames to make this
###
### this is also where abasis is used --> but basis_full_mask is ONLY used for zcut
### checking: abasis itself is passed to check_overlap in nmer_nrotate
###
### OPTIONS: use moltype()[0] for each asegs[i] to set the basis (CA--> protein, P --> RNA)
### or better yet, use not hydrogen instead ... as this is ONLY used for z-cut check
###
tmask += '(segname[i] == "'+asegs[i]+'" and (not name[i][0] == "H")) '
#tmask+='segname[i] == "'+asegs[i]+'" and name[i] =="'+abasis[i].strip()+'"'
if i!=len(flexible_segments)-1:
tmask+=' or '
error,basis_full_mask= m1.get_subset_mask(tmask)
#print 'first_last_resid = ',first_last_resid
return asegs,first_last_resid,all_segment_mask,all_segment_full_mask,all_segment_basis_full_mask,basis_full_mask,all_segment_mol,keyword_basis
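# Report segment pairs whose basis atoms are already closer than the cutoff
# in the starting structure and record their residue/atom indices so those
# pre-existing contacts can be excluded from subsequent overlap checks.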
def initialize_interaction_regions(m1,interpairs,npairs,cutoff,sseglow,asegs,abasis):
if(len(interpairs)>0):
        print 'pair distances < cutoff == ',cutoff,' angstroms between segments have been found'
        print 'these distances will be ignored in the overlap check'
print 'interpairs = ',interpairs
else:
        print 'all distances between segments are greater than cutoff == ',cutoff
print 'normal overlap checking will be used'
print 'npairs = ',npairs
### initialize interaction regions in each segment ###
interres=[] ; interatom=[]
for i in range(len(interpairs)):
segnum_1 = interpairs[i][0][0]
segnum_2 = interpairs[i][0][1]
for j in range(len(interpairs[i][1])):
resnum_1 = interpairs[i][1][j][0]
resnum_2 = interpairs[i][1][j][1]
### TODO --> need to match basis here as well
### TODO --> need to match basis here as well
### TODO --> need to match basis here as well
basis_segment_1 = '(segname[i] == "'+asegs[segnum_1]+'" and name[i] =="'+abasis[segnum_1].strip()+'")'
error,basis_mask_segment_1 = m1.get_subset_mask(basis_segment_1)
            #idx_1 = numpy.where(basis_mask_segment_1==1.0)[0][resnum_1] # an ugly numpy function
idx_1 = filter(lambda x:basis_mask_segment_1[x]==1.0, range(len(basis_mask_segment_1)))[resnum_1]
basis_segment_2 = '(segname[i] == "'+asegs[segnum_2]+'" and name[i] =="'+abasis[segnum_2].strip()+'")'
error,basis_mask_segment_2 = m1.get_subset_mask(basis_segment_2)
            #idx_2 = numpy.where(basis_mask_segment_2==1.0)[0][resnum_2] # an ugly numpy function
idx_2 = filter(lambda x:basis_mask_segment_2[x]==1.0, range(len(basis_mask_segment_2)))[resnum_2]
interres.append([resnum_1,resnum_2])
interatom.append([idx_1,idx_2])
print 'interres = ',interres
print 'interatom = ',interatom
return interatom,interres
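# For each flexible segment, build the dihedral basis mask (CA for protein,
# O5' for RNA) and initialize the per-residue dihedral energy parameters
# (psi/phi for protein; alpha..eta for RNA).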
def set_up_dihedral_arrays(all_segment_mol,asegs,abasis,amoltype,first_last_resid,flexible_segments,anumranges,arlow,arnum,keyword_basis,txtOutput):
flexible_dihedral_parameters = []
all_flexible_basis_mask = []
for i in xrange(len(flexible_segments)):
this_segname = flexible_segments[i]
idx = asegs.index(this_segname)
m1=all_segment_mol[idx]
### TODO --> need to deal with specific basis here
### TODO --> need to deal with specific basis here
### TODO --> need to deal with specific basis here
if(keyword_basis):
if amoltype[i]=='protein':
basis_atom = "CA"
elif amoltype[i]=='rna':
#basis_atom = "P"
basis_atom = "O5\'"
basis_filter = 'name[i] == "'+basis_atom+'" and segname[i] == "'+this_segname+'"'
else:
basis_filter = 'name[i] == "'+abasis[idx]+'" and segname[i] == "'+this_segname+'"'
error,basis_mask = m1.get_subset_mask(basis_filter)
all_flexible_basis_mask.append(basis_mask)
basis_m1=sasmol.SasMol(1)
error = m1.copy_molecule_using_mask(basis_m1,basis_mask,0)
basis_resname = basis_m1.resname()
basis_resid = basis_m1.resid()
arespsi=[] ; aresphi=[]
numranges=anumranges[i] ; reslow=arlow[i] ; numcont=arnum[i]
if amoltype[i]=='protein':
respsi=[] ; resphi=[]
energy.protein_initialization(respsi,resphi,basis_resid,basis_resname,numranges,reslow,numcont,first_last_resid[idx],txtOutput)
flexible_dihedral_parameters.append([respsi,resphi])
elif amoltype[i]=='rna':
resalpha = [] ; resbeta = [] ; resgamma = [] ; resdelta = [] ; resepsilon = [] ; reseta = []
energy.rna_initialization(resalpha,resbeta,resgamma,resdelta,resepsilon,reseta,basis_resid,basis_resname,numranges,reslow,numcont,first_last_resid[idx],txtOutput)
flexible_dihedral_parameters.append([resalpha,resbeta,resgamma,resdelta,resepsilon,reseta])
return flexible_dihedral_parameters,all_flexible_basis_mask
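# If the constraint flag is set, read the constraint definitions from confile
# and build the paired atom-selection masks plus target distances and types.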
def set_up_constraints(m1,cflag,confile):
if(cflag == 1):
filter_flag = 0
error,constraint_basis1_array, constraint_basis2_array,distance_array,type_array = constraints.read_constraints(m1,confile,filter_flag)
mask_a_array = [] ; mask_b_array = []
for i in xrange(len(distance_array)):
print constraint_basis1_array[i]
print constraint_basis2_array[i]
print distance_array[i]
print type_array[i]
error,local_mask_a_array = m1.get_subset_mask(constraint_basis1_array[i])
error,local_mask_b_array = m1.get_subset_mask(constraint_basis2_array[i])
mask_a_array.append(local_mask_a_array)
mask_b_array.append(local_mask_b_array)
else:
mask_a_array = [] ; mask_b_array = []
distance_array = [] ; type_array = []
return mask_a_array,mask_b_array,distance_array,type_array
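# For each flexible segment, enumerate its flexible residues from the input
# ranges and pre-compute the per-residue rotation indices and masks.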
def setup_flexible_residue_mask_arrays(m1,flexible_segments,anumranges,arlow,arnum,amoltype,txtOutput):
all_flexible_residues = []
all_flexible_residue_rotation_indices = []
all_flexible_residue_rotation_mask = []
for i in xrange(len(flexible_segments)):
numranges=anumranges[i] ; reslow=arlow[i] ; numcont=arnum[i]
flexible_residues = dihedral.get_flexible_residues(numranges,reslow,numcont)
all_flexible_residues.append(flexible_residues)
segment_filter = 'segname[i] == "'+flexible_segments[i]+'"'
error,segment_mask = m1.get_subset_mask(segment_filter)
#print 'segment_filter = ',segment_filter
#print 'error = ',error
segment_m1=sasmol.SasMol(98)
error = m1.copy_molecule_using_mask(segment_m1,segment_mask,0)
molecule_type = amoltype[i]
residue_rotation_indices,residue_rotation_mask = dihedral.get_rotation_indices(segment_m1,molecule_type,flexible_residues,txtOutput)
all_flexible_residue_rotation_indices.append(residue_rotation_indices)
all_flexible_residue_rotation_mask.append(residue_rotation_mask)
return all_flexible_residues,all_flexible_residue_rotation_indices,all_flexible_residue_rotation_mask
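# Directed Monte Carlo bookkeeping: when a trial structure's Rg is closer to
# the target than the worst entry in the fixed-length lists, replace that
# worst entry with the new structure's values.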
def evaluate_rg(rg_difference_list,directed_rg_list,accepted_rg_list,this_rg_difference,this_rg,accepted):
maximum_value = max(rg_difference_list)
if(maximum_value > this_rg_difference):
index = rg_difference_list.index(maximum_value)
rg_difference_list[index] = this_rg_difference
directed_rg_list[index] = this_rg
accepted_rg_list[index] = accepted
return
### main method ###
def dihedralgenerate(variables,psegvariables,txtOutput):
#amoltype=['protein','protein']
#amoltype=['rna','protein']
#amoltype=['protein']
#ttxt=time.ctime()
ttxt=time.asctime( time.gmtime( time.time() ) )
st=''.join(['=' for x in xrange(60)])
txtOutput.put("\n%s \n" %(st))
txtOutput.put("DATA FROM RUN: %s \n\n" %(ttxt))
# unpack variables
runname,dcdfile,path,pdbfile,trials,goback,temp,nsegments,segbasis,npsegments,flpsegname,sseglow,sseghigh,lowrg,highrg,zflag,zcutoff,cflag,confile,plotflag,directedmc,seed=unpack_variables(variables)
    segbasis = segbasis.strip()
# process variables
amoltype,allsith,allsnumranges,allsrlow,allsrnum,abasis,seglow,seghigh,anumranges,aith,arlow,arnum,flexible_segments = process_input_variables(psegvariables,segbasis,sseglow,sseghigh,flpsegname)
    # debug dump of the processed input variables
    import pprint
    fout = open('a.txt','w')
    pprint.pprint(variables,fout)
    pprint.pprint(psegvariables,fout)
    pprint.pprint(segbasis,fout)
    pprint.pprint(seglow,fout)
    pprint.pprint(seghigh,fout)
    pprint.pprint(flpsegname,fout)
    fout.close()
# set up run file I/O
lastsasfile,genpaths = run_file_utilities(runname,path,pdbfile,dcdfile)
kb=1.380658E-23 # J/K
beta=1.0/(temp*kb)
m1 = sasmol.SasMol(0)
m1.read_pdb(path+pdbfile)
nf1=m1.number_of_frames()
#print 'nf1 = %d\n' % nf1
dcdoutfile = m1.open_dcd_write(genpaths+dcdfile)
# set up segment arrays
asegs,first_last_resid,all_segment_mask,all_segment_full_mask,all_segment_basis_full_mask,basis_full_mask,all_segment_mol,keyword_basis = initialize_segments(m1,flexible_segments,nsegments,abasis)
# set up constraints variables
mask_a_array,mask_b_array,distance_array,type_array = set_up_constraints(m1,cflag,confile)
# set up segment alignment coordinates and com arrays
all_flexible_align_mask,all_flexible_coor_sub_m1,all_flexible_com_sub_m1,all_flexible_sub_m2 = alignment_initialization(all_segment_mol,asegs,abasis,flexible_segments,seglow,seghigh)
if(keyword_basis):
if(segbasis.lower() == 'all'):
cutoff = 0.8
elif(segbasis.lower() == 'heavy' or segbasis.lower() == 'backbone'):
cutoff = 0.8
else:
cutoff = 2.0
print 'cutoff = ',cutoff
check_initial_interactions = False
if(check_initial_interactions):
# survey interaction between segments
interpairs,npairs=nmer_overlap_check.nmer_overlap_check(m1,path,pdbfile,cutoff,abasis,keyword_basis)
interatom,interres = initialize_interaction_regions(m1,interpairs,npairs,cutoff,sseglow,asegs,abasis)
else:
interpairs = [] ; npairs = 0
interatom = [] ; interres = []
# set up dihedral parameters for each flexible segment
flexible_dihedral_parameters,all_flexible_basis_mask = set_up_dihedral_arrays(all_segment_mol,asegs,abasis,amoltype,first_last_resid,flexible_segments,anumranges,arlow,arnum,keyword_basis,txtOutput)
if(segbasis.lower() == 'all' or segbasis.lower() == 'heavy' or segbasis.lower() == 'backbone'):
print 'segbasis = ',segbasis,' so I should stop for now\n'
#sys.exit()
else:
print 'segbasis = ',segbasis,' so I should continue\n'
# set up flexible residue rotation mask arrays
all_flexible_residues,all_flexible_residue_rotation_indices,all_flexible_residue_rotation_mask = setup_flexible_residue_mask_arrays(m1,flexible_segments,anumranges,arlow,arnum,amoltype,txtOutput)
step_parameters = step.Setup()
hrg=0.0 ; lowestrg=1000.0
an='psi'
accepted=0 ; over=0 ; badrg=0 ; badz=0 ; badc=0 ; nsteps=0 ; arg=0.0 ; trg=0.0
coor = m1.coor()
frame = 0
# MAIN LOOP
q0=1;th=1.0;seg=asegs[0]
pairdat=[an,q0,th,seg]
all_rg_tally=[] ; accepted_rg_tally=[]
phi_tally=[] ; aphi_tally=[]
psi_tally=[] ; apsi_tally=[]
atpsi_tally=[] ; atphi_tally=[] ; atphipsi_tally=[]
if(plotflag == 1):
graph = Gnuplot.Gnuplot(debug=1)
graph.clear()
graph('set title "Rg Results"')
graph.xlabel('Structure Number')
graph.ylabel('Rg (Angstrom^2)')
nonbondflag = 0
if(seed[0] == 1):
from numpy.random import RandomState
seed_object = RandomState(seed[1])
else:
seed_object = -1
failtally=0 ; acc=0 ; afile=''; accfile=[]
minx=[] ; miny=[] ; minz=[]
maxx=[] ; maxy=[] ; maxz=[]
if(directedmc > 0):
rg_difference_list = []
directed_rg_list = []
accepted_rg_list = []
rg_list_length = 10 ### hardwired
for i in range(trials):
if(seed[0] == 1):
ran_num = seed_object.rand()
tflexsegn = int(len(flexible_segments)*ran_num)
tsegn = asegs.index(flexible_segments[tflexsegn])
else:
tflexsegn = int(len(flexible_segments)*random.random())
tsegn = asegs.index(flexible_segments[tflexsegn])
tseg = asegs[tsegn]
molecule_type = amoltype[tflexsegn]
dtheta = aith[tflexsegn] ; numranges = anumranges[tflexsegn]
reslow = arlow[tflexsegn] ; numcont = arnum[tflexsegn]
segment_full_mask = all_segment_full_mask[tsegn]
error,new_coor = m1.get_coor_using_mask(frame,segment_full_mask)
segment_mol = all_segment_mol[tsegn]
segment_mol.setCoor(new_coor)
'''
if(i<10):
print 'segment_mol.coor()[0,0,0] = ',segment_mol.coor()[0,0,0]
else:
sys.exit()
'''
vdi,vdf,indices,this_mask=step_parameters.chooser(new_coor,segment_mol,pairdat,dtheta,numranges,reslow,numcont,flexible_dihedral_parameters[tflexsegn],beta,all_flexible_residue_rotation_indices[tflexsegn],all_flexible_residue_rotation_mask[tflexsegn],nonbondflag,first_last_resid[tsegn],molecule_type,seed_object)
'''
print 'len(indices) = ',len(indices)
print 'indices[0] = ',indices[0]
print 'indices[-1] = ',indices[-1]
print 'tsegn = ',tsegn
'''
pairdat[3] = tseg
an=pairdat[0] ; q0=pairdat[1] ; th=pairdat[2] ; seg=pairdat[3]
nsteps+=1
re=[0,0,0,0.0,0.0,lowestrg,hrg,0,0,[]]
newafile = nmer_nrotate.rotate(coor,m1,q0,th,an,cutoff,lowrg,highrg,re,accepted,zflag,zcutoff,cflag,dcdoutfile,indices,this_mask,all_flexible_basis_mask[tflexsegn],all_flexible_sub_m2[tflexsegn],all_flexible_align_mask[tflexsegn],all_flexible_coor_sub_m1[tflexsegn],all_flexible_com_sub_m1[tflexsegn],mask_a_array,mask_b_array,distance_array,type_array,first_last_resid[tsegn],molecule_type,all_segment_mask[tsegn],segment_full_mask,all_segment_basis_full_mask,basis_full_mask,all_segment_mol[tsegn],asegs,abasis,interatom,interres)
print '.', ; sys.stdout.flush()
accepted=accepted+re[0] ; over=over+re[1] ; badrg=badrg+re[2] ; rg_value=re[3]
trg=trg+re[3] ; arg=arg+re[4] ; lowestrg=re[5] ; hrg=re[6]
badz=badz+re[7] ; badc=badc+re[8]
if(len(re[9])>0):
minmax=re[9]
minx.append(minmax[0][0]) ; miny.append(minmax[0][1]) ; minz.append(minmax[0][2])
maxx.append(minmax[1][0]) ; maxy.append(minmax[1][1]) ; maxz.append(minmax[1][2])
all_rg_tally.append([i,rg_value])
if(re[0]==1):
accepted_rg_tally.append([i,accepted,rg_value])
if(directedmc > 0):
if(len(rg_difference_list)<= rg_list_length):
this_rg_difference = abs(rg_value - directedmc)
rg_difference_list.append(this_rg_difference)
directed_rg_list.append(rg_value)
accepted_rg_list.append(accepted)
else:
this_rg_difference = abs(rg_value - directedmc)
evaluate_rg(rg_difference_list,directed_rg_list,accepted_rg_list,this_rg_difference,rg_value,accepted)
if(re[0]==0):
if(failtally==goback):
failtally=0
if(accepted > 0):
if(seed[0] == 1):
ran_num = seed_object.rand()
dum=int(accepted*ran_num)-1
elif(directedmc > 0):
local_rg_list_length = len(directed_rg_list)
ran_num = random.randrange(0,local_rg_list_length)
dum = accepted_rg_list[ran_num]
else:
dum=int(accepted*random.random())-1
if(dum==-1):
print '\nreloading coordinates from original starting structure'
m1.read_pdb(path+pdbfile,fastread=True,saspdbrx_topology=True)
coor=m1.coor()
else:
print '\nreloading coordinates from a previously accepted structure'
m1.read_single_dcd_step(genpaths+dcdfile,dum+1)
#m1.read_single_dcd_step(genpaths+dcdfile,dum)
coor=m1.coor()
else:
print '\n>>>>>reloading coordinates from original starting structure'
m1.read_pdb(path+pdbfile,fastread=True,saspdbrx_topology=True)
coor=m1.coor()
else:
failtally=failtally+1
if(((i+1)%(float(trials)/100.0)==0 or (trials<10))):
fraction_done = (float(i+1)/float(trials))
progress_string='\nCOMPLETED '+str(i+1)+' of '+str(trials)+' : '+str(fraction_done*100.0)+' % done'
print('%s\n' % progress_string)
print accepted,' configurations accepted out of ',nsteps,(float(accepted)/nsteps)*100.0,' %\n\n'
report_string='STATUS\t'+str(fraction_done)
txtOutput.put(report_string)
if(i>9):
if((i+1)%(trials/10)==0 and accepted > 0 and i+1>10):
if(plotflag == 1):
graph.plot(Gnuplot.Data(all_rg_tally,using='1:2 w p ps 4',title='all Rg'),Gnuplot.Data(accepted_rg_tally,using='1:3 w lp pt 5 ps 2',title='accepted'))
fraction_done = (float(i+1)/float(trials))
report_string='STATUS\t'+str(fraction_done)
txtOutput.put(report_string)
elif(accepted > 0):
if(plotflag == 1):
graph.plot(Gnuplot.Data(all_rg_tally,using='1:2 w p ps 4',title='all Rg'),Gnuplot.Data(accepted_rg_tally,using='1:3 w lp pt 5 ps 2',title='accepted'))
fraction_done = (float(i+1)/float(trials))
report_string='STATUS\t'+str(fraction_done)
txtOutput.put(report_string)
m1.close_dcd_write(dcdoutfile)
rgplot=open('./'+runname+'/complex_monte_carlo/'+dcdfile+'.all_rg_results_data.txt','w')
    rgplot.write('# structure number (numbering starts at 1, not 0), Rg (all)\n')
for ii in range(len(all_rg_tally)):
rgplot.write('%i\t%f\n' % (all_rg_tally[ii][0]+1,all_rg_tally[ii][1]))
rgplot.close()
rgplot=open('./'+runname+'/complex_monte_carlo/'+dcdfile+'.accepted_rg_results_data.txt','w')
    rgplot.write('# structure number (numbering starts at 1, not 0), Rg (accepted)\n')
for ii in range(len(accepted_rg_tally)):
rgplot.write('%i\t%f\t%i\n' % (accepted_rg_tally[ii][1]-1,accepted_rg_tally[ii][2],accepted_rg_tally[ii][0]+1))
rgplot.close()
'''
outfile2=open(genpaths+dcdfile+'.phi','w')
outfile3=open(genpaths+dcdfile+'.psi','w')
outfile5=open(genpaths+dcdfile+'.aphi','w')
outfile6=open(genpaths+dcdfile+'.apsi','w')
outfile7=open(genpaths+dcdfile+'.aphivsapsi','w')
outfile7.write('# ACCEPTED STRUCTURES\n')
outfile7.write('# AA phi psi\n')
for i in range(len(phi_tally)):
outfile2.write('%i\t%f\n' % (phi_tally[i][0],phi_tally[i][1]))
for i in range(len(psi_tally)):
outfile3.write('%i\t%f\n' % (psi_tally[i][0],psi_tally[i][1]))
for i in range(len(aphi_tally)):
outfile5.write('%i\t%f\n' % (aphi_tally[i][0],aphi_tally[i][1]))
for i in range(len(apsi_tally)):
outfile6.write('%i\t%f\n' % (apsi_tally[i][0],apsi_tally[i][1]))
for i in range(len(atphipsi_tally)):
outfile7.write('%i\t%f\t%f\n' % (atphipsi_tally[i][0],atphipsi_tally[i][1],atphipsi_tally[i][2]))
outfile2.close()
outfile3.close()
outfile5.close()
outfile6.close()
outfile7.close()
'''
ttxt=time.ctime()
st=''.join(['=' for x in xrange(60)])
if(accepted > 0):
txtOutput.put("Average accepted rg2 = %lf\n" % (arg/(accepted)))
txtOutput.put("Configurations and statistics saved in %s directory\n" % ('./'+genpaths))
else:
txtOutput.put("Average accepted rg2 = %lf\n" % (0.0))
txtOutput.put("\n NO ACCEPTED MOVES\n\n Statistics saved in %s directory\n" % (genpaths))
outfile7=open(genpaths+dcdfile+'.stats','w')
outfile7.write('%s\t%f\t%s\t%f\n' % ('lowest Rg = ',lowestrg,'highest Rg = ',hrg))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('accepted ',accepted,' out of ',nsteps,' moves : ',(accepted/float(nsteps))*100.0, ' %'))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('overlapped ',over,' out of ',nsteps,' moves : ',(over/float(nsteps))*100.0, ' %'))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('bad rg2 ',badrg,' out of ',nsteps,' moves : ',(badrg/float(nsteps))*100.0, ' %'))
outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('bad z-filter ',badz,' out of ',nsteps,' moves : ',(badz/float(nsteps))*100.0, ' %'))
    outfile7.write('%s\t%i\t%s\t%i\t%s\t%f%s\n' % ('bad constraints ',badc,' out of ',nsteps,' moves : ',(badc/float(nsteps))*100.0, ' %'))
if(accepted>0):
outfile7.write('%s\t%f\n' % ('average accepted rg2 = ',arg/(accepted)))
else:
outfile7.write('%s\t%f\n' % ('average accepted rg2 = ',0.0))
outfile7.write('%s\t%f\n' % ('average total rg2 of ensemble = ',trg/(nsteps)))
print '\n\nDCD data were written to %s\n' % ('./'+genpaths+dcdfile)
txtOutput.put( "\nDCD data were written to %s\n\n" % ('./'+genpaths+dcdfile))
txtOutput.put("lowest Rg = %lf\t highest Rg = %lf\n" % (lowestrg,hrg))
txtOutput.put("accepted %d out of %d : %lf percent\n" % (accepted,nsteps,(accepted/float(nsteps))*100.0))
txtOutput.put("overlapped %d out of %d moves : %lf percent\n" % (over,nsteps,(float(over)/float(nsteps))*100.0))
txtOutput.put("bad rg2 %d out of %d moves : %lf percent\n" % (badrg,nsteps,(float(badrg)/float(nsteps))*100.0))
if(zflag==1):
txtOutput.put("bad zcut %d out of %d moves : %lf percent\n\n\n" % (badz,nsteps,(float(badz)/float(nsteps))*100.0))
if(cflag==1):
txtOutput.put("constraint filter rejected %d out of %d moves : %lf percent\n\n\n" % (badc,nsteps,(float(badc)/float(nsteps))*100.0))
if(len(minx)>0 and len(miny)>0 and len(minz)>0 and len(maxx)>0 and len(maxy)>0 and len(maxz)>0):
min_x = numpy.min(minx) ; min_y = numpy.min(miny) ; min_z = numpy.min(minz)
max_x = numpy.max(maxx) ; max_y = numpy.max(maxy) ; max_z = numpy.max(maxz)
txtOutput.put("\nminimum x = %lf\t maximum x = %lf -> range: %lf Angstroms\n" % (min_x,max_x,(max_x-min_x)))
txtOutput.put("minimum y = %lf\t maximum y = %lf -> range: %lf Angstroms\n" % (min_y,max_y,(max_y-min_y)))
txtOutput.put("minimum z = %lf\t maximum z = %lf -> range: %lf Angstroms\n\n" % (min_z,max_z,(max_z-min_z)))
outfile7.write("\nminimum x = %lf\t maximum x = %lf -> range: %lf Angstroms\n" % (min_x,max_x,(max_x-min_x)))
outfile7.write("minimum y = %lf\t maximum y = %lf -> range: %lf Angstroms\n" % (min_y,max_y,(max_y-min_y)))
outfile7.write("minimum z = %lf\t maximum z = %lf -> range: %lf Angstroms\n\n" % (min_z,max_z,(max_z-min_z)))
outfile7.close()
else:
outfile7.close()
txtOutput.put("\n%s \n" %(st))
lastsasfile.close()
print 'COMPLEX DIHEDRAL IS DONE'
time.sleep(1.5)
if(plotflag == 1):
wait('\n')
return()
if __name__=='__main__':
runname = 'run_0'
dcdfile = 'run_0.dcd'
path = './'
pdbfile = 'fram601.pdb'
trials = '50'
goback = '50'
nsegments = '2'
npsegments = '2'
flpsegname = 'ENDA,ENDB'
segbasis = 'CA, CA'
#segbasis = 'all'
#segbasis = 'heavy'
#segbasis = 'backbone'
seglow = '95, 95'
seghigh = '110, 110'
temp = '300.0'
lowrg = '20.0'
highrg = '185.0'
zflag = '0'
zcutoff = '0.0'
cflag = '0'
confile = 'constraints.txt'
directedmc = '0'
psffilepath='./'
psffilename = 'refgag.psf'
import sassie.sasconfig as sasconfig
parmfilepath=sasconfig._bin_path+'toppar'
parmfilename = 'par_all27_prot_na.inp'
plotflag = '1'
seed = '0, 123'
svariables={}
svariables['cflag'] = (cflag,'int')
svariables['confile'] = (confile,'string')
svariables['dcdfile'] = (dcdfile,'string')
svariables['directedmc'] = (directedmc,'float')
svariables['flpsegname'] = (flpsegname, 'string')
svariables['goback'] = (goback,'int')
svariables['highrg'] = (highrg,'float')
svariables['lowrg'] = (lowrg,'float')
svariables['npsegments'] = (npsegments,'int')
svariables['nsegments'] = (nsegments,'int')
svariables['parmfilename'] = (parmfilename,'string')
svariables['path'] = (path,'string')
svariables['pdbfile'] = (pdbfile,'string')
svariables['plotflag'] = (plotflag,'int')
svariables['psffilename'] = (psffilename,'string')
svariables['runname'] = (runname,'string')
svariables['seed'] = (seed,'int_array')
svariables['segbasis'] = (segbasis,'string')
svariables['seghigh'] = (seghigh,'int_array')
svariables['seglow'] = (seglow,'int_array')
svariables['temp'] = (temp,'float')
svariables['trials'] = (trials,'int')
svariables['zcutoff'] = (zcutoff,'float')
svariables['zflag'] = (zflag, 'int')
psegvariables= [['1', '30', '2', '30', 'protein'], ['1', '30', '2', '30', 'protein']]
import sassie.interface.input_filter as input_filter
error,variables=input_filter.type_check_and_convert(svariables)
#error=generate_filter.check_protein(variables,eflag,monflag)
if(len(error)>0):
print 'error = ',error
sys.exit()
runname=variables['runname'][0]
import multiprocessing
txtQueue=multiprocessing.JoinableQueue()
dihedralgenerate(variables,psegvariables,txtQueue)
|
gpl-3.0
| 4,616,042,028,928,589,000
| 34.830228
| 534
| 0.669941
| false
| 2.647999
| false
| false
| false
|
ian-r-rose/burnman
|
burnman/minerals/SLB_2005.py
|
1
|
2794
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
SLB_2005
^^^^^^^^
Minerals from Stixrude & Lithgow-Bertelloni 2005 and references therein
"""
from __future__ import absolute_import
from .. import mineral_helpers as helpers
from ..mineral import Mineral
class stishovite (Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 14.02e-6,
'K_0': 314.0e9,
'Kprime_0': 4.4,
'G_0': 220.0e9,
'Gprime_0': 1.6,
'molar_mass': .0601,
'n': 3,
'Debye_0': 1044.,
'grueneisen_0': 1.34,
'q_0': 2.4,
'eta_s_0': 5.0}
Mineral.__init__(self)
class periclase (Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 11.24e-6,
'K_0': 161.0e9,
'Kprime_0': 3.8,
'G_0': 131.0e9,
'Gprime_0': 2.1,
'molar_mass': .0403,
'n': 2,
'Debye_0': 773.,
'grueneisen_0': 1.5,
'q_0': 1.5,
'eta_s_0': 2.8}
Mineral.__init__(self)
class wuestite (Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 12.06e-6,
'K_0': 152.0e9,
'Kprime_0': 4.9,
'G_0': 47.0e9,
'Gprime_0': 0.7,
'molar_mass': .0718,
'n': 2,
'Debye_0': 455.,
'grueneisen_0': 1.28,
'q_0': 1.5,
'eta_s_0': 0.8}
Mineral.__init__(self)
class mg_perovskite(Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 24.45e-6,
'K_0': 251.0e9,
'Kprime_0': 4.1,
'G_0': 175.0e9,
'Gprime_0': 1.7,
'molar_mass': .1000,
'n': 5,
'Debye_0': 1070.,
'grueneisen_0': 1.48,
'q_0': 1.4,
'eta_s_0': 2.6}
Mineral.__init__(self)
class fe_perovskite(Mineral):
def __init__(self):
self.params = {
'equation_of_state': 'slb3',
'V_0': 25.48e-6,
'K_0': 281.0e9,
'Kprime_0': 4.1,
'G_0': 138.0e9,
'Gprime_0': 1.7,
'molar_mass': .1319,
'n': 5,
'Debye_0': 841.,
'grueneisen_0': 1.48,
'q_0': 1.4,
'eta_s_0': 2.1}
Mineral.__init__(self)
mg_bridgmanite = mg_perovskite
fe_bridgmanite = fe_perovskite
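# Example usage (a sketch, not part of the original file; assumes BurnMan's
# standard Material.set_state(pressure, temperature) interface and
# illustrative lower-mantle conditions):
#
#     from burnman.minerals import SLB_2005
#     pv = SLB_2005.mg_perovskite()
#     pv.set_state(25.e9, 2000.)   # P in Pa, T in K
#
# after which thermoelastic properties follow from the 'slb3' equation of state.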
|
gpl-2.0
| -8,929,445,697,747,135,000
| 22.880342
| 111
| 0.430208
| false
| 2.774578
| false
| false
| false
|
CitrineInformatics/python-citrination-client
|
citrination_client/models/tests/test_service_status.py
|
1
|
4405
|
from citrination_client.models import ServiceStatus
from citrination_client.base.errors import CitrinationClientError
import pytest
example_status_response_dict = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 1.0,
"subevent": {
"title": "A slightly more granular description of what were doing",
"subtitle": "An even more granular description of what were doing",
"normalizedProgress": 1.0
}
}
}
def test_can_initialize_from_response_dict():
status = ServiceStatus.from_response_dict(example_status_response_dict)
assert status.is_ready()
assert status.reason == example_status_response_dict["reason"]
assert status.context == example_status_response_dict["context"]
event = status.event
assert event.title == example_status_response_dict["event"]["title"]
assert event.subtitle == example_status_response_dict["event"]["subtitle"]
assert event.normalized_progress == example_status_response_dict["event"]["normalizedProgress"]
subevent = event.subevent
assert subevent.title == example_status_response_dict["event"]["subevent"]["title"]
assert subevent.subtitle == example_status_response_dict["event"]["subevent"]["subtitle"]
assert subevent.normalized_progress == example_status_response_dict["event"]["subevent"]["normalizedProgress"]
example_status_response_dict_without_event = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice"
}
def test_can_initialize_from_response_dict_without_event():
status = ServiceStatus.from_response_dict(example_status_response_dict_without_event)
assert status.is_ready()
assert status.reason == example_status_response_dict_without_event["reason"]
assert status.context == example_status_response_dict_without_event["context"]
assert status.event is None
example_status_response_dict_without_subevent = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 1.0
}
}
def test_can_initialize_from_response_dict_without_subevent():
status = ServiceStatus.from_response_dict(example_status_response_dict_without_subevent)
assert status.is_ready()
assert status.reason == example_status_response_dict_without_subevent["reason"]
assert status.context == example_status_response_dict_without_subevent["context"]
event = status.event
assert event.title == example_status_response_dict_without_subevent["event"]["title"]
assert event.subtitle == example_status_response_dict_without_subevent["event"]["subtitle"]
assert event.normalized_progress == example_status_response_dict_without_subevent["event"]["normalizedProgress"]
assert event.subevent is None
example_status_response_dict_not_ready = {
"reason": "Please wait for machine learning features to become available",
"ready": False,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 0.33
}
}
def test_can_initialize_from_response_dict_not_ready():
status = ServiceStatus.from_response_dict(example_status_response_dict_not_ready)
assert not status.is_ready()
assert status.reason == example_status_response_dict_not_ready["reason"]
assert status.context == example_status_response_dict_not_ready["context"]
event = status.event
assert event.title == example_status_response_dict_not_ready["event"]["title"]
assert event.subtitle == example_status_response_dict_not_ready["event"]["subtitle"]
assert event.normalized_progress == example_status_response_dict_not_ready["event"]["normalizedProgress"]
assert event.subevent is None
example_status_response_dict_nonsense = {
"reason": "Please wait for machine learning features to become available",
"ready": True,
"context": "notice",
"event": {
"title": "Initializing machine learning services",
"subtitle": "Doin some other stuff",
"normalizedProgress": 0.33
}
}
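# Note: the "nonsense" dict above (ready is True while normalizedProgress is
# only 0.33) is defined but never exercised in the original file. A plausible
# companion test, mirroring the pattern of the tests above, might be:
def test_can_initialize_from_response_dict_nonsense():
    status = ServiceStatus.from_response_dict(example_status_response_dict_nonsense)
    assert status.is_ready()
    assert status.event.normalized_progress == 0.33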
|
apache-2.0
| 4,386,586,981,293,500,400
| 36.649573
| 116
| 0.720999
| false
| 3.915556
| false
| false
| false
|