| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
opethe1st/CompetitiveProgramming
|
Codility/5PrefixSums/GeonomicRange.py
|
1
|
1181
|
def solution(S, P, Q):
    # write your code in Python 2.7
    prefixA = [0] * (len(S) + 1)
    prefixC = [0] * (len(S) + 1)
    prefixG = [0] * (len(S) + 1)
    prefixT = [0] * (len(S) + 1)
    for i in xrange(len(S)):
        if S[i] == 'A':
            prefixA[i + 1] = prefixA[i] + 1
        else:
            prefixA[i + 1] = prefixA[i]
    for i in xrange(len(S)):
        if S[i] == 'C':
            prefixC[i + 1] = prefixC[i] + 1
        else:
            prefixC[i + 1] = prefixC[i]
    for i in xrange(len(S)):
        if S[i] == 'G':
            prefixG[i + 1] = prefixG[i] + 1
        else:
            prefixG[i + 1] = prefixG[i]
    for i in xrange(len(S)):
        if S[i] == 'T':
            prefixT[i + 1] = prefixT[i] + 1
        else:
            prefixT[i + 1] = prefixT[i]
    ans = []
    for i in xrange(len(P)):
        # print prefixC, Q[i], P[i]
        if prefixA[Q[i] + 1] > prefixA[P[i]]:
            ans.append(1)
        elif prefixC[Q[i] + 1] > prefixC[P[i]]:
            ans.append(2)
        elif prefixG[Q[i] + 1] > prefixG[P[i]]:
            ans.append(3)
        elif prefixT[Q[i] + 1] > prefixT[P[i]]:
            ans.append(4)
    return ans
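
# Editor's note (not part of the original file): a quick sanity check using the
# classic Codility GenomicRangeQuery example; impact factors are A=1, C=2, G=3, T=4,
# and each query reports the minimal factor present in the slice S[P[i]..Q[i]].
if __name__ == '__main__':
    print(solution("CAGCCTA", [2, 5, 0], [4, 5, 6]))  # expected: [2, 4, 1]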
|
gpl-3.0
| -6,070,044,671,206,912,000
| 29.282051
| 47
| 0.42591
| false
| 2.714943
| false
| false
| false
|
magfest/ubersystem
|
alembic/versions/5ceaec4834aa_associate_mits_docs_and_pictures_with_.py
|
1
|
3130
|
"""Associate MITS docs and pictures with games instead of teams
Revision ID: 5ceaec4834aa
Revises: 4036e1fdb9ee
Create Date: 2020-04-14 23:23:35.417496
"""
# revision identifiers, used by Alembic.
revision = '5ceaec4834aa'
down_revision = '4036e1fdb9ee'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import residue
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('mits_document', sa.Column('game_id', residue.UUID(), nullable=False))
op.drop_constraint('fk_mits_document_team_id_mits_team', 'mits_document', type_='foreignkey')
op.create_foreign_key(op.f('fk_mits_document_game_id_mits_game'), 'mits_document', 'mits_game', ['game_id'], ['id'])
op.drop_column('mits_document', 'team_id')
op.add_column('mits_picture', sa.Column('game_id', residue.UUID(), nullable=False))
op.drop_constraint('fk_mits_picture_team_id_mits_team', 'mits_picture', type_='foreignkey')
op.create_foreign_key(op.f('fk_mits_picture_game_id_mits_game'), 'mits_picture', 'mits_game', ['game_id'], ['id'])
op.drop_column('mits_picture', 'team_id')
def downgrade():
op.add_column('mits_picture', sa.Column('team_id', postgresql.UUID(), autoincrement=False, nullable=False))
op.drop_constraint(op.f('fk_mits_picture_game_id_mits_game'), 'mits_picture', type_='foreignkey')
op.create_foreign_key('fk_mits_picture_team_id_mits_team', 'mits_picture', 'mits_team', ['team_id'], ['id'])
op.drop_column('mits_picture', 'game_id')
op.add_column('mits_document', sa.Column('team_id', postgresql.UUID(), autoincrement=False, nullable=False))
op.drop_constraint(op.f('fk_mits_document_game_id_mits_game'), 'mits_document', type_='foreignkey')
op.create_foreign_key('fk_mits_document_team_id_mits_team', 'mits_document', 'mits_team', ['team_id'], ['id'])
op.drop_column('mits_document', 'game_id')
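
# Editor's note (not part of the original migration): scripts like this are not
# imported directly but applied through Alembic's own CLI, e.g.
#   alembic upgrade head      # apply pending migrations, including this one
#   alembic downgrade -1      # roll back one revision
# (the exact invocation depends on the project's alembic.ini configuration).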
|
agpl-3.0
| -4,039,767,539,974,114,300
| 41.297297
| 120
| 0.651757
| false
| 3.193878
| false
| false
| false
|
Coburn37/DoxygenMediawikiBot
|
doxymw.py
|
1
|
9187
|
#A python bot sitting atop PyWikibot and Doxygen to automatically
#add doxygen docs to a wiki for your documentation pleasure
#The main goal of this is to take the power and placement of doxygen docs
#and combine it with the flexibility and remoteness of a wiki

import re
import os
import sys
import subprocess
import errno
import pywikibot

import doxymwglobal
from doxymwsite import DoxyMWSite
from doxymwpage import DoxygenHTMLPage

#Calls doxygen using a config file and outputs everything to a temporary path
def generateDoxygenHTMLDocs():
    #Try the config file
    with open(doxymwglobal.config["doxygen_configPath"]) as fp:
        configLines = fp.readlines()
        fp.seek(0)
        config = fp.read()

    #Parameters we must force to generate proper, small output
    params = {}
    params["doxygen_paramsForce"] = {
        #Output file format and location
        #Critical
        "OUTPUT_DIRECTORY"       : "\"" + doxymwglobal.config["doxygen_tmpPath"] + "\"",
        "GENERATE_HTML"          : "YES",
        "HTML_OUTPUT"            : "html",
        "HTML_FILE_EXTENSION"    : ".html",
        "HIDE_COMPOUND_REFERENCE": "YES", #Cleaner titles

        #Disabling specific HTML sections
        #Possibly critical, makes HTML easier to work with
        "DISABLE_INDEX"          : "YES",
        "SEARCHENGINE"           : "NO",

        #Turn off other generation
        #Not critical but wanted
        #Extra HTML
        "GENERATE_DOCSET"        : "NO",
        "GENERATE_HTMLHELP"      : "NO",
        "GENERATE_QHP"           : "NO",
        "GENERATE_ECLIPSEHELP"   : "NO",
        "GENERATE_TREEVIEW"      : "NO",

        #Other generations
        "GENERATE_LATEX"         : "NO",
        "GENERATE_RTF"           : "NO",
        "GENERATE_XML"           : "NO",
        "GENERATE_DOCBOOK"       : "NO",
        "GENERATE_AUTOGEN_DEF"   : "NO",
        "GENERATE_PERLMOD"       : "NO"
    }

    #Parameters we warn about but do not enforce
    params["doxygen_paramsWarn"] = {
        "CASE_SENSE_NAMES" : "NO" #MediaWiki doesn't support case sensitivity in title names
    }

    #Read each line for params to warn about
    warnParams = params["doxygen_paramsWarn"]
    for line in configLines:
        #Comments
        if line[0] == "#":
            continue
        match = re.match(r'\s*(\S+)\s*=\s+(\S*)', line)
        if match:
            #Groups 1 and 2 hold the key and value (group 0 is the whole match)
            k, v = match.group(1, 2)
            #Warn about specific parameters
            for warn in warnParams.keys():
                if k == warn and v != warnParams[warn]:
                    doxymwglobal.msg(doxymwglobal.warning, "Doxygen config has parameter " + warn + " not set to " + warnParams[warn] + " which may cause problems.")

    #Append the force tags to the end (overwrite the other values)
    forceParams = params["doxygen_paramsForce"]
    for force in forceParams.keys():
        config += "\n" + force + " = " + forceParams[force]

    #Call doxygen, piping the config to it
    with subprocess.Popen([doxymwglobal.config["doxygen_binaryPath"] + "/doxygen.exe", "-"], stdin=subprocess.PIPE, universal_newlines=True) as proc:
        proc.communicate(input=config, timeout=20)
    #Return after finished
#Reads the doxygen documents at the specified path and returns a list of wikiPages
def readDoxygenHTMLDocs():
    #List of all the actual wiki pages
    wikiPages = []

    #Doxygen generates all its files with prefixes by type
    #This is not an exhaustive list, some configuration patterns have not been tested
    #Files, prefix "_"
    #Interfaces, prefix "interface_"
    #Namespaces, prefix "namespace_"
    #Classes, prefix "class_"
    #Members lists, suffix "-members"
    params = {}
    params["doxygen_filePrefixes"] = {
        "-members$"    : "MEMBERS", #Match members lists first
        "^_"           : "FILE",
        "^namespace_"  : "NAMESPACE",
        "^class_"      : "CLASS",
        "^interface_"  : "INTERFACE"
    }
    #Other files we want (useful and don't provide redundancies to MediaWiki functionality)
    #Class hierarchy, hierarchy.html
    params["doxygen_otherFiles"] = [
        "hierarchy"
    ]

    for root, dirs, files in os.walk(doxymwglobal.config["doxygen_tmpPath"] + "/html"):
        for file in files:
            #Get all the file info
            fileAbs = os.path.abspath(root + "\\" + file)
            fileAbsPath, t = os.path.split(fileAbs)
            fileRel = "./" + os.path.relpath(fileAbs, doxymwglobal.config["doxygen_tmpPath"])
            fileRelPath, fileTail = os.path.split(fileRel)
            fileName, fileExt = os.path.splitext(fileTail)

            #Filter out by extension
            if fileExt != ".html":
                continue

            #Check special files and type
            fileDoxyType = None
            #Special ("other") files
            for other in params["doxygen_otherFiles"]:
                if fileName == other:
                    fileDoxyType = "OTHER"
                    break
            #Check type
            if not fileDoxyType:
                for regex, type in params["doxygen_filePrefixes"].items():
                    if re.search(regex, fileName):
                        fileDoxyType = type
                        break

            #Filter out the html files without type
            if fileDoxyType == None:
                continue

            #Make the doxygen wiki page object
            page = DoxygenHTMLPage(fileAbsPath, fileTail, fileDoxyType)
            wikiPages.append(page)
    return wikiPages
def main():
    #( 0 ) Get opts
    from doxymwglobal import option #Default opts

    #Argv[1] must be a command
    if len(sys.argv) < 2:
        doxymwglobal.msg(doxymwglobal.msgType.error, "Too few arguments given", usage=True)
    option["command"] = sys.argv[1]
    if option["command"] != "cleanup" and option["command"] != "update":
        doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid command specified", usage=True)

    #Argv[2:] must be other flags
    for arg in sys.argv[2:]:
        if arg == "-i" or arg == "--interactive":
            option["interactive"] = True
        elif arg == "-w" or arg == "--warnIsError":
            option["warnIsError"] = True
        elif arg == "-h" or arg == "--help":
            printHelp()
            return
        elif arg.find("-d:") == 0 or arg.find("--debug:") == 0:
            whichDebug = arg.split(":")[1]
            if whichDebug != "doxygen" and whichDebug != "unsafeUpdate" and whichDebug != "whichDelete":
                doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid debug specified " + whichDebug, usage=True)
            else:
                option["debug"].append(whichDebug)
        elif arg.find("-p:") == 0 or arg.find("--printLevel:") == 0:
            printLevel = arg.split(":")[1]
            try:
                #Try it as an int
                printLevelInt = int(printLevel)
                option["printLevel"] = doxymwglobal.msgType(printLevelInt)
            except ValueError:
                try:
                    #Try it as a string of the MsgType enum
                    option["printLevel"] = doxymwglobal.msgType[printLevel.lower()]
                except KeyError:
                    doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid printLevel " + printLevel, usage=True)
        else:
            doxymwglobal.msg(doxymwglobal.msgType.error, "Invalid option", usage=True)

    #Do the actual operation
    if option["command"] == "update":
        #( 1 ) Generate the doxygen docs
        generateDoxygenHTMLDocs()

        #( 2 ) Sort through all files and get the ones we want to parse
        wikiPages = readDoxygenHTMLDocs()

        #( 3 ) Ready the page by getting everything into valid wiki markup
        for page in wikiPages:
            doxymwglobal.msg(doxymwglobal.msgType.info, "Converting " + page.filename)
            page.convert(wikiPages)

        #Debug the first portion, outputs everything to an html file
        if "doxygen" in option["debug"]:
            debugPath = doxymwglobal.debugPath()
            for page in wikiPages:
                doxymwglobal.msg(doxymwglobal.msgType.debug, "Debug output " + page.filename)
                fp = open(debugPath + "/" + page.filename, 'w', errors="replace")
                strr = page.mwtitle+"<br><br>"+page.mwcontents
                fp.write(strr)
            return

    #( 4 ) Perform all the wiki tasks
    #Make sure we're logged in
    site = pywikibot.Site()
    #Make a site, run the command
    site = DoxyMWSite(site)
    if option["command"] == "cleanup":
        site.cleanup()
    if option["command"] == "update":
        site.update(wikiPages)

    #( 5 ) We're done!
    doxymwglobal.msg(doxymwglobal.msgType.info, "Done")

if __name__ == '__main__':
    main()
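
# Editor's note (not part of the original file): based on the option parsing in
# main() above, typical invocations would look like
#   python doxymw.py update -i
#   python doxymw.py cleanup --printLevel:info
# where "update" regenerates and publishes the docs and "cleanup" runs the
# site's cleanup task; anything beyond these two commands and the flags parsed
# above is a guess.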
|
mit
| -1,648,275,014,650,735,000
| 36.966942
| 169
| 0.560139
| false
| 4.125281
| true
| false
| false
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Representer/__init__.py
|
1
|
25779
|
# -*- coding: utf-8 -*-
"""

<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>


The Representer is an important module for beginning to visualize
the structures of the instanced variables in the environment.
The idea is to use an indented representation like the one of the json.dump
function, but with a more suitable (if maybe dirty) access to the
AlineaStr of each line of the output, depending on the state
of the variables. Instances created from the decorated class have
a __repr__ method that mentions, for each represented attribute, where
it comes from: <Spe> (resp. <Base>) if it was defined at the level of the \_\_class\_\_,
and <Instance> (resp. <Class>) if it is fetched from the <InstanceVariable>.__dict__
(resp. <InstanceVariable>.__class__.__dict__).

"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Classors.Inspecter"
DecorationModuleStr=BaseModuleStr
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
import copy
import inspect
import numpy
import sys
from ShareYourSystem.Standards.Objects import Initiator
#</ImportSpecificModules>
#<DefineLocals>
RepresentingDictIndentStr=" "
RepresentingListIndentStr=" "
RepresentingIndentStr=" /"
RepresentingEofStr="\n"
RepresentingIdBool=True
RepresentingCircularStr="{...}"
RepresentedAlineaStr=""
RepresentedAlreadyIdIntsList=[]
#</DefineLocals>
#<DefineFunctions>
def getRepresentedNumpyArray(_NumpyArray):

    #Definition the ShapeList
    ShapeList=list(numpy.shape(_NumpyArray))

    #debug
    '''
    print('Representer l.25 : getRepresentedNumpyArray')
    print('ShapeList is',ShapeList)
    print('')
    '''

    #Return the array directly if it is small, or else a short represented version of it
    if (len(ShapeList)==1 and ShapeList[0]<3) or (len(ShapeList)>1 and ShapeList[1]<3):
        return str(_NumpyArray)
    return "<numpy.ndarray shape "+str(ShapeList)+">"
def getRepresentedPointerStrWithVariable(_Variable,**_KwargVariablesDict):

    #debug
    '''
    print('Representer l.39 : getRepresentedPointerStrWithVariable')
    print('')
    '''

    #set in the _KwargVariablesDict
    if 'RepresentedDeepInt' not in _KwargVariablesDict:
        _KwargVariablesDict['RepresentedDeepInt']=0

    #Definition the Local alinea
    RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""

    #Define
    if type(_Variable).__name__=='Database':
        RepresentedVariableStr=_Variable._Database__name
    else:
        #Note: the attribute name must be quoted here; the original passed the
        #module-level __name__ by mistake
        RepresentedVariableStr=_Variable.__name__ if hasattr(_Variable,'__name__') else ""

    #Debug
    '''
    print('l 85 Representer')
    print('type(_Variable).__name__ is ')
    print(type(_Variable).__name__)
    print('RepresentedVariableStr is ')
    print(RepresentedVariableStr)
    print('')
    '''

    #Check
    if RepresentingIdBool:
        return RepresentedLocalAlineaStr+"<"+RepresentedVariableStr+" ("+_Variable.__class__.__name__+"), "+str(id(_Variable))+">"
    else:
        return RepresentedLocalAlineaStr+"<"+RepresentedVariableStr+" ("+_Variable.__class__.__name__+")"+" >"
def getRepresentedStrWithDictatedVariable(
    _DictatedVariable,**_KwargVariablesDict
):

    #set in the _KwargVariablesDict
    if 'RepresentedDeepInt' not in _KwargVariablesDict:
        _KwargVariablesDict['RepresentedDeepInt']=0

    #debug
    '''
    print('Representer l.59 : getRepresentedStrWithDictatedVariable')
    print('_KwargVariablesDict is ',str(_KwargVariablesDict))
    print('')
    '''

    #Global
    global RepresentedAlineaStr

    #Definition the LocalRepresentedAlineaStr
    LocalRepresentedAlineaStr=RepresentedAlineaStr+"".join(
        [RepresentingIndentStr]*(_KwargVariablesDict['RepresentedDeepInt']))

    #Init the RepresentedDictStr
    RepresentedDictStr="\n"+LocalRepresentedAlineaStr+"{ "

    #Scan the Items (integrative loop)
    if type(_DictatedVariable)!=dict and hasattr(_DictatedVariable,"items"):

        #debug
        '''
        print('l 135 Representer')
        print('_DictatedVariable is ')
        print(_DictatedVariable)
        print('')
        '''

        #items
        RepresentedTuplesList=_DictatedVariable.items()

    else:

        #sort
        RepresentedTuplesList=sorted(
            _DictatedVariable.iteritems(), key=lambda key_value: key_value[0]
        )

    #Integrative loop for serializing the items
    for __RepresentedKeyStr,__RepresentedValueVariable in RepresentedTuplesList:

        #debug
        '''
        print('Representer l.127')
        print('__RepresentedKeyStr is',__RepresentedKeyStr)
        print('')
        '''

        #set the begin of the line
        RepresentedDictStr+="\n"+LocalRepresentedAlineaStr+RepresentingDictIndentStr

        #Force the cast into Str
        if type(__RepresentedKeyStr) not in [unicode,str]:
            __RepresentedKeyStr=str(__RepresentedKeyStr)

        #Get the WordStrsList
        WordStrsList=SYS.getWordStrsListWithStr(__RepresentedKeyStr)

        #Init the RepresentedValueVariableStr
        RepresentedValueVariableStr="None"

        #Split the case if it is a pointing variable or not
        if len(WordStrsList)>0:

            #Value is displayed
            """
            if SYS.getWordStrsListWithStr(__RepresentedKeyStr)[-1]=="Pointer":

                #Pointer Case
                RepresentedValueVariableStr=getRepresentedPointerStrWithVariable(
                    __RepresentedValueVariable,
                    **_KwargVariablesDict
                )
            """
            """
            elif ''.join(SYS.getWordStrsListWithStr(__RepresentedKeyStr)[-2:])=="PointersList":

                #debug
                '''
                print('__RepresentedValueVariable is ',__RepresentedValueVariable)
                print('')
                '''

                #Pointer Case
                RepresentedValueVariableStr=str(
                    map(
                        lambda ListedVariable:
                        getRepresentedPointerStrWithVariable(
                            ListedVariable,
                            **_KwargVariablesDict),
                        __RepresentedValueVariable
                    )
                ) if type(__RepresentedValueVariable)==list else "None"
            """

        #Special Suffix Cases
        if RepresentedValueVariableStr=="None":

            #debug
            '''
            print('go to represent')
            print('__RepresentedKeyStr is ',__RepresentedKeyStr)
            print('id(__RepresentedValueVariable) is ',id(__RepresentedValueVariable))
            print('')
            '''

            #Other Cases
            RepresentedValueVariableStr=getRepresentedStrWithVariable(
                __RepresentedValueVariable,
                **_KwargVariablesDict
            )

        #Key and Value Case
        RepresentedDictStr+="'"+__RepresentedKeyStr+"' : "+RepresentedValueVariableStr

    #Add a last line
    RepresentedDictStr+="\n"+LocalRepresentedAlineaStr+"}"

    #debug
    '''
    print('RepresentedDictStr is ',RepresentedDictStr)
    print('')
    '''

    #return the DictStr
    return RepresentedDictStr
def getRepresentedStrWithListedVariable(_ListedVariable,**_KwargVariablesDict):

    #Global
    global RepresentedAlineaStr

    #set in the _KwargVariablesDict
    if 'RepresentedDeepInt' not in _KwargVariablesDict:
        _KwargVariablesDict['RepresentedDeepInt']=0

    #debug
    '''
    print('Representer l.166 : getRepresentedStrWithListedVariable')
    print('_KwargVariablesDict is ',str(_KwargVariablesDict))
    print('_ListedVariable is '+str(_ListedVariable))
    print('')
    '''

    #Init the RepresentedDictStr
    if type(_ListedVariable)==list:
        BeginBracketStr='['
        EndBracketStr=']'
    else:
        BeginBracketStr='('
        EndBracketStr=')'

    #Definition the LocalRepresentedAlineaStr
    LocalRepresentedAlineaStr=RepresentedAlineaStr+"".join(
        [RepresentingIndentStr]*(_KwargVariablesDict['RepresentedDeepInt']))

    #Do the first Jump
    RepresentedListStr="\n"+LocalRepresentedAlineaStr+BeginBracketStr

    #Scan the Items (integrativ loop)
    for ListedVariableInt,ListedVariable in enumerate(_ListedVariable):

        #set the begin of the line
        RepresentedListStr+="\n"+LocalRepresentedAlineaStr+RepresentingListIndentStr

        #Get the represented version
        RepresentedValueVariableStr=getRepresentedStrWithVariable(
            ListedVariable,**dict(_KwargVariablesDict,**{'RepresentingAlineaIsBool':False})
        )

        #Key and Value Case
        RepresentedListStr+=str(ListedVariableInt)+" : "+RepresentedValueVariableStr

    #Add a last line
    RepresentedListStr+="\n"+LocalRepresentedAlineaStr+EndBracketStr

    #return the DictStr
    return RepresentedListStr
def getRepresentedStrWithVariable(_Variable,**_KwargVariablesDict):

    #Define global
    global RepresentedAlreadyIdIntsList

    #set in the _KwargVariablesDict
    if 'RepresentedDeepInt' not in _KwargVariablesDict:
        _KwargVariablesDict['RepresentedDeepInt']=0

    #debug
    '''
    print('Representer l.213 : getRepresentedStrWithVariable')
    #print('_KwargVariablesDict is ',str(_KwargVariablesDict))
    #print('_Variable is '+str(_Variable))
    print('type(_Variable) is '+str(type(_Variable)))
    #print("hasattr(_Variable,'__repr__') is "+str(hasattr(_Variable,"__repr__")))
    ##if hasattr(_Variable,"__repr__"):
    #	print('hasattr(_Variable.__class__,"InspectedOrderedDict") is '+str(
    #		hasattr(_Variable.__class__,"InspectedOrderedDict")))
    #	if hasattr(_Variable.__class__,"InspectedOrderedDict"):
    #		print("_Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesSetKeyStr'] is "+str(
    #			_Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesSetKeyStr']))
    #		print(_Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesSetKeyStr'])
    print('')
    '''

    #None type
    if type(_Variable)==None.__class__:
        return "None"

    #Dict types print
    #if type(_Variable) in [dict,collections.OrderedDict]:
    if hasattr(_Variable,'items') and type(_Variable)!=type:

        #Increment the deep
        _KwargVariablesDict['RepresentedDeepInt']+=1

        #debug
        '''
        print('This is a dictated type so get a represent like a dict')
        print('')
        '''

        #id
        RepresentedIdInt=id(_Variable)

        #debug
        '''
        print('RepresentedIdInt is ',RepresentedIdInt)
        print('RepresentedAlreadyIdIntsList is ',RepresentedAlreadyIdIntsList)
        print('')
        '''

        #Check if it was already represented
        if RepresentedIdInt not in RepresentedAlreadyIdIntsList:

            #Debug
            '''
            print('RepresentedAlreadyIdIntsList is ',RepresentedAlreadyIdIntsList)
            print('')
            '''

            #append
            RepresentedAlreadyIdIntsList.append(RepresentedIdInt)

            #Return the repr of the _Variable but shifted with the RepresentedAlineaStr
            RepresentedStr=getRepresentedStrWithDictatedVariable(
                _Variable,
                **_KwargVariablesDict
            )
        else:

            #Return the circular Str
            RepresentedStr=RepresentingCircularStr+getRepresentedPointerStrWithVariable(_Variable)

        #Debug
        '''
        print('RepresentedIdInt is ',RepresentedIdInt)
        print('RepresentedStr is ',RepresentedStr)
        print('')
        '''

        #return
        return RepresentedStr

    #List types print
    elif type(_Variable) in [list,tuple]:

        #id
        RepresentedIdInt=id(_Variable)

        #Check if it was already represented
        if RepresentedIdInt not in RepresentedAlreadyIdIntsList:

            #debug
            '''
            print('This is a listed type so get a represent like a list')
            print('')
            '''

            #append
            RepresentedAlreadyIdIntsList.append(RepresentedIdInt)

            #Check if it is a List of Objects or Python Types
            if all(
                map(
                    lambda ListedVariable:
                    type(ListedVariable) in [float,int,str,unicode,numpy.float64] or ListedVariable==None,
                    _Variable
                )
            )==False:

                #Increment the deep
                _KwargVariablesDict['RepresentedDeepInt']+=1

                #debug
                '''
                print('Print a represented version of the list')
                print('')
                '''

                #Return
                RepresentedStr=getRepresentedStrWithListedVariable(_Variable,**_KwargVariablesDict)

            else:

                #debug
                '''
                print('Here just print the list directly')
                print('')
                '''

                #Definition the Local alinea
                RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""

                #Return
                RepresentedStr=RepresentedLocalAlineaStr+repr(
                    _Variable).replace("\n","\n"+RepresentedLocalAlineaStr)

            #return
            return RepresentedStr

        else:

            #Return the circular Str
            return RepresentingCircularStr+getRepresentedPointerStrWithVariable(_Variable)

    #Instance print
    elif type(_Variable).__name__ in ["instancemethod"]:

        #Debug
        '''
        print('Representer l 421')
        print('This is a method ')
        print('_Variable.__name__ is ',_Variable.__name__)
        print('')
        '''

        #Definition the Local alinea
        RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""

        #append
        RepresentedAlreadyIdIntsList.append(_Variable.im_self)

        #return RepresentedAlineaStr+"instancemethod"
        RepresentedStr=RepresentedLocalAlineaStr
        RepresentedStr+="< bound method "+_Variable.__name__
        RepresentedStr+=" of "+str(_Variable.im_self.__class__)
        RepresentedStr+=" "+str(id(_Variable.im_self))+" >"
        #RepresentedStr='inst'

        #return
        return RepresentedStr

    #Str types
    elif type(_Variable) in SYS.StrTypesList:

        #debug
        '''
        print('This is a Str type so get a represent like a Str')
        print('')
        '''

        #Definition the Local alinea
        RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict['RepresentedDeepInt']==0 else ""

        #Return
        return RepresentedLocalAlineaStr+_Variable.replace("\n","\n"+RepresentedLocalAlineaStr)

    #Other
    elif hasattr(_Variable,"__repr__") and hasattr(
        _Variable.__class__,"InspectedArgumentDict"
    ) and '__repr__' in _Variable.__class__.InspectedArgumentDict and _Variable.__class__.InspectedArgumentDict[
        '__repr__']['KwargVariablesSetKeyStr']!="":

        #debug
        '''
        print('This is a representer so call the repr of it with the _KwargVariablesDict')
        print('type(_Variable) is ',type(_Variable))
        print('id(_Variable) is ',id(_Variable))
        print('')
        '''

        #id
        RepresentedIdInt=id(_Variable)

        #Check if it was already represented
        if RepresentedIdInt not in RepresentedAlreadyIdIntsList:

            #append
            RepresentedAlreadyIdIntsList.append(RepresentedIdInt)

            #Return the repr of the _Variable but shifted with the RepresentedAlineaStr
            RepresentedStr=_Variable.__repr__(**_KwargVariablesDict)

            #return
            return RepresentedStr

        else:

            #Return the circular Str
            return RepresentingCircularStr+getRepresentedPointerStrWithVariable(_Variable)

    else:

        #Debug
        '''
        print('This is not identified so call the repr of it')
        print('')
        '''

        #Definition the Local alinea
        RepresentedLocalAlineaStr=RepresentedAlineaStr if _KwargVariablesDict[
            'RepresentedDeepInt']==0 else ""

        #Define
        RepresentedIdInt=id(_Variable)

        #Debug
        '''
        print('RepresentedIdInt is ',RepresentedIdInt)
        print('RepresentedAlreadyIdIntsList is ',RepresentedAlreadyIdIntsList)
        print('')
        '''

        #Check if it was already represented
        if RepresentedIdInt not in RepresentedAlreadyIdIntsList:

            #debug
            '''
            print('type(_Variable) is ',type(_Variable))
            print('')
            '''

            #Append but only for mutables variable
            if type(_Variable) not in [bool,str,int,float]:
                RepresentedAlreadyIdIntsList.append(RepresentedIdInt)
            else:

                #debug
                '''
                print('_Variable is ',_Variable)
                print('')
                '''

                pass

            #Return a repr of the _Variable but shifted with the RepresentedAlineaStr
            RepresentedStr=RepresentedLocalAlineaStr+repr(_Variable).replace(
                "\n",
                "\n"+RepresentedLocalAlineaStr
            )

            #return
            return RepresentedStr

        else:

            #Return the circular Str
            return RepresentedLocalAlineaStr+RepresentingCircularStr+getRepresentedPointerStrWithVariable(
                _Variable)
def _print(_Variable,**_KwargVariablesDict):
    print(represent(_Variable,**_KwargVariablesDict))

def represent(_Variable,**_KwargVariablesDict):

    #Definition the global
    global RepresentedAlineaStr,RepresentedAlreadyIdIntsList

    #Debug
    '''
    print('Representer l.545')
    print('Reinit the RepresentedAlreadyIdIntsList')
    print('')
    '''

    #Reinit
    RepresentedAlreadyIdIntsList=[]

    #Debug
    '''
    print('Representer l.554')
    print('_KwargVariablesDict is ',_KwargVariablesDict)
    print('')
    '''

    #Represent without shifting the Strs or not
    if 'RepresentingAlineaIsBool' not in _KwargVariablesDict or _KwargVariablesDict['RepresentingAlineaIsBool']:
        return getRepresentedStrWithVariable(_Variable,**_KwargVariablesDict)
    else:
        RepresentedOldAlineaStr=RepresentedAlineaStr
        RepresentedAlineaStr=""
        RepresentedStr=getRepresentedStrWithVariable(_Variable,**_KwargVariablesDict)
        RepresentedAlineaStr=RepresentedOldAlineaStr
        return RepresentedStr
#</DefineFunctions>

#Link
def __main__represent(_RepresentingStr,**_KwargVariablesDict):
    return represent(
        _RepresentingStr,
        **dict(_KwargVariablesDict,**{'RepresentingAlineaIsBool':False})
    )
def __main__print(_RepresentingStr,**_KwargVariablesDict):
    return _print(
        _RepresentingStr,
        **dict(_KwargVariablesDict,**{'RepresentingAlineaIsBool':False})
    )
SYS._str = __main__represent
SYS._print = __main__print
#<DefineClass>
@DecorationClass()
class RepresenterClass(BaseClass):

    def default_init(self,**_KwargVariablesDict):

        #Call the parent init method
        BaseClass.__init__(self,**_KwargVariablesDict)

    def __call__(self,_Class):

        #debug
        '''
        print('Representer l.478 : _Class is ',_Class)
        print('')
        '''

        #Call the parent init method
        BaseClass.__call__(self,_Class)

        #debug
        '''
        print('Representer l.485 : self.DoClass is ',self.DoClass)
        print('')
        '''

        #Represent
        self.represent()

        #Return
        return _Class

    def do_represent(self):

        #alias
        RepresentedClass=self.DoClass

        #debug
        '''
        print('Representer l.352 : RepresentedClass is ',RepresentedClass)
        print('')
        '''

        #Check
        '''
        if hasattr(RepresentedClass,'RepresentingKeyStrsList')==False or (
            len(RepresentedClass.__bases__)>0 and hasattr(RepresentedClass.__bases__[0
            ],'RepresentingKeyStrsList') and RepresentedClass.__bases__[0
            ].RepresentingKeyStrsList==RepresentedClass.RepresentingKeyStrsList):

            #init
            RepresentedClass.RepresentingKeyStrsList=[]
        '''
        RepresentedClass.RepresentingKeyStrsList=RepresentedClass.DefaultSetKeyStrsList

        #init
        #RepresentedClass.RepresentingSkipKeyStrsList=None

        #init
        #RepresentedClass.RepresentingForceKeyStrsList=None

        #set the BaseKeyStrsList
        KeyStrsSet=set(
            SYS.collect(
                RepresentedClass,
                '__bases__',
                'RepresentingKeyStrsList'
            )
        )
        #KeyStrsSet.difference_update(set(RepresentedClass.RepresentingKeyStrsList))
        RepresentedClass.RepresentedBaseKeyStrsList=list(KeyStrsSet)

        #Split between the one from the class or not
        [
            RepresentedClass.RepresentedSpecificKeyStrsList,
            RepresentedClass.RepresentedNotSpecificKeyStrsList
        ]=SYS.groupby(
            lambda __KeyStr:
            __KeyStr not in RepresentedClass.RepresentedBaseKeyStrsList,
            RepresentedClass.RepresentingKeyStrsList
        )

        #debug
        '''
        print(
            RepresentedClass.__name__,
            #Class.__mro__,
            #Class.RepresentedNotGettingStrsList,
            list(RepresentedClass.RepresentedBasedKeyStrsList)
        )
        '''

        #Add to the KeyStrsList
        RepresentedClass.KeyStrsList+=[
            'RepresentingKeyStrsList',
            'RepresentingSkipKeyStrsList',
            'RepresentingForceKeyStrsList',
            'RepresentedBaseKeyStrsList',
            'RepresentedSpecificKeyStrsList',
            'RepresentedNotSpecificKeyStrsList',
        ]

        """
        #Definition the representing methods
        def represent(_InstanceVariable,**_KwargVariablesDict):

            #debug
            '''
            _InstanceVariable.debug(('RepresentedClass',RepresentedClass,[
                'RepresentingKeyStrsList',
                'RepresentedBaseKeyStrsList',
                'RepresentedSpecificKeyStrsList',
                'RepresentedNotSpecificKeyStrsList'
            ]))
            '''

            #Represent the Specific KeyStrs
            RepresentedTuplesList=map(
                lambda __RepresentingSpecificKeyStr:
                (
                    "<Spe>"+("<Instance>"
                    if __RepresentingSpecificKeyStr in _InstanceVariable.__dict__
                    else "<Class>"
                    )+__RepresentingSpecificKeyStr
                    ,
                    getattr(_InstanceVariable,__RepresentingSpecificKeyStr)
                ),
                RepresentedClass.RepresentedSpecificKeyStrsList
            )

            #Represent the BaseKeyStrs
            if 'RepresentingBaseKeyStrsListBool' in _KwargVariablesDict and _KwargVariablesDict['RepresentingBaseKeyStrsListBool']:
                RepresentedTuplesList+=map(
                    lambda __NotSpecificKeyStrsList:
                    (
                        "<Base>"+("<Instance>"
                        if __NotSpecificKeyStrsList in _InstanceVariable.__dict__
                        else "<Class>"
                        )+__NotSpecificKeyStrsList
                        ,
                        getattr(_InstanceVariable,__NotSpecificKeyStrsList)
                    ),
                    RepresentedClass.RepresentedNotSpecificKeyStrsList
                )
                RepresentedTuplesList+=map(
                    lambda __RepresentedBaseKeyStr:
                    (
                        "<Base>"+("<Instance>"
                        if __RepresentedBaseKeyStr in _InstanceVariable.__dict__
                        else "<Class>"
                        )+__RepresentedBaseKeyStr
                        ,
                        getattr(_InstanceVariable,__RepresentedBaseKeyStr)
                    ),
                    RepresentedClass.RepresentedBaseKeyStrsList
                )

            #Represent the NewInstanceKeyStrs in the __dict__
            if 'RepresentingNewInstanceKeyStrsListBool' not in _KwargVariablesDict or _KwargVariablesDict[
                'RepresentingNewInstanceKeyStrsListBool']:

                #filter
                RepresentedNewInstanceTuplesList=SYS._filter(
                    lambda __NewItemTuple:
                    __NewItemTuple[0] not in RepresentedClass.DefaultSetKeyStrsList+RepresentedClass.DefaultBaseSetKeyStrsList,
                    _InstanceVariable.__dict__.items()
                )

                #Debug
                '''
                print('RepresentedNewInstanceTuplesList is ')
                print(RepresentedNewInstanceTuplesList)
                print('RepresentedClass.RepresentingSkipKeyStrsList is ')
                print(RepresentedClass.RepresentingSkipKeyStrsList)
                print('')
                '''

                #Check
                if _InstanceVariable.RepresentingSkipKeyStrsList==None:
                    _InstanceVariable.RepresentingSkipKeyStrsList=[]

                #filter
                RepresentedNewInstanceTuplesList=SYS._filter(
                    lambda __RepresentedNewInstanceTuple:
                    __RepresentedNewInstanceTuple[0] not in _InstanceVariable.RepresentingSkipKeyStrsList,
                    RepresentedNewInstanceTuplesList
                )

                #Debug
                '''
                print('RepresentedNewInstanceTuplesList is ')
                print(RepresentedNewInstanceTuplesList)
                print('')
                '''

                #map
                RepresentedTuplesList+=map(
                    lambda __NewItemTuple:
                    (
                        "<New><Instance>"+__NewItemTuple[0],
                        __NewItemTuple[1]
                    ),
                    RepresentedNewInstanceTuplesList
                )

            #Represent the NewClassKeyStrs in the _RepresentedClass__.__dict__
            if 'RepresentingNewClassKeyStrsListBool' not in _KwargVariablesDict or _KwargVariablesDict[
                'RepresentingNewClassKeyStrsListBool']:
                RepresentedTuplesList+=map(
                    lambda __NewKeyStr:
                    (
                        "<New><Class>"+__NewKeyStr,
                        _InstanceVariable.__class__.__dict__[__NewKeyStr]
                    ),
                    SYS._filter(
                        lambda __KeyStr:
                        __KeyStr not in RepresentedClass.KeyStrsList and __KeyStr not in _InstanceVariable.__dict__,
                        SYS.getKeyStrsListWithClass(
                            _InstanceVariable.__class__
                        )
                    )
                )

            if 'RepresentingNotConcludeTuplesList' in _KwargVariablesDict:

                #Debug
                '''
                print('l 792 Representer')
                print('RepresentedTuplesList is ')
                print(RepresentedTuplesList)
                print('')
                '''

                #filter
                RepresentedTuplesList=SYS._filter(
                    lambda __RepresentedTuple:
                    any(
                        map(
                            lambda __RepresentingNotConcludeTuple:
                            __RepresentingNotConcludeTuple[0](
                                __RepresentedTuple,
                                __RepresentingNotConcludeTuple[1]
                            ),
                            _KwargVariablesDict['RepresentingNotConcludeTuplesList']
                        )
                    )==False,
                    RepresentedTuplesList
                )

                #Debug
                '''
                print('l 815 Representer')
                print('RepresentedTuplesList is ')
                print(RepresentedTuplesList)
                print('')
                '''

            if 'RepresentingKeyStrsList' in _KwargVariablesDict:
                RepresentedTuplesList+=map(
                    lambda __RepresentingKeyStr:
                    (
                        "<Spe><Instance>"+__RepresentingKeyStr,
                        _InstanceVariable.__dict__[__RepresentingKeyStr]
                    )
                    if __RepresentingKeyStr in _InstanceVariable.__dict__ and __RepresentingKeyStr not in RepresentedClass.DefaultSetKeyStrsList
                    else(
                        (
                            "<Base><Instance>"+__RepresentingKeyStr,
                            _InstanceVariable.__dict__[__RepresentingKeyStr]
                        )
                        if __RepresentingKeyStr in _InstanceVariable.__dict__ and __RepresentingKeyStr in RepresentedClass.DefaultBaseSetKeyStrsList
                        else
                        (
                            (
                                "<Base><Class>"+__RepresentingKeyStr,
                                getattr(_InstanceVariable,__RepresentingKeyStr)
                            )
                            if __RepresentingKeyStr not in _InstanceVariable.__dict__
                            else
                            (
                                "<New><Instance>"+__RepresentingKeyStr,
                                _InstanceVariable.__dict__[__RepresentingKeyStr]
                            )
                        )
                    ),
                    _KwargVariablesDict['RepresentingKeyStrsList'
                    ]+_InstanceVariable.RepresentingForceKeyStrsList
                )

            #Append
            global RepresentedAlreadyIdIntsList

            #debug
            '''
            print('Represener l.629')
            print('id(_InstanceVariable) is ',id(_InstanceVariable))
            print('_InstanceVariable not in RepresentedAlreadyIdIntsList is ',str(
                _InstanceVariable not in RepresentedAlreadyIdIntsList))
            print('')
            '''

            #define the RepresentedStr
            return getRepresentedPointerStrWithVariable(
                _InstanceVariable
            )+getRepresentedStrWithVariable(
                dict(RepresentedTuplesList),
                **_KwargVariablesDict
            )
        """

        #Bound and set in the InspectedOrderedDict
        #RepresentedClass.__repr__=represent
        #RepresentedClass.InspectedArgumentDict['__repr__']=SYS.ArgumentDict(
        #	RepresentedClass.__repr__)

#</DefineClass>

#set in the InitiatorClass
Initiator.InitiatorClass.RepresentedNotGettingStrsList=['InitiatingUpdateBool']
Initiator.InitiatorClass.RepresentedSpecificKeyStrsList=['InitiatingUpdateBool']
|
mit
| 3,242,304,788,324,602,400
| 25.881126
| 132
| 0.704255
| false
| 3.384848
| false
| false
| false
|
peheje/baselines
|
baselines/deepq/experiments/atari/enjoy.py
|
1
|
2514
|
import argparse
import gym
import os
import numpy as np

from gym.monitoring import VideoRecorder

import baselines.common.tf_util as U

from baselines import deepq
from baselines.common.misc_util import (
    boolean_flag,
    SimpleMonitor,
)
from baselines.common.atari_wrappers_deprecated import wrap_dqn
from baselines.deepq.experiments.atari.model import model, dueling_model


def parse_args():
    parser = argparse.ArgumentParser("Run an already learned DQN model.")
    # Environment
    parser.add_argument("--env", type=str, required=True, help="name of the game")
    parser.add_argument("--model-dir", type=str, default=None, help="load model from this directory. ")
    parser.add_argument("--video", type=str, default=None, help="Path to mp4 file where the video of first episode will be recorded.")
    boolean_flag(parser, "stochastic", default=True, help="whether or not to use stochastic actions according to models eps value")
    boolean_flag(parser, "dueling", default=False, help="whether or not to use dueling model")
    return parser.parse_args()


def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    env = SimpleMonitor(env)
    env = wrap_dqn(env)
    return env


def play(env, act, stochastic, video_path):
    num_episodes = 0
    video_recorder = None
    video_recorder = VideoRecorder(
        env, video_path, enabled=video_path is not None)
    obs = env.reset()
    while True:
        env.unwrapped.render()
        video_recorder.capture_frame()
        action = act(np.array(obs)[None], stochastic=stochastic)[0]
        obs, rew, done, info = env.step(action)
        if done:
            obs = env.reset()
        if len(info["rewards"]) > num_episodes:
            if len(info["rewards"]) == 1 and video_recorder.enabled:
                # save video of first episode
                print("Saved video.")
                video_recorder.close()
                video_recorder.enabled = False
            print(info["rewards"][-1])
            num_episodes = len(info["rewards"])


if __name__ == '__main__':
    with U.make_session(4) as sess:
        args = parse_args()
        env = make_env(args.env)
        act = deepq.build_act(
            make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
            q_func=dueling_model if args.dueling else model,
            num_actions=env.action_space.n)
        U.load_state(os.path.join(args.model_dir, "saved"))
        play(env, act, args.stochastic, args.video)
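
# Editor's note (not part of the original file): a plausible invocation given
# the argparse setup above (the game name and paths are illustrative only):
#   python enjoy.py --env Pong --model-dir /path/to/trained/model --video /tmp/pong.mp4
# --env is appended to "NoFrameskip-v4" in make_env(), so any Atari game name
# with that Gym variant should work.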
|
mit
| -9,141,925,088,257,811,000
| 35.434783
| 134
| 0.650756
| false
| 3.596567
| false
| false
| false
|
pbs/django-filer
|
filer/management/commands/take_out_filer_trash.py
|
1
|
1910
|
from django.core.management.base import BaseCommand
from filer.models import File, Folder
from filer import settings as filer_settings
from django.utils import timezone
from datetime import timedelta


class Command(BaseCommand):
    help = "Hard-deletes old files and folders from filer trash."

    def handle(self, *args, **options):
        no_of_sec = filer_settings.FILER_TRASH_CLEAN_INTERVAL
        time_threshold = timezone.now() - timedelta(seconds=no_of_sec)
        files_ids = File.trash.filter(deleted_at__lt=time_threshold)\
                              .values_list('id', flat=True)
        folder_ids = Folder.trash.filter(deleted_at__lt=time_threshold)\
                                 .order_by('tree_id', '-level').values_list('id', flat=True)

        if not folder_ids and not files_ids:
            self.stdout.write("No old files or folders.\n")
            return

        for file_id in files_ids:
            a_file = File.trash.get(id=file_id)
            self.stdout.write("Deleting file %s: %s\n" % (
                file_id, repr(a_file.file.name)))
            try:
                a_file.delete(to_trash=False)
            except Exception as e:
                self.stderr.write("%s\n" % str(e))

        for folder_id in folder_ids:
            a_folder = Folder.trash.get(id=folder_id)
            ancestors = a_folder.get_ancestors(include_self=True)
            path = repr('/'.join(ancestors.values_list('name', flat=True)))
            if File.all_objects.filter(folder=folder_id).exists():
                self.stdout.write("Cannot delete folder %s: %s since is "
                                  "not empty.\n" % (folder_id, path))
                continue
            self.stdout.write(
                "Deleting folder %s: %s\n" % (folder_id, path))
            try:
                a_folder.delete(to_trash=False)
            except Exception as e:
                self.stderr.write("%s\n" % str(e))
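
# Editor's note (not part of the original file): as a Django management command
# (named after this file) it is invoked as `python manage.py take_out_filer_trash`,
# typically from a cron job; the FILER_TRASH_CLEAN_INTERVAL setting, in seconds,
# controls how long items stay in the trash before being hard-deleted.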
|
bsd-3-clause
| -1,781,223,535,377,464,600
| 40.521739
| 75
| 0.577487
| false
| 3.759843
| false
| false
| false
|
vmagnin/pyxmltv
|
telecharger_xmltv.py
|
1
|
2764
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Vincent MAGNIN, 2016-2019

'''
Function to download an XMLTV file
'''

import zipfile
import os
import re
import pickle
from urllib.request import urlretrieve, urlopen, URLError


def telecharger_xmltv(url, nom_fichier):
    """
    Downloads the file located at url if a new version is available
    """
    # Retrieve the HTTP ETag of the file that may already be
    # present in the script's directory:
    try:
        with open("ETag_xmltv.pickle", 'rb') as FICHIER_ETag:
            ANCIEN_ETag = pickle.load(FICHIER_ETag)
    except OSError:
        ANCIEN_ETag = "0"

    # Retrieve the HTTP ETag of the zip on the server:
    try:
        entete = urlopen(url+nom_fichier).info()
        match = re.search(r'ETag: "(\w+-\w+-\w+)"', str(entete))
        ETag = match.group(1)
    except URLError:
        ETag = "00"
        print("URL erronée")
    except AttributeError:  # If match is empty (no ETag available)
        ETag = "00"
        # Try to use the Last-Modified field instead
        try:
            entete = urlopen(url+nom_fichier).info()
            match = re.search(r'Last-Modified: (.*)', str(entete))
            ETag = match.group(1)
        except AttributeError:
            ANCIEN_ETag = "0"  # Force the download of the zip

    # Re-download the zip if it has been modified on the server:
    if ETag != ANCIEN_ETag:
        print("Chargement de la dernière version en ligne...")
        try:
            urlretrieve(url+nom_fichier, nom_fichier)
            with zipfile.ZipFile(nom_fichier, 'r') as zfile:
                zfile.extractall()
            # Save the ETag of the zip file:
            with open("ETag_xmltv.pickle", 'wb') as FICHIER_ETag:
                pickle.dump(ETag, FICHIER_ETag)
        except URLError:
            print("Attention ! Téléchargement nouveau fichier impossible...")
            if not os.access(nom_fichier, os.F_OK):
                print("Erreur : pas de fichier dans le répertoire courant !")
                exit(2)
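
# Editor's note (not part of the original file): a hypothetical call —
#   telecharger_xmltv("https://example.com/xmltv/", "tvguide.zip")
# (both arguments illustrative) fetches url + nom_fichier only when the
# server's ETag (or Last-Modified) differs from the value cached in
# ETag_xmltv.pickle, then extracts the zip into the current directory.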
|
gpl-3.0
| -181,304,397,692,986,200
| 35.972973
| 77
| 0.638523
| false
| 3.352941
| false
| false
| false
|
mmgen/mmgen
|
test/test_py_d/ts_ref.py
|
1
|
13630
|
#!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, command-line Bitcoin cold storage solution
# Copyright (C)2013-2021 The MMGen Project <mmgen@tuta.io>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""
ts_ref.py: Reference file tests for the test.py test suite
"""

import os
from mmgen.globalvars import g
from mmgen.opts import opt
from mmgen.wallet import MMGenMnemonic

from ..include.common import *
from .common import *
from .ts_base import *
from .ts_shared import *

wpasswd = 'reference password'

class TestSuiteRef(TestSuiteBase,TestSuiteShared):
    'saved reference address, password and transaction files'
    tmpdir_nums = [8]
    networks = ('btc','btc_tn','ltc','ltc_tn')
    passthru_opts = ('daemon_data_dir','rpc_port','coin','testnet')
    sources = {
        'ref_addrfile':      '98831F3A{}[1,31-33,500-501,1010-1011]{}.addrs',
        'ref_segwitaddrfile':'98831F3A{}-S[1,31-33,500-501,1010-1011]{}.addrs',
        'ref_bech32addrfile':'98831F3A{}-B[1,31-33,500-501,1010-1011]{}.addrs',
        'ref_keyaddrfile':   '98831F3A{}[1,31-33,500-501,1010-1011]{}.akeys.mmenc',
        'ref_passwdfile_b32_24': '98831F3A-фубар@crypto.org-b32-24[1,4,1100].pws',
        'ref_passwdfile_b32_12': '98831F3A-фубар@crypto.org-b32-12[1,4,1100].pws',
        'ref_passwdfile_b58_10': '98831F3A-фубар@crypto.org-b58-10[1,4,1100].pws',
        'ref_passwdfile_b58_20': '98831F3A-фубар@crypto.org-b58-20[1,4,1100].pws',
        'ref_passwdfile_hex_32': '98831F3A-фубар@crypto.org-hex-32[1,4,1100].pws',
        'ref_passwdfile_hex_48': '98831F3A-фубар@crypto.org-hex-48[1,4,1100].pws',
        'ref_passwdfile_hex_64': '98831F3A-фубар@crypto.org-hex-64[1,4,1100].pws',
        'ref_passwdfile_bip39_12': '98831F3A-фубар@crypto.org-bip39-12[1,4,1100].pws',
        'ref_passwdfile_bip39_18': '98831F3A-фубар@crypto.org-bip39-18[1,4,1100].pws',
        'ref_passwdfile_bip39_24': '98831F3A-фубар@crypto.org-bip39-24[1,4,1100].pws',
        'ref_passwdfile_xmrseed_25': '98831F3A-фубар@crypto.org-xmrseed-25[1,4,1100].pws',
        'ref_passwdfile_hex2bip39_12': '98831F3A-фубар@crypto.org-hex2bip39-12[1,4,1100].pws',
        'ref_tx_file': { # data shared with ref_altcoin, autosign
            'btc': ('0B8D5A[15.31789,14,tl=1320969600].rawtx',
                    '0C7115[15.86255,14,tl=1320969600].testnet.rawtx'),
            'ltc': ('AF3CDF-LTC[620.76194,1453,tl=1320969600].rawtx',
                    'A5A1E0-LTC[1454.64322,1453,tl=1320969600].testnet.rawtx'),
            'bch': ('460D4D-BCH[10.19764,tl=1320969600].rawtx',
                    '359FD5-BCH[6.68868,tl=1320969600].testnet.rawtx'),
            'eth': ('88FEFD-ETH[23.45495,40000].rawtx',
                    'B472BD-ETH[23.45495,40000].testnet.rawtx'),
            'mm1': ('5881D2-MM1[1.23456,50000].rawtx',
                    '6BDB25-MM1[1.23456,50000].testnet.rawtx'),
            'etc': ('ED3848-ETC[1.2345,40000].rawtx','')
        },
    }
    chk_data = {
        'ref_subwallet_sid': {
            '98831F3A:32L':'D66B4885',
            '98831F3A:1S':'20D95B09',
        },
        'ref_addrfile_chksum': {
            'btc': ('6FEF 6FB9 7B13 5D91','424E 4326 CFFE 5F51'),
            'ltc': ('AD52 C3FE 8924 AAF0','4EBE 2E85 E969 1B30'),
        },
        'ref_segwitaddrfile_chksum': {
            'btc': ('06C1 9C87 F25C 4EE6','072C 8B07 2730 CB7A'),
            'ltc': ('63DF E42A 0827 21C3','5DD1 D186 DBE1 59F2'),
        },
        'ref_bech32addrfile_chksum': {
            'btc': ('9D2A D4B6 5117 F02E','0527 9C39 6C1B E39A'),
            'ltc': ('FF1C 7939 5967 AB82','ED3D 8AA4 BED4 0B40'),
        },
        'ref_keyaddrfile_chksum': {
            'btc': ('9F2D D781 1812 8BAD','88CC 5120 9A91 22C2'),
            'ltc': ('B804 978A 8796 3ED4','98B5 AC35 F334 0398'),
        },
        'ref_passwdfile_b32_12_chksum':       '7252 CD8D EF0D 3DB1',
        'ref_passwdfile_b32_24_chksum':       '8D56 3845 A072 A5B9',
        'ref_passwdfile_b58_10_chksum':       '534F CC1A 6701 9FED',
        'ref_passwdfile_b58_20_chksum':       'DDD9 44B0 CA28 183F',
        'ref_passwdfile_hex_32_chksum':       '05C7 3678 E25E BC32',
        'ref_passwdfile_hex_48_chksum':       '7DBB FFD0 633E DE6F',
        'ref_passwdfile_hex_64_chksum':       'F11D CB0A 8AE3 4D21',
        'ref_passwdfile_bip39_12_chksum':     'BF57 02A3 5229 CF18',
        'ref_passwdfile_bip39_18_chksum':     '31D3 1656 B7DC 27CF',
        'ref_passwdfile_bip39_24_chksum':     'E565 3A59 7D91 4671',
        'ref_passwdfile_xmrseed_25_chksum':   'B488 21D3 4539 968D',
        'ref_passwdfile_hex2bip39_12_chksum': '93AD 4AE2 03D1 8A0A',
    }
    cmd_group = ( # TODO: move to tooltest2
        ('ref_words_to_subwallet_chk1','subwallet generation from reference words file (long subseed)'),
        ('ref_words_to_subwallet_chk2','subwallet generation from reference words file (short subseed)'),
        ('ref_subwallet_addrgen1','subwallet address file generation (long subseed)'),
        ('ref_subwallet_addrgen2','subwallet address file generation (short subseed)'),
        ('ref_subwallet_keygen1','subwallet key-address file generation (long subseed)'),
        ('ref_subwallet_keygen2','subwallet key-address file generation (short subseed)'),
        ('ref_addrfile_chk', 'saved reference address file'),
        ('ref_segwitaddrfile_chk','saved reference address file (segwit)'),
        ('ref_bech32addrfile_chk','saved reference address file (bech32)'),
        ('ref_keyaddrfile_chk','saved reference key-address file'),
        ('ref_passwdfile_chk_b58_20','saved reference password file (base58, 20 chars)'),
        ('ref_passwdfile_chk_b58_10','saved reference password file (base58, 10 chars)'),
        ('ref_passwdfile_chk_b32_24','saved reference password file (base32, 24 chars)'),
        ('ref_passwdfile_chk_b32_12','saved reference password file (base32, 12 chars)'),
        ('ref_passwdfile_chk_hex_32','saved reference password file (hexadecimal, 32 chars)'),
        ('ref_passwdfile_chk_hex_48','saved reference password file (hexadecimal, 48 chars)'),
        ('ref_passwdfile_chk_hex_64','saved reference password file (hexadecimal, 64 chars)'),
        ('ref_passwdfile_chk_bip39_12','saved reference password file (BIP39, 12 words)'),
        ('ref_passwdfile_chk_bip39_18','saved reference password file (BIP39, 18 words)'),
        ('ref_passwdfile_chk_bip39_24','saved reference password file (BIP39, 24 words)'),
        ('ref_passwdfile_chk_xmrseed_25','saved reference password file (Monero new-style mnemonic, 25 words)'),
        ('ref_passwdfile_chk_hex2bip39_12','saved reference password file (hex-to-BIP39, 12 words)'),
        # Create the fake inputs:
        # ('txcreate8', 'transaction creation (8)'),
        ('ref_tx_chk', 'signing saved reference tx file'),
        ('ref_brain_chk_spc3', 'saved brainwallet (non-standard spacing)'),
        ('ref_dieroll_chk_seedtruncate','saved dieroll wallet with extra entropy bits'),
        ('ref_tool_decrypt', 'decryption of saved MMGen-encrypted file'),
    )

    @property
    def nw_desc(self):
        return '{} {}'.format(self.proto.coin,('Mainnet','Testnet')[self.proto.testnet])

    def _get_ref_subdir_by_coin(self,coin):
        return {'btc':  '',
                'bch':  '',
                'ltc':  'litecoin',
                'eth':  'ethereum',
                'etc':  'ethereum_classic',
                'xmr':  'monero',
                'zec':  'zcash',
                'dash': 'dash' }[coin.lower()]

    @property
    def ref_subdir(self):
        return self._get_ref_subdir_by_coin(self.proto.coin)

    def ref_words_to_subwallet_chk1(self):
        return self.ref_words_to_subwallet_chk('32L')

    def ref_words_to_subwallet_chk2(self):
        return self.ref_words_to_subwallet_chk('1S')

    def ref_words_to_subwallet_chk(self,ss_idx):
        wf = dfl_words_file
        ocls = MMGenMnemonic
        args = ['-d',self.tr.trash_dir,'-o',ocls.fmt_codes[-1],wf,ss_idx]

        t = self.spawn('mmgen-subwalletgen',args,extra_desc='(generate subwallet)')
        t.expect('Generating subseed {}'.format(ss_idx))
        chk_sid = self.chk_data['ref_subwallet_sid']['98831F3A:{}'.format(ss_idx)]
        fn = t.written_to_file(capfirst(ocls.desc))
        assert chk_sid in fn,'incorrect filename: {} (does not contain {})'.format(fn,chk_sid)
        ok()

        t = self.spawn('mmgen-walletchk',[fn],extra_desc='(check subwallet)')
        t.expect(r'Valid MMGen native mnemonic data for Seed ID ([0-9A-F]*)\b',regex=True)
        sid = t.p.match.group(1)
        assert sid == chk_sid,'subseed ID {} does not match expected value {}'.format(sid,chk_sid)
        t.read()
        return t

    def ref_subwallet_addrgen(self,ss_idx,target='addr'):
        wf = dfl_words_file
        args = ['-d',self.tr.trash_dir,'--subwallet='+ss_idx,wf,'1-10']
        t = self.spawn('mmgen-{}gen'.format(target),args)
        t.expect('Generating subseed {}'.format(ss_idx))
        chk_sid = self.chk_data['ref_subwallet_sid']['98831F3A:{}'.format(ss_idx)]
        assert chk_sid == t.expect_getend('Checksum for .* data ',regex=True)[:8]
        if target == 'key':
            t.expect('Encrypt key list? (y/N): ','n')
        fn = t.written_to_file(('Addresses','Secret keys')[target=='key'])
        assert chk_sid in fn,'incorrect filename: {} (does not contain {})'.format(fn,chk_sid)
        return t

    def ref_subwallet_addrgen1(self):
        return self.ref_subwallet_addrgen('32L')

    def ref_subwallet_addrgen2(self):
        return self.ref_subwallet_addrgen('1S')

    def ref_subwallet_keygen1(self):
        return self.ref_subwallet_addrgen('32L',target='key')

    def ref_subwallet_keygen2(self):
        return self.ref_subwallet_addrgen('1S',target='key')

    def ref_addrfile_chk(
            self,
            ftype    = 'addr',
            coin     = None,
            subdir   = None,
            pfx      = None,
            mmtype   = None,
            add_args = [],
            id_key   = None,
            pat      = None ):

        pat = pat or f'{self.nw_desc}.*Legacy'
        af_key = 'ref_{}file'.format(ftype) + ('_' + id_key if id_key else '')
        af_fn = TestSuiteRef.sources[af_key].format(pfx or self.altcoin_pfx,'' if coin else self.tn_ext)
        af = joinpath(ref_dir,(subdir or self.ref_subdir,'')[ftype=='passwd'],af_fn)
        coin_arg = [] if coin == None else ['--coin='+coin]
        tool_cmd = ftype.replace('segwit','').replace('bech32','')+'file_chksum'
        t = self.spawn('mmgen-tool',coin_arg+['--verbose','-p1',tool_cmd,af]+add_args)
        if ftype == 'keyaddr':
            t.do_decrypt_ka_data(hp=ref_kafile_hash_preset,pw=ref_kafile_pass,have_yes_opt=True)
        chksum_key = '_'.join([af_key,'chksum'] + ([coin.lower()] if coin else []) + ([mmtype] if mmtype else []))
        rc = self.chk_data[chksum_key]
        ref_chksum = rc if (ftype == 'passwd' or coin) else rc[self.proto.base_coin.lower()][self.proto.testnet]
        if pat:
            t.expect(pat,regex=True)
        t.expect(chksum_pat,regex=True)
        m = t.p.match.group(0)
        t.read()
        cmp_or_die(ref_chksum,m)
        return t

    def ref_segwitaddrfile_chk(self):
        if not 'S' in self.proto.mmtypes:
            return skip(f'not supported by {self.proto.cls_name} protocol')
        return self.ref_addrfile_chk(ftype='segwitaddr',pat='{}.*Segwit'.format(self.nw_desc))

    def ref_bech32addrfile_chk(self):
        if not 'B' in self.proto.mmtypes:
            return skip(f'not supported by {self.proto.cls_name} protocol')
        return self.ref_addrfile_chk(ftype='bech32addr',pat='{}.*Bech32'.format(self.nw_desc))

    def ref_keyaddrfile_chk(self):
        return self.ref_addrfile_chk(ftype='keyaddr')

    def ref_passwdfile_chk(self,key,pat):
        return self.ref_addrfile_chk(ftype='passwd',id_key=key,pat=pat)

    def ref_passwdfile_chk_b58_20(self): return self.ref_passwdfile_chk(key='b58_20',pat=r'Base58.*len.* 20\b')
    def ref_passwdfile_chk_b58_10(self): return self.ref_passwdfile_chk(key='b58_10',pat=r'Base58.*len.* 10\b')
    def ref_passwdfile_chk_b32_24(self): return self.ref_passwdfile_chk(key='b32_24',pat=r'Base32.*len.* 24\b')
    def ref_passwdfile_chk_b32_12(self): return self.ref_passwdfile_chk(key='b32_12',pat=r'Base32.*len.* 12\b')
    def ref_passwdfile_chk_hex_32(self): return self.ref_passwdfile_chk(key='hex_32',pat=r'Hexadec.*len.* 32\b')
    def ref_passwdfile_chk_hex_48(self): return self.ref_passwdfile_chk(key='hex_48',pat=r'Hexadec.*len.* 48\b')
    def ref_passwdfile_chk_hex_64(self): return self.ref_passwdfile_chk(key='hex_64',pat=r'Hexadec.*len.* 64\b')
    def ref_passwdfile_chk_bip39_12(self): return self.ref_passwdfile_chk(key='bip39_12',pat=r'BIP39.*len.* 12\b')
    def ref_passwdfile_chk_bip39_18(self): return self.ref_passwdfile_chk(key='bip39_18',pat=r'BIP39.*len.* 18\b')
    def ref_passwdfile_chk_bip39_24(self): return self.ref_passwdfile_chk(key='bip39_24',pat=r'BIP39.*len.* 24\b')
    def ref_passwdfile_chk_xmrseed_25(self): return self.ref_passwdfile_chk(key='xmrseed_25',pat=r'Mon.*len.* 25\b')
    def ref_passwdfile_chk_hex2bip39_12(self): return self.ref_passwdfile_chk(key='hex2bip39_12',pat=r'BIP39.*len.* 12\b')

    def ref_tx_chk(self):
        fn = self.sources['ref_tx_file'][self.proto.coin.lower()][bool(self.tn_ext)]
        if not fn: return
        tf = joinpath(ref_dir,self.ref_subdir,fn)
        wf = dfl_words_file
        self.write_to_tmpfile(pwfile,wpasswd)
        pf = joinpath(self.tmpdir,pwfile)
        return self.txsign(wf,tf,pf,save=False,has_label=True,view='y')

    def ref_brain_chk_spc3(self):
        return self.ref_brain_chk(bw_file=ref_bw_file_spc)

    def ref_dieroll_chk_seedtruncate(self):
        wf = joinpath(ref_dir,'overflow128.b6d')
        return self.walletchk(wf,None,sid='8EC6D4A2')

    def ref_tool_decrypt(self):
        f = joinpath(ref_dir,ref_enc_fn)
        if not g.debug_utf8:
            disable_debug()
        dec_file = joinpath(self.tmpdir,'famous.txt')
        t = self.spawn('mmgen-tool', ['-q','decrypt',f,'outfile='+dec_file,'hash_preset=1'])
        if not g.debug_utf8:
            restore_debug()
        t.passphrase('user data',tool_enc_passwd)
        t.written_to_file('Decrypted data')
        dec_txt = read_from_file(dec_file)
        imsg_r(dec_txt)
        cmp_or_die(sample_text+'\n',dec_txt) # file adds a newline to sample_text
        return t
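
# Editor's note (not part of the original file): TestSuiteRef is not run
# directly; per its docstring it is driven by the repository's test.py harness,
# which walks cmd_group, calls each named method, and compares the spawned
# command's output against the checksums stored in chk_data (the exact runner
# invocation is not shown in this file).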
|
gpl-3.0
| -1,095,619,924,337,389,300
| 45
| 119
| 0.688504
| false
| 2.500921
| true
| false
| false
|
sabas1080/InstagramPi
|
InstagramPi.py
|
1
|
4484
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# InstagramPi Gafas for Instagram with Raspberry Pi Zero
#
# Autor: Andrés Sabas @ Feb 2017
#
#
# Use text editor to edit the script and type in valid Instagram username/password
import atexit
import picamera
import os
import time
import random
from os import listdir
from os.path import isfile, join
from random import randint
import atexit
import RPi.GPIO as GPIO
from InstagramAPI import InstagramAPI
butOp = 17 # Broadcom pin 17
butTake = 27 # Broadcom pin 17
ledPin = 16 # Broadcom pin 17
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP)
effects = ['none','negative','solarize','sketch','denoise','emboss','oilpaint','hatch','gpen','pastel','watercolor','film','blur','saturation','colorswap','washedout','posterise','colorpoint','colorbalance','cartoon','deinterlace1','deinterlace2']
saveIdx = -1 # Image index for saving (-1 = none set yet)
Option = True #Option Video(True) or Photo(False)
# Init camera and set up default values
camera = picamera.PiCamera()
atexit.register(camera.close)
camera.image_effect = effects[0]
#camera.resolution = sizeData[sizeMode][1]
#camera.crop = sizeData[sizeMode][2]
#camera.crop = (0.0, 0.0, 1.0, 1.0)
PhotoPath = "/home/pi/InstagramPi/images" # Change Directory to Folder with Pics that you want to upload
IGUSER = "xhabas" # Change to your Instagram USERNAME
PASSWD = "" # Change to your Instagram Password
#INSTAGRAM_FILE_NAME = "instagram.txt" # Text file to store your password
#if PASSWD == "":
# with open(INSTAGRAM_FILE_NAME, 'r') as f:
# PASSWD = f.readlines()[0];
# Change to your Photo Hashtag
IGCaption = "Hi from Raspberry Pi #PInstagram"
# Change to your Video Hashtag
IGCaptionVideo = "Hi from Raspberry Pi #PInstagram"
def TakeVideo():
os.chdir(PhotoPath)
#Delete previous videos
bashCommand = "rm -rf video.h264 video.avi photothumbnail.JPG"
os.system(bashCommand)
print ("Record Video")
camera.capture("photothumbnail.JPG", format='jpeg',thumbnail=None)
camera.start_recording('video.h264' )
time.sleep(10)
camera.stop_recording()
#Convert video to spectacles effect
# Thanks https://github.com/fabe/spectacles-cli/issues/1
bashCommand = "ffmpeg -i video.h264 -i overlay.png -map 0:a? -filter_complex \"scale=-2:720[rescaled];[rescaled]crop=ih:ih:iw/4:0[crop];[crop]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2\" output.h264"
os.system(bashCommand)
#Convert to format avi
bashCommand = "ffmpeg -f h264 -i output.h264 -c libx264 -an video.avi -y"
os.system(bashCommand)
print ("Now Uploading this Video to instagram")
igapi.uploadVideo("video.avi", thumbnail="photothumbnail.JPG", caption=IGCaptionVideo);
print ("Progress : Done")
#n = randint(600,1200)
#print ("Sleep upload for seconds: " + str(n))
#time.sleep(n)
def TakePhoto():
global saveIdx
print ("Take Photo")
os.chdir(PhotoPath)
ListFiles = [f for f in listdir(PhotoPath) if isfile(join(PhotoPath, f))]
print ("Total Photo in this folder:" + str (len(ListFiles)))
while True:
filename = PhotoPath + '/IMG_' + '%04d' % saveIdx + '.JPG'
if not os.path.isfile(filename): break
saveIdx += 1
if saveIdx > 9999: saveIdx = 0
camera.capture(filename, format='jpeg',thumbnail=None)
for i in range(len(ListFiles)):
photo = ListFiles[i]
print ("Progress :" + str([i+1]) + " of " + str(len(ListFiles)))
print ("Now Uploading this photo to instagram: " + photo)
igapi.uploadPhoto(photo,caption=IGCaption,upload_id=None)
# sleep for random between 600 - 1200s
#n = randint(600,1200)
#print ("Sleep upload for seconds: " + str(n))
#time.sleep(n)
#Start Login and Uploading Photo
igapi = InstagramAPI(IGUSER,PASSWD)
igapi.login() # login
try:
while 1:
        if not GPIO.input(butTake): # pull-up: input reads LOW while the button is pressed
            if Option:
                TakePhoto() #Take Photo
            else:
                TakeVideo() #Take Video
        if GPIO.input(butOp):
            Option=True; #Mode Photo (option button not pressed)
        else:
            Option=False; #Mode Video (option button pressed)
except KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:
GPIO.cleanup() # cleanup all GPIO
|
agpl-3.0
| -4,242,357,488,178,768,000
| 32.706767
| 247
| 0.662726
| false
| 3.257994
| false
| false
| false
|
amolenaar/gaphor
|
gaphor/ui/tests/test_mainwindow.py
|
1
|
1370
|
import pytest
from gaphor.application import Session
from gaphor.core.modeling import Diagram
from gaphor.ui.abc import UIComponent
from gaphor.ui.event import DiagramOpened
@pytest.fixture
def session():
session = Session(
services=[
"event_manager",
"component_registry",
"element_factory",
"modeling_language",
"properties",
"main_window",
"namespace",
"diagrams",
"toolbox",
"elementeditor",
"export_menu",
"tools_menu",
]
)
yield session
session.shutdown()
def get_current_diagram(session):
return (
session.get_service("component_registry")
.get(UIComponent, "diagrams")
.get_current_diagram()
)
def test_creation(session):
# MainWindow should be created as resource
main_w = session.get_service("main_window")
main_w.open()
assert get_current_diagram(session) is None
def test_show_diagram(session):
element_factory = session.get_service("element_factory")
diagram = element_factory.create(Diagram)
main_w = session.get_service("main_window")
main_w.open()
event_manager = session.get_service("event_manager")
event_manager.handle(DiagramOpened(diagram))
assert get_current_diagram(session) == diagram
|
lgpl-2.1
| 7,249,366,273,309,667,000
| 24.849057
| 60
| 0.625547
| false
| 3.88102
| false
| false
| false
|
gridpp/dirac-getting-started
|
perform_frame_query.py
|
1
|
3626
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DIRAC and GridPP: perform a query on the CERN@school frames.
"""
#...for the operating system stuff.
import os
#...for parsing the arguments.
import argparse
#...for the logging.
import logging as lg
# Import the JSON library.
import json
# The DIRAC import statements.
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.Interfaces.API.Dirac import Dirac
#...for the DIRAC File Catalog client interface.
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
if __name__ == "__main__":
print("")
print("########################################################")
print("* GridPP and DIRAC: CERN@school frame metadata queries *")
print("########################################################")
print("")
# Get the datafile path from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("queryJson", help="Path to the query JSON.")
parser.add_argument("outputPath", help="The path for the output files.")
parser.add_argument("dfcBaseDir", help="The name of the base directory on the DFC.")
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
args = parser.parse_args()
## The path to the data file.
datapath = args.queryJson
## The output path.
outputpath = args.outputPath
# Check if the output directory exists. If it doesn't, quit.
if not os.path.isdir(outputpath):
raise IOError("* ERROR: '%s' output directory does not exist!" % (outputpath))
## Base directory for the file uploads.
dfc_base = args.dfcBaseDir
# Set the logging level.
if args.verbose:
level=lg.DEBUG
else:
level=lg.INFO
# Configure the logging.
lg.basicConfig(filename='log_perform_frame_query.log', filemode='w', level=level)
print("*")
print("* Input JSON : '%s'" % (datapath))
print("* Output path : '%s'" % (outputpath))
print("* DFC base directory : '%s'" % (dfc_base))
## The DFC client.
fc = FileCatalogClient()
## The frame query JSON file - FIXME: check it exists...
qf = open(datapath, "r")
#
qd = json.load(qf)
qf.close()
meta_dict = {\
"start_time" : { ">=" : int(qd[0]["start_time"]) },
"end_time" : { "<=" : int(qd[0]["end_time" ]) }
# #"lat" : { ">" : 60.0 }\
# #"n_pixel" : { ">" : 700 }\
# #"n_kluster" : { ">" : 40}\
}
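    # Each entry maps a metadata field to an {operator: value} dict understood
    # by the DFC; e.g. uncommenting "n_kluster" : { ">" : 40 } above would
    # further restrict the query.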
## The query result.
result = fc.findFilesByMetadata(meta_dict, path=dfc_base)
print("*")
print "* Metadata query:", meta_dict
print("*")
print("* Number of frames found : %d" % (len(result["Value"])))
print("*")
# Get the cluster file names from the metadata query.
# ## Kluster file names.
# kluster_file_names = []
for fn in sorted(result["Value"]):
#print("* Found: '%s'." % (fn))
filemetadata = fc.getFileUserMetadata(fn)
frameid = str(filemetadata['Value']['frameid'])
n_kluster = int(filemetadata['Value']['n_kluster'])
print("*--> Frame ID : '%s'" % (frameid))
print("*--> Number of clusters = %d" % (n_kluster))
#print("*")
# for i in range(n_kluster):
# kn = "%s_k%05d.png" % (frameid, i)
# kluster_file_names.append(kn)
# print("*")
#
# #lg.info(" * Clusters to be downloaded:")
# #for kn in kluster_names:
# # lg.info(" *--> '%s'" % (kn))
#
# print("* Number of clusters found : %d" % (len(kluster_file_names)))
|
mit
| -6,275,122,451,724,172,000
| 28.479675
| 97
| 0.565913
| false
| 3.541016
| false
| false
| false
|
songsense/Pregelix_Social_Graph
|
preprocessing/twitter_with_tags_parser.py
|
1
|
4458
|
import os
import sys
neighborDict = {} #dictionary containing neighbors of each node
weightDict = {} #dictionary containing weights of edges
featureDict = {} #dictionary containing features of each node
featureDictTotal = {} #dictionary containing all listed features of each node
totalFeatureDict = {} #dictionary containing features of all nodes
# the path of data files
currPath = "../twitter"
# list all files
fileArray = os.listdir(currPath)
######## get totalFeature #############
for fileGraphName in fileArray:
if fileGraphName.endswith('.featnames'): # if the file is the '*.featnames' file which lists all possible features of current node
nodeNum = fileGraphName[0:len(fileGraphName)-10]; #get current node
fileGraphName = os.path.join(currPath, fileGraphName);
fileGraph = open(fileGraphName, 'r');
line = fileGraph.readline();
featureArray = []
while line:
line = line.rstrip();
lineArray = line.split(' ');
# add each feature into dictionary
if(not totalFeatureDict.has_key(lineArray[1])):
length = len(totalFeatureDict);
totalFeatureDict[lineArray[1]] = length;
featureArray.append(lineArray[1]);
line = fileGraph.readline();
featureDictTotal[nodeNum]=featureArray;
######## get features ###############
for fileGraphName in fileArray:
if fileGraphName.endswith('.egofeat'): # if the file is the '*.egofeat' file which lists the actual features of each node
nodeNum = fileGraphName[0:len(fileGraphName)-8]; #get current node
fileGraphName = os.path.join(currPath, fileGraphName);
fileGraph = open(fileGraphName, 'r');
line = fileGraph.readline();
features = []
while line:
line = line.rstrip();
lineArray = line.split(' ');
for i in range(0, len(lineArray)):
if(lineArray[i]=='1'): #'1' indicates that the node has the feature to which '1' corresponds
features.append(totalFeatureDict[featureDictTotal[nodeNum][i]]);
line = fileGraph.readline();
featureDict[nodeNum] = features;
######### get neighbors and weights #############
for fileGraphName in fileArray:
if fileGraphName.endswith('.feat'): # if the file is the '*.feat' file which lists all the neighbors of each node and their features
nodeNum = fileGraphName[0:len(fileGraphName)-5]; #get current node
fileGraphName = os.path.join(currPath, fileGraphName)
fileGraph = open(fileGraphName, 'r');
line = fileGraph.readline();
neighbor = []; # array to contain neighbors
weights = []; #array to contain weights
## get node features ##
fileNodeFeature = open(os.path.join(currPath, nodeNum+'.egofeat'), 'r');
lineEgoFeature = fileNodeFeature.readline();
lineEgoFeature = lineEgoFeature.rstrip();
lineEgoFeatureArray = lineEgoFeature.split(' ');
while line:
line = line.rstrip();
lineArray = line.split(' ');
neighbor.append(lineArray[0]);
weight = 0;
for i in range(0, len(lineEgoFeatureArray)):
if(lineArray[i+1]=='1' and lineEgoFeatureArray[i]=='1'):# if both a neighbor and current node have a same feature, weight increases by 1
weight+=1;
weights.append(weight);
line = fileGraph.readline();
neighborDict[nodeNum] = neighbor;
weightDict[nodeNum] = weights;
######### write to profile ################
### write feature and index num ####
fileName = 'featureIndex.txt'
fileOut = open(fileName, 'w');
for tag in totalFeatureDict.keys():
fileOut.writelines(tag+' '+str(totalFeatureDict[tag])+'\n')
fileOut.close()
### write neighbors and weights ####
fileName = 'graph.txt'
fileOut = open(fileName, 'w');
for nodeNum in neighborDict.keys():
line = nodeNum+' '+str(len(neighborDict[nodeNum]));
for i in range(0, len(neighborDict[nodeNum])):
line = line+' '+neighborDict[nodeNum][i];
line = line+' '+str(weightDict[nodeNum][i]);
line = line + ' ' + str(len(featureDict[nodeNum]));
for feature in featureDict[nodeNum]:
line = line + ' ' + str(feature);
line = line+'\n';
fileOut.writelines(line);
fileOut.close()
|
apache-2.0
| 521,520,475,521,803,800
| 37.765217
| 152
| 0.615074
| false
| 4.030741
| false
| false
| false
|
denadai2/A-Tale-of-Cities---code
|
converter.py
|
1
|
4148
|
__author__ = 'Marco De Nadai'
__license__ = "MIT"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import datetime
import csv
from collections import namedtuple
from collections import defaultdict
import fiona
from shapely.geometry import shape, Polygon
# Import the CDRs of MILANO
df = pd.read_csv('datasets/MILANO_CDRs.csv', sep=',', encoding="utf-8-sig", parse_dates=['datetime'])
# Considered technologies list
TECHNOLOGIES = ['GSM1800']
# Import the technologies' coverage areas
# Note: the input files cannot be shared.
# Coverage cells grouped per technology; each keeps its CGI, polygon and the
# grid intersections computed below. NOTE: the 'CGI' attribute name is an
# assumption here, since the input files cannot be shared.
CoverageCell = namedtuple('CoverageCell', ['CGI', 'polygon', 'intersections'])
coverage_polygons = defaultdict(list)
for t in TECHNOLOGIES:
    source = fiona.open('datasets/shapefiles/COVERAGE_'+t+'.shp', 'r')
    for polygon in source:
        coverage_polygons[t].append(
            CoverageCell(polygon['properties']['CGI'], shape(polygon['geometry']), []))
# Create the grid squares (the 1 km^2 method below is scaled to 235 m sides)
# ref: http://stackoverflow.com/questions/4000886/gps-coordinates-1km-square-around-a-point
earth_circumference = math.cos(math.radians(df['lat'].mean()))*40075.16
# Let's create squares of 235x235 metres
kms = 0.235
gridWidth = kms * (360./earth_circumference)
gridHeight = kms / 111.32
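# Rough sanity check (using Milan's mean latitude, ~45.4 N): the parallel's
# circumference is ~cos(45.4 deg)*40075.16 ~ 28140 km, so gridWidth ~
# 0.235*(360/28140) ~ 0.0030 degrees of longitude and gridHeight ~
# 0.235/111.32 ~ 0.0021 degrees of latitude.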
# GRID bounds (coordinates)
XMIN = 9.011533669936474
YMIN = 45.356261753717845
XMAX = 9.312688264185276
YMAX = 45.56821407553667
# Get the number of rows and columns
rows = math.ceil((YMAX-YMIN)/gridHeight)
cols = math.ceil((XMAX-XMIN)/gridWidth)
Square = namedtuple('Square', ['x', 'y', 'cx', 'cy', 'polygon'])
square_grid = []
for i in range(int(rows)):
for j in range(int(cols)):
x = XMIN+j*gridWidth
y = YMIN+i*gridHeight
centerx = (x+x+gridWidth)/2.
centery = (y+y+gridHeight)/2.
p = Polygon([[x,y], [x, y+gridHeight], [x+gridWidth, y+gridHeight], [x+gridWidth, y]])
square_grid.append(Square(x, y, centerx, centery, p))
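# The grid is stored row-major: the square at row i, column j has index
# i*int(cols) + j in square_grid; that index is the 'cell_number' used below.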
# Calculate the intersections of the coverage cells with the grids' square
intersections = []
for t in TECHNOLOGIES:
for i, v in enumerate(coverage_polygons[t]):
total_coverage_area = v.polygon.area
for j, s in enumerate(square_grid):
if v.polygon.intersects(s.polygon):
# To avoid Python floating point errors
if s.polygon.contains(v.polygon):
fraction = 1.0
else:
                    # Calculates the fraction of the coverage cell's area that
                    # intersects the grid square, used to assign the right
                    # proportion of the mobile usage to the grid square.
fraction = (v.polygon.intersection(s.polygon).area/total_coverage_area)
coverage_polygons[t][i].intersections.append([j, fraction])
coverage_intersections = defaultdict(dict)
for t in TECHNOLOGIES:
coverage_intersections[t] = defaultdict(dict)
for p in coverage_polygons[t]:
coverage_intersections[t][p.CGI] = p.intersections
# We build a hash table to search in a fast way all the CGI of a technology
hash_cgi_tech = {}
for index,row in df.groupby(['cgi','technology'], as_index=False).sum().iterrows():
hash_cgi_tech[row['cgi']] = row['technology']
# Select the data grouped by hour and countrycode
groups = df.groupby(['datetime', 'countrycode'])
#
# Example file with the format:
# datetime,CGI,countryCode,numRecords
#
with open('dati/MILANO_grid.csv', 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["datetime", "GridCell", "countryCode", "numRecords"])
for name, group in groups:
# iterate group's rows
data = []
d = defaultdict(int)
for index, row in enumerate(group.values):
CGI = row[1]
tech = hash_cgi_tech[CGI]
if CGI in coverage_intersections[tech]:
for (cell_number, cell_intersection_portion) in coverage_intersections[tech][CGI]:
d[str(cell_number) + "_" + str(row[3])] += float(row[2]*cell_intersection_portion)
datetime_rows = group.values[0, 0]
rows = [[datetime_rows] + k.split("_") + [v] for (k, v) in d.iteritems()]
csvwriter.writerows(rows)
|
mit
| -7,795,471,707,981,156,000
| 32.451613
| 106
| 0.657425
| false
| 3.411184
| false
| false
| false
|
Edu-Glez/mesos-test
|
container/test_naive_bayes.py
|
1
|
1645
|
import pickle
import pandas as pd
#import numpy as np
import nltk
import time
start_time = time.time()
a=pd.read_table('tweets_pos_clean.txt')
b=pd.read_table('tweets_neg_clean.txt')
aux1=[]
aux2=[]
auxiliar1=[]
auxiliar2=[]
for element in a['Text']:
for w in element.split():
if (w==':)' or len(w)>3):
auxiliar1.append(w)
aux1.append((auxiliar1,'positive'))
auxiliar1=[]
for element in b['text']:
for w in element.split():
if (w==':(' or len(w)>3):
auxiliar2.append(w)
aux2.append((auxiliar2,'negative'))
auxiliar2=[]
aux1=aux1[:100]
aux2=aux2[:200]
pos_df=pd.DataFrame(aux1)
neg_df=pd.DataFrame(aux2)
pos_df.columns=['words','sentiment']
neg_df.columns=['words','sentiment']
#table_aux=[pos_df,neg_df]
#tweets1=pd.concat(table_aux)
#tweets1.columns('words','sentiment')
table_aux1=aux1+aux2
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
word_features = get_word_features(get_words_in_tweets(table_aux1))
training_set = nltk.classify.apply_features(extract_features, table_aux1)
classifier = nltk.NaiveBayesClassifier.train(training_set)
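# Illustrative check (hypothetical tweet, not from the training data): the
# trained classifier labels a new token list via the same feature extractor,
# e.g. classifier.classify(extract_features('what a great sunny day'.split()))
# returns 'positive' or 'negative'.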
word_features = list(word_features)
with open('objs2.pickle','wb') as f:
pickle.dump([classifier, word_features],f)
print("Tomo %s segundos ejecutarse" % (time.time() - start_time))
|
mit
| -3,334,451,982,492,560,400
| 21.847222
| 73
| 0.711246
| false
| 2.627796
| false
| false
| false
|
ReactiveX/RxPY
|
examples/asyncio/toasyncgenerator.py
|
1
|
1779
|
import asyncio
from asyncio import Future
import rx
from rx import operators as ops
from rx.scheduler.eventloop import AsyncIOScheduler
from rx.core import Observable
def to_async_generator(sentinel=None):
loop = asyncio.get_event_loop()
future = Future()
notifications = []
def _to_async_generator(source: Observable):
def feeder():
nonlocal future
if not notifications or future.done():
return
notification = notifications.pop(0)
if notification.kind == "E":
future.set_exception(notification.exception)
elif notification.kind == "C":
future.set_result(sentinel)
else:
future.set_result(notification.value)
def on_next(value):
"""Takes on_next values and appends them to the notification queue"""
notifications.append(value)
loop.call_soon(feeder)
source.pipe(ops.materialize()).subscribe(on_next)
@asyncio.coroutine
def gen():
"""Generator producing futures"""
nonlocal future
loop.call_soon(feeder)
future = Future()
return future
return gen
return _to_async_generator
@asyncio.coroutine
def go(loop):
scheduler = AsyncIOScheduler(loop)
xs = rx.from_([x for x in range(10)], scheduler=scheduler)
gen = xs.pipe(to_async_generator())
# Wish we could write something like:
# ys = (x for x in yield from gen())
while True:
x = yield from gen()
if x is None:
break
print(x)
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(go(loop))
if __name__ == '__main__':
main()
|
mit
| 6,042,081,814,149,646,000
| 22.72
| 81
| 0.587409
| false
| 4.266187
| false
| false
| false
|
tkarna/cofs
|
examples/channel3d/channel3d_closed.py
|
1
|
2541
|
"""
Idealised channel flow in 3D
============================
Solves shallow water equations in closed rectangular domain
with sloping bathymetry.
Initially water elevation is set to a piecewise linear function
with a slope in the deeper (left) end of the domain. This results
in a wave that develops a shock as it reaches shallower end of the domain.
This example tests the integrity of the coupled 2D-3D model and stability
of momentum advection.
This test is also useful for testing tracer conservation and consistency
by advecting a constant passive tracer.
"""
from thetis import *
import os
n_layers = 6
outputdir = 'outputs_closed'
lx = 100e3
ly = 3000.
nx = 80
ny = 3
mesh2d = RectangleMesh(nx, ny, lx, ly)
print_output('Exporting to ' + outputdir)
t_end = 6 * 3600
t_export = 900.0
if os.getenv('THETIS_REGRESSION_TEST') is not None:
t_end = 5*t_export
# bathymetry
P1_2d = get_functionspace(mesh2d, 'CG', 1)
bathymetry_2d = Function(P1_2d, name='Bathymetry')
depth_max = 20.0
depth_min = 7.0
xy = SpatialCoordinate(mesh2d)
bathymetry_2d.interpolate(depth_max - (depth_max-depth_min)*xy[0]/lx)
u_max = 4.5
w_max = 5e-3
# create solver
solver_obj = solver.FlowSolver(mesh2d, bathymetry_2d, n_layers)
options = solver_obj.options
options.element_family = 'dg-dg'
options.timestepper_type = 'SSPRK22'
options.solve_salinity = True
options.solve_temperature = False
options.use_implicit_vertical_diffusion = False
options.use_bottom_friction = False
options.use_ale_moving_mesh = True
options.use_limiter_for_tracers = True
options.use_lax_friedrichs_velocity = False
options.use_lax_friedrichs_tracer = False
options.simulation_export_time = t_export
options.simulation_end_time = t_end
options.output_directory = outputdir
options.horizontal_velocity_scale = Constant(u_max)
options.vertical_velocity_scale = Constant(w_max)
options.check_volume_conservation_2d = True
options.check_volume_conservation_3d = True
options.check_salinity_conservation = True
options.check_salinity_overshoot = True
options.fields_to_export = ['uv_2d', 'elev_2d', 'elev_3d', 'uv_3d',
'w_3d', 'w_mesh_3d', 'salt_3d',
'uv_dav_2d']
# initial elevation, piecewise linear function
elev_init_2d = Function(P1_2d, name='elev_2d_init')
max_elev = 6.0
elev_slope_x = 30e3
elev_init_2d.interpolate(conditional(xy[0] < elev_slope_x, -xy[0]*max_elev/elev_slope_x + max_elev, 0.0))
salt_init_3d = Constant(4.5)
solver_obj.assign_initial_conditions(elev=elev_init_2d, salt=salt_init_3d)
solver_obj.iterate()
|
mit
| -5,102,377,859,405,900,000
| 31.576923
| 105
| 0.72924
| false
| 2.871186
| false
| false
| false
|
mementum/tcmanager
|
src/mvcbase.py
|
1
|
5930
|
#!/usr/bin/env python
# -*- coding: latin-1; py-indent-offset:4 -*-
################################################################################
#
# Copyright (C) 2014 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import functools
import inspect
import threading
import weakref
from pubsub import pub
import wx
class MvcAttribute(object):
_mvccontainer = weakref.WeakValueDictionary()
def __init__(self):
self.icache = dict()
def __set__(self, instance, value):
self.icache[instance] = value
def __get__(self, instance, owner=None):
return self.icache.setdefault(instance)
def MvcContainer(cls):
cls._model = MvcAttribute()
cls._view = MvcAttribute()
cls._controller = MvcAttribute()
cls.__oldinit__ = cls.__init__
@functools.wraps(cls.__init__)
def newInit(self, *args, **kwargs):
curThId = threading.current_thread().ident
MvcAttribute._mvccontainer[curThId] = self
self.__oldinit__(*args, **kwargs)
cls.__init__ = newInit
return cls
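# Illustrative sketch (class names are hypothetical): MvcContainer decorates
# the object owning the MVC triad, e.g.
#
#   @MvcContainer
#   class App(object):
#       def __init__(self):
#           self._model = Model()
#           self._view = View()
#           self._controller = Controller()
#
# so role classes instantiated in the same thread can look up their container.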
def MvcRole(role):
def wrapper(cls):
pubsubmap = {'view': 'model', 'controller': 'view'}
oldInit = cls.__init__
@functools.wraps(cls.__init__)
def newInit(self, *args, **kwargs):
# Assign the role
self.role = role
# Assign the mvcontainer
curThId = threading.current_thread().ident
self._mvccontainer = _mvccontainer = MvcAttribute._mvccontainer[curThId]
# Pubsub some methods
methods = inspect.getmembers(self.__class__, predicate=inspect.ismethod)
mvcid = id(_mvccontainer)
for method in methods:
if hasattr(method[1], '_pubsub'):
boundmethod = method[1].__get__(self, self.__class__)
psmap = pubsubmap[role]
pstopic = '%d.%s.%s' % (mvcid, psmap, method[1]._pubsub)
pub.subscribe(boundmethod, pstopic)
elif hasattr(method[1], '_pubsubspec'):
boundmethod = method[1].__get__(self, self.__class__)
pstopic = '%d.%s' % (mvcid, method[1]._pubsubspec)
pub.subscribe(boundmethod, pstopic)
if role == 'view':
# Rebind some methods to controller
_controller = _mvccontainer._controller
methods = inspect.getmembers(_controller, predicate=inspect.ismethod)
for method in methods:
if hasattr(method[1], '_viewcontroller'):
setattr(self, method[0], method[1].__get__(self, self.__class__))
oldInit(self, *args, **kwargs)
cls.__init__ = newInit
oldGetAttribute = cls.__getattribute__
def newGetAttribute(self, name):
_mvcroles = ['_model', '_view', '_controller']
if name in _mvcroles:
_mvccontainer = oldGetAttribute(self, '_mvccontainer')
return getattr(_mvccontainer, name)
return oldGetAttribute(self, name)
cls.__getattribute__ = newGetAttribute
if False:
def PubSend(self, **kwargs):
pub.sendMessage(self.role, **kwargs)
cls.PubSend = PubSend
return cls
return wrapper
ModelRole = MvcRole('model')
ViewRole = MvcRole('view')
ControllerRole = MvcRole('controller')
def ViewManager(func):
@functools.wraps(func)
def wrapper(self, event, *args, **kwargs):
event.Skip()
return func(self, event, *args, **kwargs)
wrapper._viewcontroller = True
return wrapper
def PubSubscribe(subtopic):
def decorate(func):
func._pubsub = subtopic
return func
return decorate
def PubSubscribeSpecific(subtopic):
def decorate(func):
func._pubsubspec = subtopic
return func
return decorate
def PubSend(topic=None, queue=True):
def decorate(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
mvcid = id(self._mvccontainer)
try:
msg = func(self, *args, **kwargs)
if not topic:
sendtopic = None
else:
sendtopic = '%d.%s.%s' % (mvcid, self.role, topic)
except Exception, e:
msg = str(e)
sendtopic = '%d.%s.%s' % (mvcid, self.role, 'error')
if sendtopic:
if queue:
wx.CallAfter(pub.sendMessage, sendtopic, msg=msg)
else:
pub.sendMessage(sendtopic, msg=msg)
return wrapper
return decorate
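# Illustrative sketch (method and topic names are hypothetical): a decorated
# model method publishes its return value as the pubsub message, e.g.
#
#   @ModelRole
#   class Model(object):
#       @PubSend(topic='loaded')
#       def load(self, path):
#           return parse(path)   # published on '<mvcid>.model.loaded'
#
# and any exception is rerouted to the '<mvcid>.model.error' topic.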
def PubSendSpecific(topic, queue=True):
def decorate(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
msg = func(self, *args, **kwargs)
mvcid = id(self._mvccontainer)
sendtopic = '%d.%s' % (mvcid, topic)
# sendtopic = topic
if queue:
wx.CallAfter(pub.sendMessage, sendtopic, msg=msg)
else:
pub.sendMessage(sendtopic, msg=msg)
return wrapper
return decorate
|
gpl-3.0
| -4,661,281,447,062,204,000
| 31.762431
| 89
| 0.558685
| false
| 4.135286
| false
| false
| false
|
chrys87/orca-beep
|
test/keystrokes/firefox/line_nav_button_in_link_position_relative_on_focus.py
|
1
|
1860
|
#!/usr/bin/python
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Line Down",
["BRAILLE LINE: 'Line 1'",
" VISIBLE: 'Line 1', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: ' Line 2 push button'",
" VISIBLE: ' Line 2 push button', cursor=1",
"SPEECH OUTPUT: ' Line 2 push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: 'Line 3'",
" VISIBLE: 'Line 3', cursor=1",
"SPEECH OUTPUT: 'Line 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"4. Line Up",
["BRAILLE LINE: ' Line 2 push button'",
" VISIBLE: ' Line 2 push button', cursor=1",
"SPEECH OUTPUT: ' Line 2 push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"5. Line Up",
["BRAILLE LINE: 'Line 1'",
" VISIBLE: 'Line 1', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
lgpl-2.1
| -3,976,546,266,804,137,500
| 31.421053
| 70
| 0.690476
| false
| 3.276596
| false
| true
| false
|
jbest/digitization_tools
|
productivity/productivity.py
|
1
|
6643
|
"""
Imaging productivity stats
Jason Best - jbest@brit.org
Generates a productivity report based on the creation timestamps of image files.
Details of the imaging session are extracted from the folder name containing the images.
Assumed folder name format is: YYYY-MM-DD_ImagerID_OtherInfo
Usage:
python productivity.py [session_folder_path]
Requirements:
See requirements.txt.
"""
from datetime import datetime
import sys
import time
import os
import csv
import re
# If you don't need the moving mean calculated,
# or don't want to install extra modules,
# you can remove the following import and then
# comment out the moving mean calculations.
import pandas as pd
# parameters
# File extensions that are used to determine productivity. Others are ignored.
inputFileTypes = ('.jpg', '.jpeg', '.JPG', '.JPEG') # Variations of JPEG extensions
#inputFileTypes = ('.CR2', '.cr2') # Variations of Canon RAW extensions.
fieldDelimiter = ',' # delimiter used in output CSV
moving_mean_window = 21
def getImages(local_path=None):
"""
Generates a list of specimen files stored at a local path.
"""
imageFiles = []
dirList = os.listdir(local_path)
for fileName in dirList:
#TODO ignore case for extension evaluation
if fileName.endswith(inputFileTypes):
#imageFiles.append(local_path + fileName)
imageFiles.append(os.path.join(local_path, fileName))
return imageFiles
def getImageData(images):
stats_list = []
for imageFile in images:
#imageStats.append((imageFile, os.path.getmtime(imageFile)))
stats_list.append((imageFile, os.path.getmtime(imageFile)))
return stats_list
def get_session_data(session_folder=None):
if session_folder is not None:
print 'Analyzing: ', session_folder
session_re = re.match('(\d\d\d\d)[-_](\d\d)[-_](\d\d)[-_]([a-zA-Z]*)(.*)', session_folder)
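        # For illustration (hypothetical folder name): '2017-05-03_jdoe_batch1'
        # parses to year=2017, month=5, day=3, imager='jdoe', other='_batch1'.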
if session_re:
if session_re.group(1):
year = int(session_re.group(1))
else:
year = 'NONE'
if session_re.group(2):
month = int(session_re.group(2))
else:
month = 'NONE'
if session_re.group(3):
day = int(session_re.group(3))
else:
day = 'NONE'
if session_re.group(4):
imager = session_re.group(4)
else:
imager = 'NONE'
if session_re.group(5):
other = session_re.group(5)
else:
other = 'NONE'
else:
return {'imager': 'NONE', 'year': 'NONE', 'month': 'NONE', 'day': 'NONE', 'other': 'NONE'}
return {'imager': imager, 'year': year, 'month': month, 'day': day, 'other': other}
else:
print 'No session folder provided.'
return None
# Analyze the image files
startTime = datetime.now()
# Determine session folder containing images
try:
if os.path.exists(sys.argv[1]):
#session_path = sys.argv[1]
session_path = os.path.abspath(sys.argv[1])
else:
session_path = os.path.dirname(os.path.realpath(__file__))
print 'No valid directory path provided. Assuming:', session_path
except IndexError:
# No path provided, assuming script directory
session_path = os.path.dirname(os.path.realpath(__file__))
print 'No valid directory path provided. Assuming:', session_path
session_folder = os.path.basename(session_path)
print 'session_path', session_path, 'session_folder', session_folder
#dir_path = os.path.dirname(imageFile) # full path of parent directory
#basename = os.path.basename(imageFile) # filename with extension
#filename, file_extension = os.path.splitext(basename)
imagesToEvaluate = getImages(session_path)
session_data = get_session_data(session_folder)
print 'session_data:', session_data
# populate imageStats
image_stats = getImageData(imagesToEvaluate)
# Create data structure
creation_time = None
creation_series = []
series_data = []
cumulative_time = 0
cumulative_mean = 0
image_count = 0
for image_data in sorted(image_stats,key=lambda x: x[1]): # sorted ensures results are in order of creation
file_path = image_data[0]
file_basename = os.path.basename(file_path) # filename with extension
if creation_time is None:
time_diff = 0
else:
time_diff = image_data[1] - creation_time
cumulative_time = cumulative_time + time_diff
image_count += 1
cumulative_mean = cumulative_time/image_count
creation_time = image_data[1]
creation_series.append(time_diff)
try:
cumulative_images_per_min = 60/cumulative_mean
except ZeroDivisionError:
cumulative_images_per_min = 0
#TODO format floats
session_date = str(session_data['month']) + '/' + str(session_data['day']) + '/' + str(session_data['year'])
series_data.append([file_path, file_basename, session_data['imager'], session_data['year'], session_data['month'], session_data['day'], session_data['other'], session_date, time.ctime(creation_time),time_diff, cumulative_time, cumulative_mean, cumulative_images_per_min ])
print 'Analyzing:', file_basename
# calculate moving mean
#TODO test to see if any data are available
data = pd.Series(creation_series)
data_mean = pd.rolling_mean(data, window=moving_mean_window).shift(-(moving_mean_window/2))
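# Note (assumed migration path, untested here): pd.rolling_mean() was removed
# in later pandas releases; the equivalent call on a newer pandas would be
#   data_mean = data.rolling(window=moving_mean_window).mean().shift(-(moving_mean_window // 2))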
# Create file for results
log_file_base_name = session_data['imager'] + '_' + str(session_data['year']) + '-' + str(session_data['month']) + '-' + str(session_data['day'])
log_file_ext = '.csv'
if os.path.exists(log_file_base_name + log_file_ext):
log_file_name = log_file_base_name + '_' + startTime.isoformat().replace(':', '--') + log_file_ext
else:
log_file_name = log_file_base_name + log_file_ext
reportFile = open(log_file_name, "wb")
reportWriter = csv.writer(reportFile, delimiter = fieldDelimiter, escapechar='#')
# header
reportWriter.writerow([
"ImagePath",
"FileName",
"ImagerUsername",
"SessionYear",
"SessionMonth",
"SessionDay",
"SessionOther",
"SessionDate",
"CreationTime",
"CreationDurationSecs",
"CumulativeTimeSecs",
"CumulativeMeanSecs",
"CumulativeImagesPerMinute",
"MovingMeanSecs"])
# Merge moving mean into original data and write to file
for index, item in enumerate(series_data):
if str(data_mean[index]) == 'nan':
running_mean = 0
else:
running_mean = data_mean[index]
#print type(data_mean[index])
item.append(running_mean)
reportWriter.writerow(item)
# close file
reportFile.close()
print 'Analysis complete.'
|
mit
| 7,707,624,852,203,826,000
| 32.550505
| 276
| 0.657986
| false
| 3.602495
| false
| false
| false
|
masschallenge/django-accelerator
|
accelerator/migrations/0036_add_user_deferrable_modal.py
|
1
|
1688
|
# Generated by Django 2.2.10 on 2021-03-03 17:08
from django.conf import settings
from django.db import (
migrations,
models,
)
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accelerator', '0035_add_deferrable_modal_model'),
]
operations = [
migrations.CreateModel(
name='UserDeferrableModal',
fields=[
('id', models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('created_at', models.DateTimeField(
auto_now_add=True,
null=True)),
('updated_at', models.DateTimeField(
auto_now=True,
null=True)),
('is_deferred', models.BooleanField(default=False)),
('deferred_to', models.DateTimeField(
blank=True,
null=True)),
('deferrable_modal', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.ACCELERATOR_DEFERRABLEMODAL_MODEL)),
('user', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User Deferrable Modal',
'abstract': False,
'managed': True,
'swappable': None,
},
),
]
|
mit
| -8,131,171,024,774,738,000
| 32.098039
| 68
| 0.495853
| false
| 4.75493
| false
| false
| false
|
aixiwang/mqtt_datajs
|
upload_data_test.py
|
1
|
1737
|
#!/usr/bin/python
import sys,time
try:
import paho.mqtt.client as mqtt
except ImportError:
# This part is only required to run the example from within the examples
# directory when the module itself is not installed.
#
# If you have the module installed, just use "import paho.mqtt.client"
import os
import inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../src")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import paho.mqtt.client as mqtt
def on_connect(mqttc, obj, flags, rc):
print("rc: "+str(rc))
def on_message(mqttc, obj, msg):
print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
def on_publish(mqttc, obj, mid):
print("mid: "+str(mid))
def on_subscribe(mqttc, obj, mid, granted_qos):
print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(mqttc, obj, level, string):
print(string)
# If you want to use a specific client id, use
# mqttc = mqtt.Client("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Uncomment to enable debug messages
#mqttc.on_log = on_log
#mqttc.username_pw_set('test1', 'test1')
mqttc.connect("test.mosquitto.org", 1883, 60)
#mqttc.connect("127.0.0.1", 1883, 60)
#mqttc.subscribe("room/#", 0)
while True:
(rc, final_mid) = mqttc.publish('home/room1/t1', '2.1', 0)
    print('sleep...' + str(rc) + str(final_mid))
time.sleep(1)
#mqttc.loop_forever()
|
bsd-3-clause
| 8,638,943,573,339,154,000
| 29.473684
| 137
| 0.679908
| false
| 2.91443
| false
| false
| false
|
scottsilverlabs/raspberrystem
|
rstem/projects/demos/two_buttons/button_test_aux.py
|
1
|
1652
|
#!/usr/bin/env python
import curses, time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def main(stdscr):
# Clear screen
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.addstr("Button Tester", curses.A_REVERSE)
stdscr.chgat(-1, curses.A_REVERSE)
#stdscr.addstr(curses.LINES-1, 0, "Press 'Q' to quit")
stdscr.nodelay(1) # make getch() non-blocking
# set up window to bounce ball
ball_win = curses.newwin(curses.LINES-2, curses.COLS, 1, 0)
ball_win.box()
#ball_win.addch(curses.LINES-1,curses.COLS-1, ord('F'))
# Update the internal window data structures
stdscr.noutrefresh()
ball_win.noutrefresh()
# Redraw the screen
curses.doupdate()
box_LINES, box_COLS = ball_win.getmaxyx()
ball_x, ball_y = (int(box_COLS/2), int(box_LINES/2))
while True:
# Quit if 'Q' was pressed
c = stdscr.getch()
if c == ord('Q') or c == ord('q'):
break
# remove previous location of ball
ball_win.addch(ball_y, ball_x, ord(' '))
stdscr.addstr(curses.LINES-1, 0, "Press 'Q' to quit | Left: {0} Right: {1}".format(not GPIO.input(23), not GPIO.input(18)))
        if not GPIO.input(23) and ball_x > 1:
ball_x -= 1
if not GPIO.input(18) and ball_x < box_COLS-2:
ball_x += 1
# update ball location
ball_win.addch(ball_y, ball_x, ord('0'))
# Refresh the windows from the bottom up
stdscr.noutrefresh()
ball_win.noutrefresh()
curses.doupdate()
# Restore the terminal
curses.nocbreak()
curses.echo()
curses.curs_set(1)
curses.endwin()
#stdscr.refresh()
#stdscr.getkey()
curses.wrapper(main)
|
apache-2.0
| 8,123,518,483,904,408,000
| 21.630137
| 129
| 0.673123
| false
| 2.465672
| false
| false
| false
|
thuma/sestationinfo
|
stationinfo.py
|
1
|
1600
|
import urllib2
import json
files = '''blataget-gtfs.csv
blekingetrafiken-gtfs.csv
dalatrafik-gtfs.csv
gotlandskommun-gtfs.csv
hallandstrafiken-gtfs.csv
jonkopingslanstrafik-gtfs.csv
kalmarlanstrafik-gtfs.csv
lanstrafikenkronoberg-gtfs.csv
localdata-gtfs.csv
masexpressen.csv
nettbuss-gtfs.csv
nsb-gtfs.csv
ostgotatrafiken-gtfs.csv
pagelinks-gtfs.csv
peopletravelgrouop.csv
rt90cords-gtfs.csv
skanerafiken-gtfs.csv
sl-gtfs.csv
swebus-gtfs.csv
tib-gtfs.csv
treminalmaps-gtfs.csv
trv-gtfs.csv
ul-gtfs.csv
vasttrafik-gtfs.csv
xtrafik-gtfs.csv'''
data = files.split("\n")
print data
alldata = {}
for filename in data:
alldata[filename] = {}
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/'+filename)
downloaded = response.read().split("\n")
rubriker = downloaded[0].split(";")
downloaded[0] = downloaded[1]
for row in downloaded:
parts = row.split(";")
alldata[filename][parts[0]] = {}
for i in range(len(parts)):
alldata[filename][parts[0]][rubriker[i]] = parts[i]
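# After the loops, alldata maps file name -> stop id (the first column) ->
# column header -> value, so a lookup like the one below returns the full
# row for that stop as a dict keyed by the CSV header.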
print alldata['hallandstrafiken-gtfs.csv']['7400110']
'''
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/treminalmaps-gtfs.csv')
maps = response.read()
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/treminalmaps-gtfs.csv')
maps = response.read()
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/treminalmaps-gtfs.csv')
maps = response.read()'''
|
gpl-2.0
| 7,348,605,985,020,554,000
| 28.109091
| 130
| 0.754375
| false
| 2.515723
| false
| false
| false
|
JasonBristol/spor-ct
|
spor/research/models.py
|
1
|
1571
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
from django.utils.text import slugify
class Project(models.Model):
title = models.CharField(max_length=50)
tagline = models.CharField(max_length=255)
description = models.TextField()
author = models.ForeignKey(User)
thumbnail = models.ImageField(upload_to="research/img")
thumbnail_200x100 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(200, 100)], format='PNG', options={'quality': 100})
thumbnail_500x300 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(500, 300)], format='PNG', options={'quality': 100})
thumbnail_700x400 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(700, 400)], format='PNG', options={'quality': 100})
thumbnail_750x500 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(750, 500)], format='PNG', options={'quality': 100})
document = models.FileField(upload_to="research/{0}/".format(1), blank=True)
date_published = models.DateTimeField(auto_now=True)
related_projects = models.ManyToManyField('self', blank=True)
publish = models.BooleanField(default=False)
slug = models.SlugField(unique=True, help_text="Only change this if you know what you are doing")
def __unicode__(self):
return self.title
def save(self, *args, **kw):
self.slug = slugify(self.title)
super(Project, self).save(*args, **kw)
|
mit
| 6,532,919,822,770,132,000
| 48.09375
| 135
| 0.724379
| false
| 3.879012
| false
| false
| false
|
rboman/progs
|
sandbox/tkinter/playing_with_tkinter.py
|
1
|
4424
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
from future import standard_library
standard_library.install_aliases()
# nx.set("20")
def sortir():
root.quit()
root = Tk()
root.title('Parameters')
ni = IntVar()
ni.set(50)
nx = StringVar()
nx.set("10")
frame1 = Frame(root)
lab1 = Label(frame1, text="Cells along X (nx)", relief=SUNKEN)
# or relief=RAISED, SUNKEN, FLAT, RIDGE, GROOVE, and SOLID
lab1.pack(side=LEFT)
ent1 = Entry(frame1, textvariable=nx, width=5)
ent1.pack(side=LEFT)
frame1.pack(pady=5)
ny = StringVar()
ny.set("10")
frame2 = Frame(root)
lab2 = Label(frame2, text="Cells along Y (ny)", bg='red', fg='yellow')
lab2.pack(side=LEFT)
ent2 = Entry(frame2, textvariable=ny, width=10, state=DISABLED, relief=GROOVE)
# or state=ACTIVE, NORMAL
ent2.pack(side=LEFT)
frame2.pack(pady=5)
frame3 = Frame(root)
lab3 = Label(frame3, text="Radius", borderwidth=5, font=('Arial', 12, 'bold'))
lab3.pack(side=LEFT)
ent3 = Entry(frame3, textvariable=ny, width=10, justify=RIGHT)
# or justify=LEFT, RIGHT, CENTER
ent3.pack(side=LEFT)
frame3.pack(pady=5)
frame4 = Frame(root)
lab41 = Label(frame4, text="X Length")
lab41.grid(row=1, column=1)
ent41 = Entry(frame4, width=30)
ent41.grid(row=1, column=2, sticky=W)
lab42 = Label(frame4, text="Y Length")
lab42.grid(row=2, column=1)
ent42 = Entry(frame4, width=10)
ent42.insert(0, "blabla")
ent42.grid(row=2, column=2, sticky=E)  # sticky = N, S, E, W or NS, NW, etc.
lab43 = Label(frame4, text="A really long text")
lab43.grid(row=3, column=1, columnspan=2)
btn = Button(frame4, text="End")
btn.grid(row=4, column=1, columnspan=2)
def stop(event):
print(' click!')
btn.configure(bg='red')
lab42.destroy()
ent42.delete(0, len(ent42.get()))
btn.bind('<Button-1>', stop)
frame4.pack()
def affiche(x):
print(x)
list = ["one", "two", "three"]
dict = {}
for num in list:
def do_this(x=num): return affiche(x)
dict[num] = Button(root, text=num, command=do_this)
dict[num].pack()
but = Button(root, text="Start", command=sortir)
but.pack()
root.bind('q', stop)
root.bind('<Escape>', stop)
# mouse: <Enter>,<Leave>,<Button-3>,<Double-Button-1>,<B1-Motion>,<ButtonRelease>,<Shift-Button-1>
# kb: <Key>,<KeyRelease>,<Return>,...
win2 = Toplevel(root)
win2.title("Toplevels")
win2.maxsize(width=300, height=200)
win2.minsize(width=150, height=100)
win2.resizable(width=YES, height=NO)
def printVal():
print(num_holder.get())
num_holder = IntVar()
rb1 = Radiobutton(win2, text="Five", variable=num_holder,
value=5, command=printVal)
rb2 = Radiobutton(win2, text="Three", variable=num_holder,
value=3, command=printVal)
rb1.pack()
rb2.pack()
def printVal2():
print(txt1_holder.get())
print(txt2_holder.get())
txt1_holder = StringVar()
txt2_holder = StringVar()
rb1 = Checkbutton(win2, text="Five", variable=txt1_holder,
onvalue="FiveOn", offvalue="FiveOff", command=printVal2)
rb2 = Checkbutton(win2, text="Three", variable=txt2_holder,
onvalue="ThreeOn", offvalue="ThreeOff", command=printVal2)
rb1.pack()
rb2.pack()
def printVal3(x):
print(list.curselection())
choices = ["Red", "Orange", "Yellow", "Green", "Blue", "Purple"]
list = Listbox(win2, height=2, selectmode=SINGLE)
list.pack()
for item in choices:
list.insert(END, item)
list.bind('<Button-1>', printVal3)
scroll = Scrollbar(win2, command=list.yview)
list.configure(yscrollcommand=scroll.set)
scroll.pack()
but = Button(win2, text=" ")
but.pack()
def printVal4(x):
print(scale.get())
but.configure(text=scale.get())
scale = Scale(win2, orient=HORIZONTAL, length=100,
from_=0, to=100, tickinterval=50,
command=printVal4)
scale.pack()
Label(win2, bitmap="warning", cursor="pirate").pack()
picture = PhotoImage(file="bouteille.gif")
Label(win2, image=picture, cursor="fleur").pack()
def message():
rt2 = Toplevel(root)
msg = Message(rt2, text="Here is the first line of text. "
"Here is the next line of text. "
"Now we are on line three. "
"Oooh, look mom, line four! "
"Okay, that's enough. Goodbye.", bg="white", fg="red")
msg.pack(fill=BOTH)
rt2.transient(root)
message()
root.mainloop()
root.withdraw()
# root.destroy()
# print 'nx=', ent1.get()
print('nx=', nx.get())
print('ny=', ny.get())
|
apache-2.0
| 8,750,211,535,864,202,000
| 21.804124
| 98
| 0.654837
| false
| 2.732551
| false
| false
| false
|
jittat/ku-eng-direct-admission
|
application/fields.py
|
1
|
1109
|
from django.db import models
class IntegerListField(models.Field):
"""
IntegerListField keeps a list of int as a comma-separated string.
>>> g = IntegerListField()
>>> g.get_db_prep_value([1,2,-1,20,30,40,-100])
'1,2,-1,20,30,40,-100'
>>> g.to_python('1,2,-10,3,4,-100,7')
    [1, 2, -10, 3, 4, -100, 7]
"""
__metaclass__ = models.SubfieldBase
def db_type(self):
return 'text'
def to_python(self, value):
if isinstance(value, list):
return value
if value==None or value=='':
return []
else:
if value[0]=='[':
value = value[1:]
if value[-1]==']':
value = value[:-1]
return [ int(r) for r in value.split(',') ]
def get_db_prep_value(self, value):
return ','.join([str(r) for r in value])
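# Illustrative usage (model and field names are hypothetical):
#
#   class Applicant(models.Model):
#       ranks = IntegerListField(blank=True)
#
# 'ranks' is stored as a comma-separated TEXT column and converted back to a
# list of ints by to_python() via SubfieldBase.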
# south introspection
from south.modelsinspector import add_introspection_rules
add_introspection_rules(
[(
[IntegerListField],
[],
{},
),
], ["^application\.fields\.IntegerListField"])
|
agpl-3.0
| -7,671,050,940,817,247,000
| 23.644444
| 69
| 0.522092
| false
| 3.588997
| false
| false
| false
|
p1c2u/openapi-core
|
tests/unit/unmarshalling/test_validate.py
|
1
|
32347
|
import datetime
from unittest import mock
import pytest
from openapi_core.extensions.models.models import Model
from openapi_core.spec.paths import SpecPath
from openapi_core.unmarshalling.schemas.exceptions import (
FormatterNotFoundError,
)
from openapi_core.unmarshalling.schemas.exceptions import InvalidSchemaValue
from openapi_core.unmarshalling.schemas.factories import (
SchemaUnmarshallersFactory,
)
from openapi_core.unmarshalling.schemas.util import build_format_checker
class TestSchemaValidate:
@pytest.fixture
def validator_factory(self):
def create_validator(schema):
format_checker = build_format_checker()
return SchemaUnmarshallersFactory(
format_checker=format_checker
).create(schema)
return create_validator
@pytest.mark.parametrize(
"schema_type",
[
"boolean",
"array",
"integer",
"number",
"string",
],
)
def test_null(self, schema_type, validator_factory):
spec = {
"type": schema_type,
}
schema = SpecPath.from_spec(spec)
value = None
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"schema_type",
[
"boolean",
"array",
"integer",
"number",
"string",
],
)
def test_nullable(self, schema_type, validator_factory):
spec = {
"type": schema_type,
"nullable": True,
}
schema = SpecPath.from_spec(spec)
value = None
result = validator_factory(schema).validate(value)
assert result is None
def test_string_format_custom_missing(self, validator_factory):
custom_format = "custom"
spec = {
"type": "string",
"format": custom_format,
}
schema = SpecPath.from_spec(spec)
value = "x"
with pytest.raises(FormatterNotFoundError):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [False, True])
def test_boolean(self, value, validator_factory):
spec = {
"type": "boolean",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 3.14, "true", [True, False]])
def test_boolean_invalid(self, value, validator_factory):
spec = {
"type": "boolean",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [(1, 2)])
def test_array_no_schema(self, value, validator_factory):
spec = {
"type": "array",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[1, 2]])
def test_array(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "integer",
},
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, 1, 3.14, "true", (3, 4)])
def test_array_invalid(self, value, validator_factory):
spec = {
"type": "array",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [1, 3])
def test_integer(self, value, validator_factory):
spec = {
"type": "integer",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, 3.14, "true", [1, 2]])
def test_integer_invalid(self, value, validator_factory):
spec = {
"type": "integer",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_integer_minimum_invalid(self, value, validator_factory):
spec = {
"type": "integer",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [4, 5, 6])
def test_integer_minimum(self, value, validator_factory):
spec = {
"type": "integer",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [4, 5, 6])
def test_integer_maximum_invalid(self, value, validator_factory):
spec = {
"type": "integer",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_integer_maximum(self, value, validator_factory):
spec = {
"type": "integer",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 2, 4])
def test_integer_multiple_of_invalid(self, value, validator_factory):
spec = {
"type": "integer",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [3, 6, 18])
def test_integer_multiple_of(self, value, validator_factory):
spec = {
"type": "integer",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 3.14])
def test_number(self, value, validator_factory):
spec = {
"type": "number",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, "true", [1, 3]])
def test_number_invalid(self, value, validator_factory):
spec = {
"type": "number",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_number_minimum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [3, 4, 5])
def test_number_minimum(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 2, 3])
def test_number_exclusive_minimum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
"exclusiveMinimum": True,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [4, 5, 6])
def test_number_exclusive_minimum(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
"exclusiveMinimum": True,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [4, 5, 6])
def test_number_maximum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [1, 2, 3])
def test_number_maximum(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [3, 4, 5])
def test_number_exclusive_maximum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
"exclusiveMaximum": True,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_number_exclusive_maximum(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
"exclusiveMaximum": True,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 2, 4])
def test_number_multiple_of_invalid(self, value, validator_factory):
spec = {
"type": "number",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [3, 6, 18])
def test_number_multiple_of(self, value, validator_factory):
spec = {
"type": "number",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", ["true", b"test"])
def test_string(self, value, validator_factory):
spec = {
"type": "string",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, 1, 3.14, [1, 3]])
def test_string_invalid(self, value, validator_factory):
spec = {
"type": "string",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
b"true",
"test",
False,
1,
3.14,
[1, 3],
datetime.datetime(1989, 1, 2),
],
)
def test_string_format_date_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "date",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
"1989-01-02",
"2018-01-02",
],
)
def test_string_format_date(self, value, validator_factory):
spec = {
"type": "string",
"format": "date",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"12345678-1234-5678-1234-567812345678",
],
)
def test_string_format_uuid(self, value, validator_factory):
spec = {
"type": "string",
"format": "uuid",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
b"true",
"true",
False,
1,
3.14,
[1, 3],
datetime.date(2018, 1, 2),
datetime.datetime(2018, 1, 2, 23, 59, 59),
],
)
def test_string_format_uuid_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "uuid",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
b"true",
"true",
False,
1,
3.14,
[1, 3],
"1989-01-02",
],
)
def test_string_format_datetime_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "date-time",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
"1989-01-02T00:00:00Z",
"2018-01-02T23:59:59Z",
],
)
    @mock.patch(
        "openapi_schema_validator._format.DATETIME_HAS_STRICT_RFC3339", True
    )
    @mock.patch(
        "openapi_schema_validator._format.DATETIME_HAS_ISODATE", False
    )
def test_string_format_datetime_strict_rfc3339(
self, value, validator_factory
):
spec = {
"type": "string",
"format": "date-time",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"1989-01-02T00:00:00Z",
"2018-01-02T23:59:59Z",
],
)
    @mock.patch(
        "openapi_schema_validator._format.DATETIME_HAS_STRICT_RFC3339",
        False,
    )
    @mock.patch(
        "openapi_schema_validator._format.DATETIME_HAS_ISODATE", True
    )
def test_string_format_datetime_isodate(self, value, validator_factory):
spec = {
"type": "string",
"format": "date-time",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"true",
False,
1,
3.14,
[1, 3],
"1989-01-02",
"1989-01-02T00:00:00Z",
],
)
def test_string_format_binary_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "binary",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
b"stream",
b"text",
],
)
def test_string_format_binary(self, value, validator_factory):
spec = {
"type": "string",
"format": "binary",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
b"dGVzdA==",
"dGVzdA==",
],
)
def test_string_format_byte(self, value, validator_factory):
spec = {
"type": "string",
"format": "byte",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"tsssst",
b"tsssst",
b"tesddddsdsdst",
],
)
def test_string_format_byte_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "byte",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
"test",
b"stream",
datetime.date(1989, 1, 2),
datetime.datetime(1989, 1, 2, 0, 0, 0),
],
)
def test_string_format_unknown(self, value, validator_factory):
unknown_format = "unknown"
spec = {
"type": "string",
"format": unknown_format,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(FormatterNotFoundError):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["", "a", "ab"])
def test_string_min_length_invalid(self, value, validator_factory):
spec = {
"type": "string",
"minLength": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["abc", "abcd"])
def test_string_min_length(self, value, validator_factory):
spec = {
"type": "string",
"minLength": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"",
],
)
def test_string_max_length_invalid_schema(self, value, validator_factory):
spec = {
"type": "string",
"maxLength": -1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["ab", "abc"])
def test_string_max_length_invalid(self, value, validator_factory):
spec = {
"type": "string",
"maxLength": 1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["", "a"])
def test_string_max_length(self, value, validator_factory):
spec = {
"type": "string",
"maxLength": 1,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", ["foo", "bar"])
def test_string_pattern_invalid(self, value, validator_factory):
spec = {
"type": "string",
"pattern": "baz",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["bar", "foobar"])
def test_string_pattern(self, value, validator_factory):
spec = {
"type": "string",
"pattern": "bar",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", ["true", False, 1, 3.14, [1, 3]])
def test_object_not_an_object(self, value, validator_factory):
spec = {
"type": "object",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
Model(),
],
)
def test_object_multiple_one_of(self, value, validator_factory):
one_of = [
{
"type": "object",
},
{
"type": "object",
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_different_type_one_of(self, value, validator_factory):
one_of = [
{
"type": "integer",
},
{
"type": "string",
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_no_one_of(self, value, validator_factory):
one_of = [
{
"type": "object",
"required": [
"test1",
],
"properties": {
"test1": {
"type": "string",
},
},
},
{
"type": "object",
"required": [
"test2",
],
"properties": {
"test2": {
"type": "string",
},
},
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{
"foo": "FOO",
},
{
"foo": "FOO",
"bar": "BAR",
},
],
)
def test_unambiguous_one_of(self, value, validator_factory):
one_of = [
{
"type": "object",
"required": [
"foo",
],
"properties": {
"foo": {
"type": "string",
},
},
"additionalProperties": False,
},
{
"type": "object",
"required": ["foo", "bar"],
"properties": {
"foo": {
"type": "string",
},
"bar": {
"type": "string",
},
},
"additionalProperties": False,
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_default_property(self, value, validator_factory):
spec = {
"type": "object",
"default": "value1",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_min_properties_invalid_schema(
self, value, validator_factory
):
spec = {
"type": "object",
"minProperties": 2,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_min_properties_invalid(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"minProperties": 4,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_min_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"minProperties": 1,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_max_properties_invalid_schema(
self, value, validator_factory
):
spec = {
"type": "object",
"maxProperties": -1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_max_properties_invalid(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"maxProperties": 0,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_max_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"maxProperties": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{"additional": 1},
],
)
def test_object_additional_properties(self, value, validator_factory):
spec = {
"type": "object",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{"additional": 1},
],
)
def test_object_additional_properties_false(
self, value, validator_factory
):
spec = {
"type": "object",
"additionalProperties": False,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"additional": 1},
],
)
def test_object_additional_properties_object(
self, value, validator_factory
):
additional_properties = {
"type": "integer",
}
spec = {
"type": "object",
"additionalProperties": additional_properties,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [[], [1], [1, 2]])
def test_list_min_items_invalid(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"minItems": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[], [1], [1, 2]])
def test_list_min_items(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"minItems": 0,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
[],
],
)
def test_list_max_items_invalid_schema(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"maxItems": -1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[1, 2], [2, 3, 4]])
def test_list_max_items_invalid(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"maxItems": 1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[1, 2, 1], [2, 2]])
def test_list_unique_items_invalid(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"uniqueItems": True,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{
"someint": 123,
},
{
"somestr": "content",
},
{
"somestr": "content",
"someint": 123,
},
],
)
def test_object_with_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {
"somestr": {
"type": "string",
},
"someint": {
"type": "integer",
},
},
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{
"somestr": {},
"someint": 123,
},
{
"somestr": ["content1", "content2"],
"someint": 123,
},
{
"somestr": 123,
"someint": 123,
},
{
"somestr": "content",
"someint": 123,
"not_in_scheme_prop": 123,
},
],
)
def test_object_with_invalid_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {
"somestr": {
"type": "string",
},
"someint": {
"type": "integer",
},
},
"additionalProperties": False,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
|
bsd-3-clause
| 4,296,732,595,900,197,400
| 25.866279
| 79
| 0.498532
| false
| 4.144395
| true
| false
| false
|
freehackquest/backend
|
fhq-server/templates/tmpl_create_new_storage_update.py
|
1
|
4081
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import random
import string
updatespath = "../src/storages/updates/"
fileslist = [f for f in os.listdir(updatespath) if os.path.isfile(os.path.join(updatespath, f))]
pattern = r'.*StorageUpdateBase.*\(.*"([a-zA-Z0-9]*)".*,.*"([a-zA-Z0-9]*)".*,.*\).*'
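# matches the constructor line of a generated update class, e.g.:
#   : StorageUpdateBase("u0100", "615d8fddd", "TODO") {
# capturing the from-version and the to-version identifiers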
updates = []
for filename in fileslist:
filepath = os.path.join(updatespath, filename)
# print(filepath);
with open(filepath) as f:
line = f.readline()
while line:
line = line.strip()
if re.match(pattern, line):
versions = re.search(pattern, line, re.IGNORECASE)
if versions:
updates.append({
"from": versions.group(1),
"to": versions.group(2),
})
line = f.readline()
# for tests
'''
updates.append({
"from": "u0100",
"to": "615d8fddd",
})
updates.append({
"from": "615d8fddd",
"to": "995d8fddd",
})'''
# print all updates
# for v in updates:
# print("[" + v["from"] + "] -> [" + v["to"] + "]")
# find the ends in graph
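# (depth-first search over the update edges, starting from the empty version
#  string; the endpoint with the largest weight is the newest storage version)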
end_points = []
max_weight = 0
def recursive_search_endpoints(spoint, weight):
    global updates, end_points, max_weight
    found = False
    for v in updates:
        if v["from"] == spoint:
            found = True
            recursive_search_endpoints(v["to"], weight + 1)
    if not found:
        if weight > max_weight:
            max_weight = weight
        end_points.append({
            "point": spoint,
            "weight": weight
        })
recursive_search_endpoints("", 0)
print(end_points)
if len(end_points) == 0:
print("Not found updates")
exit(-1)
endpoint = ""
for i in end_points:
if i["weight"] == max_weight and endpoint == "":
endpoint = i["point"]
elif i["weight"] == max_weight and endpoint != "":
print("WARNING: Found points with same weights, will be used first. Ignored: " + i["point"])
print("Found point: " + endpoint + " weight: " + str(max_weight))
newpoint = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
newpoint_upper = newpoint.upper()
endpoint_upper = endpoint.upper()
filename_h = "update_" + endpoint + "_" + newpoint + ".h"
filename_cpp = "update_" + endpoint + "_" + newpoint + ".cpp"
filename_h = os.path.join(updatespath, filename_h)
filename_cpp = os.path.join(updatespath, filename_cpp)
print("Generate header file: " + filename_h)
f_h = open(filename_h, 'w')
f_h.write("#ifndef UPDATE_" + endpoint_upper + "_" + newpoint_upper + "_H\n")
f_h.write("#define UPDATE_" + endpoint_upper + "_" + newpoint_upper + "_H\n")
f_h.write("\n")
f_h.write("#include <storages.h>\n")
f_h.write("\n")
f_h.write("class Update_" + endpoint + "_" + newpoint + " : public StorageUpdateBase {\n")
f_h.write(" public:\n")
f_h.write(" Update_" + endpoint + "_" + newpoint + "();\n")
f_h.write(" virtual bool custom(Storage *pStorage, StorageConnection *pConn, std::string &error);\n")
f_h.write("};\n")
f_h.write("\n")
f_h.write("#endif // UPDATE_" + endpoint_upper + "_" + newpoint_upper + "_H\n")
f_h.close()
print("Generate source file: " + filename_cpp)
f_cpp = open(filename_cpp, 'w')
f_cpp.write("#include \"update_" + endpoint + "_" + newpoint + ".h\"\n")
f_cpp.write("\n")
f_cpp.write("REGISTRY_STORAGE_UPDATE(Update_" + endpoint + "_" + newpoint + ")\n")
f_cpp.write("\n")
f_cpp.write("Update_" + endpoint + "_" + newpoint + "::Update_" + endpoint + "_" + newpoint + "()\n")
f_cpp.write(" : StorageUpdateBase(\"" + endpoint + "\", \"" + newpoint + "\", \"TODO\") {\n")
f_cpp.write(" \n")
f_cpp.write(" // fill the array with struct changes\n")
f_cpp.write("}\n")
f_cpp.write("\n")
f_cpp.write("bool Update_" + endpoint + "_" + newpoint + "::custom(Storage *pStorage, StorageConnection *pConn, std::string &error) {\n")
f_cpp.write(" // here you can migrate data of correction if not just return true;\n")
f_cpp.write(" return true;\n")
f_cpp.write("}\n")
f_cpp.close()
|
mit
| -4,647,844,123,428,140,000
| 30.152672
| 137
| 0.57976
| false
| 3.122418
| false
| false
| false
|
dstroppa/openstack-smartos-nova-grizzly
|
nova/exception.py
|
1
|
33666
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = _('Unexpected error while running command.')
if exit_code is None:
exit_code = '-'
message = _('%(description)s\nCommand: %(cmd)s\n'
'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
'Stderr: %(stderr)r') % locals()
IOError.__init__(self, message)
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
    return dict((k, v) for k, v in original.iteritems() if "_pass" not in k)
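    # e.g. _cleanse_dict({'admin_password': 'x', 'name': 'vm1'})
    # returns {'name': 'vm1'}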
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
level=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It logs the exception as well as optionally sending
it to the notification system.
"""
# TODO(sandy): Find a way to import nova.notifier.api so we don't have
# to pass it in as a parameter. Otherwise we get a cyclic import of
# nova.notifier.api -> nova.utils -> nova.exception :(
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception, e:
with excutils.save_and_reraise_exception():
if notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, *args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# Use a temp vars so we don't shadow
# our outer definitions.
temp_level = level
if not temp_level:
temp_level = notifier.ERROR
temp_type = event_type
if not temp_type:
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
temp_type = f.__name__
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
return functools.wraps(f)(wrapped)
return inner
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.message % kwargs
except Exception as e:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise e
else:
# at least get the core message out if something happened
message = self.message
super(NovaException, self).__init__(message)
class EC2APIError(NovaException):
message = _("Unknown")
def __init__(self, message=None, code=None):
self.msg = message
self.code = code
outstr = '%s' % message
super(EC2APIError, self).__init__(outstr)
class EncryptionFailure(NovaException):
message = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
message = _("Failed to decrypt text: %(reason)s")
class VirtualInterfaceCreateException(NovaException):
message = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
message = _("5 attempts to create virtual interface"
"with unique mac address failed")
class GlanceConnectionFailed(NovaException):
message = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class NotAuthorized(NovaException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotActive(NovaException):
message = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
message = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
message = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
message = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
message = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
message = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class VolumeUnattached(Invalid):
message = _("Volume %(volume_id)s is not attached to anything")
class InvalidKeypair(Invalid):
message = _("Keypair data is invalid")
class InvalidRequest(Invalid):
message = _("The request is invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received") + ": %(reason)s"
class InvalidVolume(Invalid):
message = _("Invalid volume") + ": %(reason)s"
class InvalidMetadata(Invalid):
message = _("Invalid metadata") + ": %(reason)s"
class InvalidMetadataSize(Invalid):
message = _("Invalid metadata size") + ": %(reason)s"
class InvalidPortRange(Invalid):
message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
message = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidCidr(Invalid):
message = _("Invalid cidr %(cidr)s.")
class InvalidUnicodeParameter(Invalid):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidAggregateAction(Invalid):
message = _("Cannot perform action '%(action)s' on aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
message = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
message = _("Sort key supplied was not valid.")
class InstanceInvalidState(Invalid):
message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
message = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
message = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
message = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
message = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
message = _("Failed to suspend instance") + ": %(reason)s"
class InstanceResumeFailure(Invalid):
message = _("Failed to resume instance: %(reason)s.")
class InstancePowerOnFailure(Invalid):
message = _("Failed to power on instance: %(reason)s.")
class InstancePowerOffFailure(Invalid):
message = _("Failed to power off instance: %(reason)s.")
class InstanceRebootFailure(Invalid):
message = _("Failed to reboot instance") + ": %(reason)s"
class InstanceTerminationFailure(Invalid):
message = _("Failed to terminate instance") + ": %(reason)s"
class InstanceDeployFailure(Invalid):
message = _("Failed to deploy instance") + ": %(reason)s"
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
message = _("Insufficient compute resources.")
class ComputeServiceUnavailable(ServiceUnavailable):
message = _("Compute service of %(host)s is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
message = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
message = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
message = _("The instance requires a newer hypervisor version than "
"has been provided.")
class DestinationDiskExists(Invalid):
message = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
message = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
message = _("The supplied device path (%(path)s) is in use.")
class DeviceIsBusy(Invalid):
message = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
message = _("Unacceptable CPU info") + ": %(reason)s"
class InvalidIpAddressError(Invalid):
message = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
message = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
message = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
message = _("Disk format %(disk_format)s is not acceptable")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
message = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
message = _("Invalid ID received %(id)s.")
class InvalidPeriodicTaskArg(Invalid):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
class ConstraintNotMet(NovaException):
message = _("Constraint not met.")
code = 412
class NotFound(NovaException):
message = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
message = _("No agent-build associated with id %(id)s.")
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
class DiskNotFound(NotFound):
message = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
message = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
class ImageNotFoundEC2(ImageNotFound):
message = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
message = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
message = _("Cannot find SR to read/write VDI.")
class NetworkDuplicated(NovaException):
message = _("Network %(network_id)s is duplicated.")
class NetworkInUse(NovaException):
message = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
message = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
message = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
message = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
message = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
message = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
message = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
message = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
message = _("No networks defined.")
class NetworkNotFoundForProject(NotFound):
message = _("Either Network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class DatastoreNotFound(NotFound):
message = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(NovaException):
message = _("Port %(port_id)s is still in use.")
class PortNotUsable(NovaException):
message = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(NovaException):
message = _("No free port available for instance %(instance)s.")
class FixedIpNotFound(NotFound):
message = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
message = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
message = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
message = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
message = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
message = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
message = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
message = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
message = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
message = _("Zero fixed ips could be found.")
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
pass
class FloatingIpExists(Duplicate):
message = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
message = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
message = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
message = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
message = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
message = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
message = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
message = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
message = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
message = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
message = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
message = _("Cannot disassociate auto assigined floating ip")
class KeypairNotFound(NotFound):
message = _("Keypair %(name)s not found for user %(user_id)s")
class CertificateNotFound(NotFound):
message = _("Certificate %(certificate_id)s not found.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
message = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
message = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
message = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
message = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
message = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExistsForInstance(Invalid):
message = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
message = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
message = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
message = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class NoUniqueMatch(NovaException):
message = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
message = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
message = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolNotFoundForHostType(NotFound):
message = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
message = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
message = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
message = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
message = _("Invalid console type %(console_type)s")
class InstanceTypeNotFound(NotFound):
message = _("Instance type %(instance_type_id)s could not be found.")
class InstanceTypeNotFoundByName(InstanceTypeNotFound):
message = _("Instance type with name %(instance_type_name)s "
"could not be found.")
class FlavorNotFound(NotFound):
message = _("Flavor %(flavor_id)s could not be found.")
class FlavorAccessNotFound(NotFound):
message = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class CellNotFound(NotFound):
message = _("Cell %(cell_name)s doesn't exist.")
class CellRoutingInconsistency(NovaException):
message = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
message = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
message = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
message = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
message = _("No cells available matching scheduling criteria.")
class CellError(NovaException):
message = _("Exception received during cell processing: %(exc_name)s.")
class InstanceUnknownCell(NotFound):
message = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerCostFunctionNotFound(NotFound):
message = _("Scheduler cost function %(cost_fn_str)s could"
" not be found.")
class SchedulerWeightFlagNotFound(NotFound):
message = _("Scheduler weight flag not found: %(flag_name)s")
class InstanceMetadataNotFound(NotFound):
message = _("Instance %(instance_uuid)s has no metadata with "
"key %(metadata_key)s.")
class InstanceSystemMetadataNotFound(NotFound):
message = _("Instance %(instance_uuid)s has no system metadata with "
"key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
message = _("Instance Type %(instance_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
message = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
message = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
message = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
message = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
message = _("Action not allowed.")
class ImageRotationNotAllowed(NovaException):
message = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
message = _("Rotation param is required for backup image_type")
class KeyPairExists(Duplicate):
message = _("Key pair %(key_name)s already exists.")
class InstanceExists(Duplicate):
message = _("Instance %(name)s already exists.")
class InstanceTypeExists(Duplicate):
message = _("Instance Type with name %(name)s already exists.")
class InstanceTypeIdExists(Duplicate):
message = _("Instance Type with ID %(flavor_id)s already exists.")
class FlavorAccessExists(Duplicate):
message = _("Flavor access alreay exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
message = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
message = _("%(path)s is not on local storage: %(reason)s")
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
message = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
message = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
message = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
message = _("Resize error: %(reason)s")
class ImageTooLarge(NovaException):
message = _("Image is larger than instance type allows")
class InstanceTypeMemoryTooSmall(NovaException):
message = _("Instance type's memory is too small for requested image.")
class InstanceTypeDiskTooSmall(NovaException):
message = _("Instance type's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
message = _("Insufficient free memory on compute node to start %(uuid)s.")
class CouldNotFetchMetrics(NovaException):
message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
class NoValidHost(NovaException):
message = _("No valid host was found. %(reason)s")
class QuotaError(NovaException):
message = _("Quota exceeded") + ": code=%(code)s"
code = 413
headers = {'Retry-After': 0}
safe = True
class TooManyInstances(QuotaError):
message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)d of %(allowed)d %(resource)s")
class FloatingIpLimitExceeded(QuotaError):
message = _("Maximum number of floating ips exceeded")
class MetadataLimitExceeded(QuotaError):
message = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
message = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
message = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
message = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
message = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
message = _("Maximum number of security groups or rules exceeded")
class AggregateError(NovaException):
message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(Duplicate):
message = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(Duplicate):
message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class InstanceTypeCreateFailed(NovaException):
message = _("Unable to create instance type")
class InstancePasswordSetFailed(NovaException):
message = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class DuplicateVlan(Duplicate):
message = _("Detected existing vlan with id %(vlan)d")
class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
message = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class NodeNotFound(NotFound):
message = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
message = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
message = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
message = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
message = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
message = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
message = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
message = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
message = _("Instance %(instance_uuid)s is locked")
class ConfigDriveMountFailed(NovaException):
message = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
message = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
message = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
message = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(NovaException):
message = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
message = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
message = _("unexpected task state: expecting %(expected)s but "
"the actual state is %(actual)s")
class InstanceActionNotFound(NovaException):
message = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
message = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
message = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
message = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
message = _('Instance recreate is not implemented by this virt driver.')
class ServiceGroupUnavailable(NovaException):
message = _("The service from servicegroup driver %(driver) is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
message = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
message = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
message = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
|
apache-2.0
| -3,833,240,339,379,516,400
| 27.267003
| 79
| 0.67335
| false
| 4.23312
| false
| false
| false
|
jiangzhengshen/Interesting
|
Crawler/crawler_enhanced.py
|
1
|
10134
|
import argparse
import hashlib
import logging
import os
import queue
import socket
from urllib.parse import quote, urlsplit
from urllib.request import Request, urlopen
from urllib.error import URLError
from pyquery import PyQuery
''' Crawl web pages starting from the given URLs, down to the given depth.
PHP pages must first be resolved to their real site address.
next:
parallelization
resumable crawling (checkpoint/restart)
URLCrawler: store the results as a bookmarks-style HTML file
'''
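# Example invocation (sketch; the flags follow the argparse spec in main() below):
#   python crawler_enhanced.py -a "http://example.com;http://example.org" -d 2 -o ./out/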
class Crawler(object):
def __init__(self, args):
        # logging configuration
log_file = 'crawler.log'
logging.basicConfig(filename=log_file,
format='%(asctime)s -> %(levelname)s %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.DEBUG)
logging.info('\n=================== New Session =====================')
        ''' Set a global timeout (in seconds) for the whole socket layer; any later socket use inherits it '''
socket.setdefaulttimeout(30)
        ''' Parameters that never change are prefixed with "_" '''
self._init_urls = args.init_urls.split(";")
self._depth = args.depth
self._out_dir = args.out_dir if args.out_dir[-1] in ['/', '\\'] else args.out_dir + '/'
if not os.path.exists(self._out_dir):
os.mkdir(self._out_dir)
self.current_depth = 0
        self.url_queue = queue.Queue()  # queue of URLs to crawl, as (url, depth) tuples
for url in self._init_urls:
self.url_queue.put((url, self.current_depth))
        self.cached_urls = {}  # original URL of every crawled page: url -> [count, depth]
@staticmethod
def __get_html(url):
try:
return PyQuery(url=url, parser='html')
except Exception as e:
logging.warning('PyQuery: %s : %s\n\tURL: %s', type(e), e, url)
return None
@staticmethod
def __get_real_url(raw_url):
try:
            '''
            The safe set below follows the characters that JS encodeURI leaves unencoded:
            escape leaves 69 characters unencoded: *,+,-,.,/,@,_,0-9,a-z,A-Z
            encodeURI leaves 82 characters unencoded: !,#,$,&,',(,),*,+,,,-,.,/,:,;,=,?,@,_,~,0-9,a-z,A-Z
            encodeURIComponent leaves 71 characters unencoded: !,',(,),*,-,.,_,~,0-9,a-z,A-Z
            '''
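            # e.g. quote('http://e.com/a b?x=1', safe=...) would yield
            # 'http://e.com/a%20b?x=1' (space is not in the safe set)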
url = quote(raw_url, safe='!#$&()*+,-./:;=?@_~\'', encoding='utf-8')
req = Request(url)
response = urlopen(req)
new_url = response.geturl()
response.close()
return new_url
except URLError as e:
logging.warning('Request: URLError: %s\n\tRaw URL: %s', e.reason, raw_url)
return ''
except Exception as e:
logging.warning('Request: %s : %s\n\tRaw URL: %s', type(e), e, raw_url)
return ''
def __extract_url(self, html_pyquery):
""" extract all the urls from html, except for the cached urls """
try:
html_pyquery.make_links_absolute()
all_urls = html_pyquery('a').map(lambda i, element: PyQuery(element)('a').attr('href'))
url_list = set()
for url in all_urls:
real_url = self.__get_real_url(url)
if not real_url:
continue
if real_url in self.cached_urls:
self.cached_urls[real_url][0] += 1
else:
url_list.add(real_url)
return list(url_list)
except Exception as e:
logging.warning('PyQuery: %s : %s', type(e), e)
return []
def __dump_cached_urls(self):
with open('cached_urls.txt', 'w') as dump_file:
for url in self.cached_urls:
dump_file.write(
url + '\t' + str(self.cached_urls[url][0]) + '\t' + str(self.cached_urls[url][1]) + '\n')
@staticmethod
def filter_url(urls):
""" could be personalized implemented """
return urls
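    # A subclass might, for instance, keep only same-host links (sketch;
    # 'example.com' is a placeholder):
    #     @staticmethod
    #     def filter_url(urls):
    #         return [u for u in urls if urlsplit(u).netloc == 'example.com']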
def save_content(self, url, depth, html_pyquery):
""" could be personalized implemented """
pass
def run(self):
while not self.url_queue.empty() and self.current_depth <= self._depth:
url_info = self.url_queue.get()
url = url_info[0]
depth = url_info[1]
self.current_depth = depth
logging.info('Depth: %d, URL: %s', depth, url)
''' get html content from the url '''
html_pyquery = self.__get_html(url)
if not html_pyquery:
continue
''' save the needed information from the html content, e.g., images, href, etc. '''
self.save_content(url, depth, html_pyquery)
''' cache the crawled urls '''
if url in self.cached_urls:
logging.warning('URL: %s -> There should not be cached urls in the queue, check your code !!!', url)
break
else:
self.cached_urls[url] = [1, depth]
''' extract urls from the html content, except for the cached urls '''
extracted_urls = []
if self.current_depth < self._depth:
extracted_urls = self.__extract_url(html_pyquery)
''' only retain the needed urls, and put them into the queue '''
filtered_urls = self.filter_url(extracted_urls)
for new_url in filtered_urls:
self.url_queue.put((new_url, depth + 1))
self.__dump_cached_urls()
class URLCrawler(Crawler):
def save_content(self, url, depth, html_pyquery):
parse_list = urlsplit(url)
host = parse_list[0] + '://' + parse_list[1]
with open(self._out_dir + 'savedLinks.txt', 'a') as outfile:
outfile.write(host + '\n')
class ImageCrawler(Crawler):
def save_content(self, url, depth, html_pyquery):
all_imgs = html_pyquery('img').map(lambda i, element: PyQuery(element)('img').attr('src'))
for raw_url in all_imgs:
image_name = raw_url.split('/')[-1]
words = image_name.split('.')
suffix = ''
if len(words) > 1:
suffix = words[-1]
print(image_name + ', ' + suffix)
try:
img_url = quote(raw_url, safe='!#$&()*+,-./:;=?@_~\'', encoding='utf-8')
req = Request(img_url)
response = urlopen(req)
content = response.read()
m = hashlib.md5()
m.update(content)
content_hash = m.hexdigest()
filename = content_hash + '.' + suffix
if os.path.exists(self._out_dir + filename):
continue
with open(self._out_dir + filename, 'wb') as image_file:
image_file.write(content)
except URLError as e:
logging.warning('Request: URLError: %s\n\tRaw URL: %s', e.reason, raw_url)
continue
except Exception as e:
logging.warning('Request: %s : %s\n\tRaw URL: %s', type(e), e, raw_url)
continue
class ICML2019Crawler(Crawler):
"""
Parameters: -a https://icml.cc/Conferences/2019/Schedule?type=Poster -d 0
"""
@staticmethod
def heading_author(_, element):
entry = PyQuery(element)
heading = entry('.maincardBody').text()
author_list = entry('.maincardFooter').text().split(' · ')
return heading, author_list
def save_content(self, url, depth, html_pyquery):
all_entries = html_pyquery('.maincard')
heading_authors = all_entries.map(ICML2019Crawler.heading_author)
with open(self._out_dir + 'ICML2019.csv', 'w', encoding='utf8') as outfile:
for heading, author_list in heading_authors:
outfile.write('\t'.join([heading] + author_list) + '\n')
class CVPR2019Crawler(Crawler):
"""
Parameters: -a http://cvpr2019.thecvf.com/program/main_conference -d 0
"""
def save_content(self, url, depth, html_pyquery):
topic_title_author_dic = {}
all_tables = html_pyquery('table')
for a_table in all_tables.items():
entries = a_table('tr').filter(lambda i, this: PyQuery(this).attr('class') != 'blue-bottom')
current_topic = ''
current_id = 0
current_title = ''
current_authors = []
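            # each data row contributes 6 <td> cells: cell 0 carries the topic
            # (only on the first row of a topic block), cell 3 the title,
            # cell 4 the authors, cell 5 the poster id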
for idx, field in enumerate(entries('td')):
if idx % 6 == 0:
if field.text is not None:
current_topic = field.text
elif idx % 6 == 3:
current_title = field.text
elif idx % 6 == 4:
current_authors = field.text.split(';')
elif idx % 6 == 5:
current_id = field.text
if current_id not in topic_title_author_dic.keys():
topic_title_author_dic[current_id] = [current_topic, current_title] + current_authors
with open(self._out_dir + 'CVPR2019.csv', 'w', encoding='utf8') as outfile:
for id, topic_title_author_list in topic_title_author_dic.items():
outfile.write('\t'.join([id] + topic_title_author_list) + '\n')
def main():
# if sys.modules['idlelib']:
# sys.argv.extend(input("Args: ").split())
# args.init_urls = 'http://www.baidu.com'
# args.depth = 3
parser = argparse.ArgumentParser(description='A crawler for website')
parser.add_argument('-a', type=str, required=True, metavar='WebAddr', dest='init_urls',
help='Specify the Website Address')
parser.add_argument('-d', type=int, default=1, metavar='CrawlDepth', dest='depth', help='Specify the Crawler Depth')
parser.add_argument('-o', type=str, default='./', metavar='OutputDir', dest='out_dir',
help='Specify the Output Directory')
args = parser.parse_args()
crawler = CVPR2019Crawler(args)
crawler.run()
if __name__ == '__main__':
main()
|
mit
| -901,503,238,482,499,200
| 36.588462
| 120
| 0.527883
| false
| 3.499105
| false
| false
| false
|
jacobwindsor/pubchem-ranker
|
CompoundRanker/DataManipulators/CIDGatherer.py
|
1
|
2702
|
import sys
import time
from CompoundRanker.database import get_db, query_db
from CompoundRanker import app
from requests import exceptions, get
class CIDGatherer(object):
def harvest(self, dataset_id):
"""
Harvest all of the CIDs from PubChem
:return: List of tuples [(cid, metab_id),]
"""
# Query only returns the metabolites that don't already have CIDs associated
query = "SELECT t1.id, t1.cas from metabolites t1 " \
"LEFT JOIN pubchem_compounds t2 ON t2.metab_ID = t1.id " \
"WHERE t2.metab_ID is NULL AND t1.dataset_id is ?"
results = query_db(query, dataset_id)
count = len(results)
since_wait = 0
since_report = 0
cid_metab_id_map = [] # List of tuples
for i, result in enumerate(results):
since_wait += 1
since_report += 1
            if since_wait > 2:
                sys.stdout.write("Waiting 1 second \n")
                sys.stdout.flush()
                time.sleep(1)  # actually pause, so PubChem's rate limit is respected
                since_wait = 0
if since_report > 49:
sys.stdout.write(str(cid_metab_id_map))
sys.stdout.write("\n")
sys.stdout.flush()
since_report = 0
cids = self.get_cids(result['cas'])
metab_id = result['id']
if cids:
for cid in cids:
cid_metab_id_map.append((cid, metab_id))
# Progress
perc = ((i+1)/count) * 100
sys.stdout.write("%s%% \n" % perc)
sys.stdout.flush()
return cid_metab_id_map
def get_cids(self, cas):
"""
Use the PubChem API to get the CID
:param cas: string - CAS identifier
:return: list of CIDs
"""
uri = "http://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/%s/cids/json" \
"?email=%s"
try:
response = get((uri % (cas, app.config['ADMIN_EMAIL']))).json()
try:
cids = response['IdentifierList']['CID']
return cids
except KeyError:
return None
except (exceptions.ConnectionError, TimeoutError, exceptions.Timeout,
exceptions.ConnectTimeout, exceptions.ReadTimeout) as e:
            # Error: report the error and the CAS number it occurred on
            sys.stderr.write("Error: %s. Occurred on CAS: %s" % (e, cas))
sys.stderr.flush()
sys.stdout.flush()
def save(self, cid_metab_id_map):
insert_query = "INSERT INTO pubchem_compounds(CID, metab_ID) VALUES (?, ?)"
return query_db(insert_query, cid_metab_id_map, many=True)
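# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of driving the harvest/save pair for one dataset.
# The dataset id is hypothetical; query_db comes from CompoundRanker.database
# as imported above.
def _example_harvest(dataset_id=1):
    gatherer = CIDGatherer()
    cid_map = gatherer.harvest(dataset_id)  # [(cid, metab_id), ...]
    if cid_map:
        gatherer.save(cid_map)  # bulk INSERT into pubchem_compounds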
|
mit
| 3,315,396,116,340,576,000
| 31.95122
| 85
| 0.53738
| false
| 3.726897
| false
| false
| false
|
sertansenturk/symbtrdataextractor
|
symbtrdataextractor/unittests/extractor_tests.py
|
1
|
3913
|
import json
import os
from symbtrdataextractor.dataextractor import DataExtractor
from symbtrdataextractor.reader.mu2 import Mu2Reader
_curr_folder = os.path.dirname(os.path.abspath(__file__))
def _basic_txt_extractor(score_name, use_name=True):
txt_filename = os.path.join(_curr_folder, 'data', score_name + '.txt')
symbtr_name = score_name if use_name is True else None
# initialize the extractor
extractor = DataExtractor(
extract_all_labels=False, melody_sim_thres=0.7, lyrics_sim_thres=0.7,
save_structure_sim=True, get_recording_rels=False, print_warnings=True)
# extract txt_data
txt_data, is_data_valid = extractor.extract(txt_filename,
symbtr_name=symbtr_name)
# compare with a previously saved result
score_data_file = os.path.join(_curr_folder, 'data', score_name + '.json')
saved_data = json.load(open(score_data_file))
assert saved_data == txt_data, u"{0:s}: the result is different".format(
score_name)
assert is_data_valid, "The data is not valid (or the validations failed.)"
def test_with_instrumental():
"""
    Tests the result of an instrumental score
"""
scorename = 'ussak--sazsemaisi--aksaksemai----neyzen_aziz_dede'
_basic_txt_extractor(scorename)
def test_without_name():
"""
Tests the result of a score without the symbtr_name input given
"""
scorename = 'ussak--sazsemaisi--aksaksemai----neyzen_aziz_dede'
_basic_txt_extractor(scorename, use_name=False)
def test_with_free_usul():
"""
Tests the result of a score with free (serbest) usul
"""
scorename = 'saba--miraciye--serbest--pes_heman--nayi_osman_dede'
_basic_txt_extractor(scorename)
def test_with_phrase_annotation():
"""
Tests the result of a score with phrase_annotations
"""
scorename = 'huzzam--sarki--curcuna--guzel_gun_gormedi--haci_arif_bey'
_basic_txt_extractor(scorename)
def test_with_vocal_section_starting_mid_measure():
"""
Tests the result with the score of a vocal composition in which some of
the lyrics lines start in middle of the measure
"""
scorename = 'hicaz_humayun--beste--hafif--olmada_diller--abdulhalim_aga'
_basic_txt_extractor(scorename)
def test_with_full_input():
"""
Tests the result with complete information available, i.e. mbid, phrase
annotation and user provided segmentation
"""
# inputs
scorename = 'kurdilihicazkar--sarki--agiraksak--ehl-i_askin--tatyos_efendi'
txt_filename = os.path.join(_curr_folder, 'data', scorename + '.txt')
mbid = 'b43fd61e-522c-4af4-821d-db85722bf48c'
auto_seg_file = os.path.join(_curr_folder, 'data', scorename + '.autoSeg')
auto_seg_bounds = json.load(open(auto_seg_file, 'r'))['boundary_noteIdx']
mu2_filename = os.path.join(_curr_folder, 'data', scorename + '.mu2')
# initialize the extractor
extractor = DataExtractor(
extract_all_labels=False, melody_sim_thres=0.75, lyrics_sim_thres=0.75,
save_structure_sim=True, get_recording_rels=False, print_warnings=True)
# extract txt_data
txt_data, is_data_valid = extractor.extract(
txt_filename, symbtr_name=scorename, mbid=mbid,
segment_note_bound_idx=auto_seg_bounds)
# extract mu2 header metadata
mu2_header, header_row, is_header_valid = Mu2Reader.read_header(
mu2_filename, symbtr_name=scorename)
# merge
data = DataExtractor.merge(txt_data, mu2_header)
is_valid = is_data_valid and is_header_valid
# compare with a previously saved result
score_data_file = os.path.join(_curr_folder, 'data', scorename + '.json')
saved_data = json.load(open(score_data_file))
assert saved_data == data, u"{0:s}: the result is different".format(
scorename)
assert is_valid, "The data is not valid (or the validations failed.)"
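# --- Usage note (editor's addition) ---
# These test functions follow pytest conventions; a typical run from the
# package root (command is illustrative) would be:
#
#   pytest symbtrdataextractor/unittests/extractor_tests.py -q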
|
agpl-3.0
| 6,322,922,692,640,797,000
| 31.882353
| 79
| 0.675185
| false
| 3.202128
| true
| false
| false
|
takeshixx/deen
|
deen/plugins/codecs/plugin_url.py
|
1
|
1130
|
try:
# Python 3
import urllib.parse as urllibparse
except ImportError:
# Python 2
import urllib as urllibparse
from .. import DeenPlugin
class DeenPluginUrl(DeenPlugin):
name = 'url'
display_name = 'URL'
cmd_name = 'url'
cmd_help='URL encode/decode data'
def __init__(self):
super(DeenPluginUrl, self).__init__()
def process(self, data):
super(DeenPluginUrl, self).process(data)
try:
            # urllib's quote_plus expects str, so decode the bytes first
data = urllibparse.quote_plus(data.decode())
data = data.encode()
except Exception as e:
self.error = e
self.log.error(self.error)
self.log.debug(self.error, exc_info=True)
return data
def unprocess(self, data):
super(DeenPluginUrl, self).unprocess(data)
try:
data = urllibparse.unquote_plus(data.decode())
data = data.encode()
except (UnicodeDecodeError, TypeError) as e:
self.error = e
self.log.error(self.error)
self.log.debug(self.error, exc_info=True)
return data
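# --- Usage sketch (editor's addition, hedged) ---
# A round trip through the plugin, assuming the DeenPlugin base class can be
# instantiated without extra arguments (as the constructor above suggests).
# Both methods take and return bytes.
def _example_roundtrip():
    plugin = DeenPluginUrl()
    encoded = plugin.process(b'key=a value&x=1')  # e.g. b'key%3Da+value%26x%3D1'
    decoded = plugin.unprocess(encoded)
    assert decoded == b'key=a value&x=1'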
|
apache-2.0
| -2,507,691,634,774,062,600
| 26.560976
| 58
| 0.582301
| false
| 3.869863
| false
| false
| false
|
darknight-007/Firmware
|
testScripts/testOffboardPositionControlWithGainAndIntertialParamChange.py
|
1
|
5733
|
"""
testing offboard positon control with a simple takeoff script
"""
import rospy
from mavros_msgs.msg import State
from geometry_msgs.msg import PoseStamped, Point, Quaternion
import math
import numpy
from gazebo_msgs.srv import SetLinkProperties
from gazebo_msgs.srv import SetLinkPropertiesRequest
from gazebo_msgs.srv import GetLinkProperties
from gazebo_msgs.srv import GetLinkPropertiesRequest
from gazebo_msgs.srv import GetLinkPropertiesResponse
from sensor_msgs.msg import Joy
from mavros_msgs.srv import ParamSetRequest
from mavros_msgs.srv import ParamSet
from mavros_msgs.msg import ParamValue
class OffboardPosCtlWithOnlineDynamicalUpdates:
curr_pose = PoseStamped()
waypointIndex = 0
distThreshold = 0.4
sim_ctr = 1
des_pose = PoseStamped()
isReadyToFly = False
locations = numpy.matrix([[2, 0, 1, 0, 0, -0.48717451, -0.87330464],
[0, 2, 1, 0, 0, 0, 1],
[-2, 0, 1, 0., 0., 0.99902148, -0.04422762],
[0, -2, 1, 0, 0, 0, 0],
])
MPC_PITCH_P = 0
MPC_PITCH_D = 1
MPC_ROLL_P = 2
MPC_ROLL_D = 3
MPC_PITCHRATE_P = 4
MPC_PITCHRATE_D = 5
MPC_ROLLRATE_P = 6
MPC_ROLLRATE_D = 7
MPC_XY_CRUISE = 8
def __init__(self):
rospy.init_node('offboard_test', anonymous=True)
pose_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10)
mocap_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback=self.mocap_cb)
state_sub = rospy.Subscriber('/mavros/state', State, callback=self.state_cb)
nanokontrolSub = rospy.Subscriber('/nanokontrol/nanokontrol', Joy, callback=self.nanokontrolCallback)
gazebo_service_set_link_properties = rospy.ServiceProxy('/gazebo/set_link_properties', SetLinkProperties)
gazebo_service_get_link_properties = rospy.ServiceProxy('/gazebo/get_link_properties', GetLinkProperties)
self.param_service = rospy.ServiceProxy('/mavros/param/set', ParamSet)
rate = rospy.Rate(10) # Hz
rate.sleep()
self.des_pose = self.copy_pose(self.curr_pose)
shape = self.locations.shape
while not rospy.is_shutdown():
#print self.sim_ctr, shape[0], self.waypointIndex
            if self.waypointIndex == shape[0]:
self.waypointIndex = 0
self.sim_ctr += 1
if self.isReadyToFly:
des_x = self.locations[self.waypointIndex, 0]
des_y = self.locations[self.waypointIndex, 1]
des_z = self.locations[self.waypointIndex, 2]
self.des_pose.pose.position.x = des_x
self.des_pose.pose.position.y = des_y
self.des_pose.pose.position.z = des_z
self.des_pose.pose.orientation.x = self.locations[self.waypointIndex, 3]
self.des_pose.pose.orientation.y = self.locations[self.waypointIndex, 4]
self.des_pose.pose.orientation.z = self.locations[self.waypointIndex, 5]
self.des_pose.pose.orientation.w = self.locations[self.waypointIndex, 6]
curr_x = self.curr_pose.pose.position.x
curr_y = self.curr_pose.pose.position.y
curr_z = self.curr_pose.pose.position.z
dist = math.sqrt((curr_x - des_x)*(curr_x - des_x) + (curr_y - des_y)*(curr_y - des_y) + (curr_z - des_z)*(curr_z - des_z))
if dist < self.distThreshold:
self.waypointIndex += 1
#des_params = self.updateUAVInertialParam(gazebo_service_get_link_properties)
# print dist, curr_x, curr_y, curr_z, self.waypointIndex
pose_pub.publish(self.des_pose)
rate.sleep()
def updateUAVInertialParam(self, gazebo_service_get_link_properties):
# current_params = GetLinkPropertiesResponse()
# current_params = gazebo_service_get_link_properties.call(GetLinkPropertiesRequest('base_link'))
# des_params = current_params
# des_params = SetLinkPropertiesRequest()
# des_params.mass = current_params.mass + 0.3
# des_params.gravity_mode = current_params.gravity_mode
# des_params.com = current_params.com
# des_params.ixx = current_params.ixx
# des_params.ixy = current_params.ixy
# des_params.ixz = current_params.ixz
# des_params.iyy = current_params.iyy
# des_params.iyz = current_params.ixz
# des_params.izz = current_params.izz
# des_params.link_name = 'base_link'
# gazebo_service_set_link_properties.call(des_params)
des_params = 0
return des_params
def copy_pose(self, pose):
pt = pose.pose.position
quat = pose.pose.orientation
copied_pose = PoseStamped()
copied_pose.header.frame_id = pose.header.frame_id
copied_pose.pose.position = Point(pt.x, pt.y, pt.z)
copied_pose.pose.orientation = Quaternion(quat.x, quat.y, quat.z, quat.w)
return copied_pose
def mocap_cb(self, msg):
# print msg
self.curr_pose = msg
def state_cb(self,msg):
print msg.mode
if(msg.mode=='OFFBOARD'):
self.isReadyToFly = True
print "readyToFly"
def nanokontrolCallback(self,msg):
        velocity = ((msg.axes[0] + 1) * 4)  # map axis range [-1, 1] to [0, 8]
param = ParamValue()
param.real = velocity
paramReq = ParamSetRequest()
paramReq.param_id = 'MPC_XY_CRUISE'
paramReq.value = param
self.param_service.call(paramReq)
if __name__ == "__main__":
OffboardPosCtlWithOnlineDynamicalUpdates()
|
mit
| -5,823,130,695,435,417,000
| 35.987097
| 139
| 0.618524
| false
| 3.344807
| false
| false
| false
|
ytsarev/rally
|
rally/openstack/common/config/generator.py
|
1
|
10531
|
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from rally.openstack.common import gettextutils
from rally.openstack.common import importutils
gettextutils.install('rally')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def raise_extension_exception(extmanager, ep, err):
raise
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
on_load_failure_callback=raise_extension_exception
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'rally'
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
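# --- Usage sketch (editor's addition, hedged) ---
# The -l/--libraries option above discovers options through 'oslo.config.opts'
# entry points. Such an entry point is just a function returning (group, opts)
# pairs; a minimal example with a hypothetical option name:
def _example_list_opts():
    return [('DEFAULT',
             [cfg.StrOpt('example_opt', default='x', help='An example option.')])]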
|
apache-2.0
| 3,891,286,383,344,923,600
| 33.302932
| 79
| 0.578103
| false
| 3.734397
| false
| false
| false
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/resource.py
|
1
|
1819
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Resource) on 2019-05-07.
# 2019, SMART Health IT.
from . import fhirabstractresource
class Resource(fhirabstractresource.FHIRAbstractResource):
""" Base Resource.
This is the base resource type for everything.
"""
resource_type = "Resource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.id = None
""" Logical id of this artifact.
Type `str`. """
self.implicitRules = None
""" A set of rules under which this content was created.
Type `str`. """
self.language = None
""" Language of the resource content.
Type `str`. """
self.meta = None
""" Metadata about the resource.
Type `Meta` (represented as `dict` in JSON). """
super(Resource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Resource, self).elementProperties()
js.extend([
("id", "id", str, False, None, False),
("implicitRules", "implicitRules", str, False, None, False),
("language", "language", str, False, None, False),
("meta", "meta", meta.Meta, False, None, False),
])
return js
import sys
try:
from . import meta
except ImportError:
meta = sys.modules[__package__ + '.meta']
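# --- Usage sketch (editor's addition, hedged) ---
# Constructing the base resource from a JSON dictionary. With strict=True (the
# default) invalid properties raise FHIRValidationError; as_json() is inherited
# from the FHIR abstract base classes.
def _example_resource():
    res = Resource({'id': 'example-1', 'language': 'en'})
    return res.as_json()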
|
bsd-3-clause
| -6,473,601,764,319,109,000
| 29.830508
| 105
| 0.586586
| false
| 4.162471
| false
| false
| false
|
gouthambs/qtk-python
|
qtk/creators/indexes.py
|
1
|
3937
|
import QuantLib as ql
from .common import CreatorBase
from qtk.templates import Template as T
from qtk.fields import Field as F
class USDLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_USDLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.USDLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "USD"}
@classmethod
def set_info(cls):
cls.desc("Creates USD LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class CADLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_CADLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.CADLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "CAD"}
@classmethod
def set_info(cls):
cls.desc("Creates CAD LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class GBPLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_GBPLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.GBPLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "GBP"}
@classmethod
def set_info(cls):
cls.desc("Creates GBP LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class AUDLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_AUDLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.AUDLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "AUD"}
@classmethod
def set_info(cls):
cls.desc("Creates AUD LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class JPYLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_JPYLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.JPYLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "JPY"}
@classmethod
def set_info(cls):
cls.desc("Creates JPY LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class EURLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_EURLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.EURLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "EUR"}
@classmethod
def set_info(cls):
cls.desc("Creates EUR LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
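# --- Design note (editor's addition, hedged) ---
# The six creator classes above differ only in template, QuantLib index class
# and currency code. A sketch of a factory that could generate them, assuming
# CreatorBase tolerates dynamically created subclasses:
def _make_libor_creator(template, ql_index, currency):
    class _LiborCreator(CreatorBase):
        _templates = [template]
        _req_fields = [F.YIELD_CURVE, F.TENOR]
        _opt_fields = []

        def _create(self, asof_date):
            handle = ql.YieldTermStructureHandle(self[F.YIELD_CURVE])
            return ql_index(self[F.TENOR], handle)

        def defaults(self):
            return {F.CURRENCY.id: currency}

        @classmethod
        def set_info(cls):
            cls.desc("Creates %s LIBOR index" % currency)
            cls.field(F.YIELD_CURVE, "The reference yield curve")
            cls.field(F.TENOR, "The reference tenor of the index")
    return _LiborCreator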
|
mit
| 3,303,185,191,772,193,300
| 29.292308
| 63
| 0.637795
| false
| 2.978064
| false
| false
| false
|
felixboes/hosd
|
experimental/compute_morse.py
|
1
|
3618
|
#!/usr/bin/env python
# The software pyradbar is a bunch of programs to compute the homology of
# Sullivan diagrams.
# Copyright (C) 2015 - 2017 Felix Boes
#
# This file is part of pyradbar.
#
# pyradbar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyradbar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyradbar. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pyradbar
import subprocess
import sys
import os
import time
import inspect
def call_sage(g=1, m=2, more_verbose=None, result_file=None, sage_path=None):
script_path = './pyradbar/morse_computation.py'
sys.stdout.write('Calling ' + sage_path + ' -python ' + script_path + ' ' + str(g) + ' ' + str(m) + ' ' + str(more_verbose) + ' ' + str(result_file) + '\n')
sys.stdout.flush()
cmd = [sage_path, "-python", script_path, str(g), str(m), str(more_verbose), str(result_file)]
subprocess.call(cmd)
def main():
# check for correct version.
major, minor = sys.version_info[0], sys.version_info[1]
if major < 2 or (major == 2 and minor < 7):
raise "Python >= 2.7 is required for argument parsing."
# Use the argparse library. The library optparse is deprecated since version 2.7.
# Compare the documentation: https://docs.python.org/2/library/argparse.html
# Create the argument parser.
# Note: Config files can be processed. In order to do so, we have to give fromfile_prefix_chars='_' with _ a symbol.
parser = argparse.ArgumentParser(
add_help = True,
fromfile_prefix_chars='@',
description='Compute the homology of the compactification of the unilevel radial slit domains aka sulivan diagrams.'
)
# Provide all arguments.
# Note: we provide nargs=N and the N arguments from the command line will be gathered together into a list.
# Thus, we supress nargs.
parser.add_argument('-g', '--gen', required=True, action='store', type=int, dest='g', metavar='arg', help='The genus of the Riemann surfaces')
parser.add_argument('-m', '--pun', required=True, action='store', type=int, dest='m', metavar='arg', help='The number of punctures of the Riemann surfaces')
parser.add_argument('-v', action='store_true', dest='more_verbose', help='Print more status information.', default=False)
parser.add_argument('--sage', action='store', type=str, dest='sage_path', metavar='path', help='The Path to the sage executable', default='./sage-6.8-x86_64-Linux/sage')
args=vars( parser.parse_args() )
# The name of the results file.
args['result_file'] = './results/' + ''.join( [str(param).replace(' ', '_') for param in sys.argv if str(param) ] )
tee = pyradbar.Tee(args['result_file'], 'w')
pre, valid = pyradbar.preamble( args['sage_path'] )
sys.stdout.write(pre + '\n')
sys.stdout.flush()
    if not valid:
        print "Could not initialize everything. Aborting."
        return 1
call_sage( **args )
sys.stdout.write('\n\n\n')
sys.stdout.flush()
if __name__ == "__main__":
main()
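# --- Usage sketch (editor's addition) ---
# Typical invocation (paths are illustrative):
#
#   ./compute_morse.py -g 1 -m 2 -v --sage ./sage-6.8-x86_64-Linux/sage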
|
gpl-3.0
| 4,686,020,170,505,466,000
| 43.121951
| 200
| 0.651741
| false
| 3.540117
| false
| false
| false
|
mariocesar/pengbot
|
src/pengbot/adapters/base.py
|
1
|
2798
|
import asyncio
from collections import defaultdict
from functools import wraps
from pengbot import logger
from pengbot.context import Context
from pengbot.utils import isbound
class UnknownCommand(Exception):
pass
class BaseAdapter:
handlers = []
signals = {}
running = False
name = None
loop = None
def __init__(self, setup_method, **kwargs):
self.context = Context()
self.setup_method = setup_method
def __call__(self, *args, **kwargs):
try:
self.run()
except KeyboardInterrupt:
exit(0)
@property
def name(self):
return self.context.get('name', None) or self.setup_method.__name__
def run(self):
self.setup_method()
self.receive()
def receive(self, *args):
self.loop = asyncio.get_event_loop()
self.loop.set_debug(True)
try:
self.loop.run_until_complete(self.handle_message(*args))
finally:
self.loop.close()
async def handle_message(self, *args):
for handler in self.handlers:
coroutine = handler(*args)
print('handler=', handler)
print('create_task=', coroutine)
task = self.emit(coroutine)
print('task=', task)
print()
def emit(self, coroutine):
print('emit=', coroutine)
self.loop.create_task(coroutine)
def send(self, message):
raise NotImplementedError()
def say(self, *args, **kwargs):
raise NotImplementedError()
# Directives
def signal(self):
adapter = self
def decorator(func):
@wraps(func)
async def wrapper(*args, **kwargs):
print('func=', func)
result = await func(*args, **kwargs)
for listener in adapter.signals.get(func.__qualname__, []):
print('listener=', listener)
if isinstance(result, tuple):
adapter.emit(listener(*result))
else:
adapter.emit(listener(result))
return result
return wrapper
return decorator
def listen(self, signal=None):
def decorator(func):
@wraps(func)
def callback(*args, **kwargs):
return func(*args, **kwargs)
if not signal:
self.handlers.append(callback)
else:
                if signal.__qualname__ in self.signals:
                    self.signals[signal.__qualname__].append(callback)
                else:
                    self.signals[signal.__qualname__] = [callback]
            # Return the decorated function so its name stays bound to it
            return func
        return decorator
class SocketAdapter(BaseAdapter):
pass
class ProcessAdapter(BaseAdapter):
pass
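# --- Usage sketch (editor's addition, hedged) ---
# How the signal/listen pair above is meant to compose on a concrete adapter
# instance `bot` (hypothetical):
def _example_wiring(bot):
    @bot.signal()
    async def message_received(raw):
        return raw.strip()

    @bot.listen(message_received)
    async def on_message(text):
        bot.say(text)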
|
mit
| 7,566,608,231,802,680,000
| 23.330435
| 75
| 0.546104
| false
| 4.609555
| false
| false
| false
|
kittiu/sale-workflow
|
sale_automatic_workflow_payment_mode/models/automatic_workflow_job.py
|
1
|
2353
|
# -*- coding: utf-8 -*-
# © 2016 Camptocamp SA, Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import logging
from odoo import models, api, fields
from odoo.tools.safe_eval import safe_eval
from odoo.addons.sale_automatic_workflow.models.automatic_workflow_job \
import savepoint
_logger = logging.getLogger(__name__)
class AutomaticWorkflowJob(models.Model):
_inherit = 'automatic.workflow.job'
@api.model
def run_with_workflow(self, sale_wkf):
workflow_domain = [('workflow_process_id', '=', sale_wkf.id)]
res = super(AutomaticWorkflowJob, self).run_with_workflow(sale_wkf)
if sale_wkf.register_payment:
self._register_payments(
safe_eval(sale_wkf.payment_filter_id.domain) +
workflow_domain)
return res
@api.model
def _register_payments(self, payment_filter):
invoice_obj = self.env['account.invoice']
invoices = invoice_obj.search(payment_filter)
_logger.debug('Invoices to Register Payment: %s', invoices.ids)
for invoice in invoices:
partner_type = invoice.type in ('out_invoice', 'out_refund') and \
'customer' or 'supplier'
payment_mode = invoice.payment_mode_id
if not payment_mode.fixed_journal_id:
_logger.debug('Unable to Register Payment for invoice %s: '
'Payment mode %s must have fixed journal',
invoice.id, payment_mode.id)
return
with savepoint(self.env.cr):
payment = self.env['account.payment'].create({
'invoice_ids': [(6, 0, invoice.ids)],
'amount': invoice.residual,
'payment_date': fields.Date.context_today(self),
'communication': invoice.reference or invoice.number,
'partner_id': invoice.partner_id.id,
'partner_type': partner_type,
'payment_type': payment_mode.payment_type,
'payment_method_id': payment_mode.payment_method_id.id,
'journal_id': payment_mode.fixed_journal_id.id,
'currency_id': invoice.currency_id.id,
})
payment.post()
return
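# --- Usage note (editor's addition, hedged) ---
# _register_payments receives a standard Odoo domain built from the workflow's
# payment filter. A typical filter (hypothetical) restricting registration to
# open customer invoices would look like:
_EXAMPLE_PAYMENT_FILTER = [
    ('state', '=', 'open'),
    ('type', '=', 'out_invoice'),
]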
|
agpl-3.0
| -7,847,073,784,139,087,000
| 40.263158
| 78
| 0.576105
| false
| 4.083333
| false
| false
| false
|
warrenspe/NanoDB
|
NanoQueries/Alter.py
|
1
|
1805
|
# Standard imports
import os
# Project imports
from _BaseQuery import BaseQuery
import NanoIO.Table
import NanoIO.File
class Alter(BaseQuery):
name = None
addColumns = None
removeColumns = None
modifyColumns = None
addIndex = None
removeIndex = None
grammar = """
"table"
<name: _>
{
(addColumns: "add" "column" <name: _> <type: _>)
(removeColumns: "remove" "column" <name: _>)
(modifyColumns: "modify" "column" <name: _> <newName: _> <newType: _>)
["add" "index" <addIndex: _>]
["remove" "index" <removeIndex: _>]
}
"""
def executeQuery(self, conn):
# Get tableIO object
tableIO = conn._getTable(self.name)
# Back up the TableIO object
#NanoIO.File._renameTable(tableIO, "_NanoDB_Backup
# Create a new TableIO object
# Overwrite our connections tableio object for this table
# Add columns as desired to our new table io object
# Remove columns as desired from this table io object
# Modify columns as desired to this table io object
# Add indices as desired to this table io object
# Remove indices as desired from this table io object
# Serialize our new table io object
# Copy data from our old table to our new table
# Delete our old table IO object
tmpTableName = "_tmp_alter_table_" + tableIO.tableName
NanoIO.File.createTable(tableIO.dbname, tmpTableName)
newTableIO = NanoIO.Table.TableIO(tableIO.dbName, tmpTableName)
# Update config
newTableIO.config
# Update table definition
# Remove indices
|
gpl-3.0
| -183,135,863,889,413,900
| 26.348485
| 89
| 0.580609
| false
| 4.257075
| false
| false
| false
|
amenonsen/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_fcp.py
|
2
|
7005
|
#!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_fcp
short_description: NetApp ONTAP Start, Stop and Enable FCP services.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Start, Stop and Enable FCP services.
options:
state:
description:
- Whether the FCP should be enabled or not.
choices: ['present', 'absent']
default: present
status:
description:
- Whether the FCP should be up or down
choices: ['up', 'down']
default: up
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: create FCP
na_ontap_fcp:
state: present
status: down
hostname: "{{hostname}}"
username: "{{username}}"
password: "{{password}}"
vserver: "{{vservername}}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapFCP(object):
"""
Enable and Disable FCP
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
status=dict(required=False, choices=['up', 'down'], default='up')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
return
def create_fcp(self):
"""
        Creates and starts an FCP service
:return: none
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-create'), True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating FCP: %s' %
(to_native(error)),
exception=traceback.format_exc())
def start_fcp(self):
"""
Starts an existing FCP
:return: none
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-start'), True)
except netapp_utils.zapi.NaApiError as error:
# Error 13013 denotes fcp service already started.
if to_native(error.code) == "13013":
return None
else:
self.module.fail_json(msg='Error starting FCP %s' % (to_native(error)),
exception=traceback.format_exc())
def stop_fcp(self):
"""
        Stops an existing FCP service
:return: none
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-stop'), True)
except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error stopping FCP %s' %
(to_native(error)),
exception=traceback.format_exc())
def destroy_fcp(self):
"""
Destroys an already stopped FCP
:return:
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-destroy'), True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error destroying FCP %s' %
(to_native(error)),
exception=traceback.format_exc())
def get_fcp(self):
fcp_obj = netapp_utils.zapi.NaElement('fcp-service-get-iter')
fcp_info = netapp_utils.zapi.NaElement('fcp-service-info')
fcp_info.add_new_child('vserver', self.parameters['vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(fcp_info)
fcp_obj.add_child_elem(query)
result = self.server.invoke_successfully(fcp_obj, True)
# There can only be 1 FCP per vserver. If true, one is set up, else one isn't set up
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
return True
else:
return False
def current_status(self):
try:
status = self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-status'), True)
return status.get_child_content('is-available') == 'true'
except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error checking FCP status: %s' %
(to_native(error)),
exception=traceback.format_exc())
def apply(self):
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_fcp", cserver)
exists = self.get_fcp()
changed = False
if self.parameters['state'] == 'present':
if exists:
if self.parameters['status'] == 'up':
if not self.current_status():
self.start_fcp()
changed = True
else:
if self.current_status():
self.stop_fcp()
changed = True
else:
self.create_fcp()
if self.parameters['status'] == 'up':
self.start_fcp()
elif self.parameters['status'] == 'down':
self.stop_fcp()
changed = True
else:
if exists:
if self.current_status():
self.stop_fcp()
self.destroy_fcp()
changed = True
self.module.exit_json(changed=changed)
def main():
"""
Start, Stop and Enable FCP services.
"""
obj = NetAppOntapFCP()
obj.apply()
if __name__ == '__main__':
main()
|
gpl-3.0
| -6,642,160,182,949,105,000
| 32.357143
| 114
| 0.561599
| false
| 3.98691
| false
| false
| false
|
mgeorgehansen/FIFE_Technomage
|
engine/python/fife/extensions/pychan/widgets/basictextwidget.py
|
1
|
2140
|
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2009 by the FIFE team
# http://www.fifengine.de
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from widget import Widget
from common import *
class BasicTextWidget(Widget):
"""
The base class for widgets which display a string - L{Label},L{ClickLabel},L{Button}, etc.
Do not use directly.
New Attributes
==============
- text: The text (depends on actual widget)
Data
====
The text can be set via the L{distributeInitialData} method.
"""
ATTRIBUTES = Widget.ATTRIBUTES + [UnicodeAttr('text')]
DEFAULT_HEXPAND = 1
DEFAULT_VEXPAND = 0
def __init__(self, text = u"",**kwargs):
self.margins = (5,5)
self.text = text
super(BasicTextWidget,self).__init__(**kwargs)
# Prepare Data collection framework
self.accepts_initial_data = True
self._realSetInitialData = self._setText
def _getText(self): return gui2text(self.real_widget.getCaption())
def _setText(self,text): self.real_widget.setCaption(text2gui(text))
text = property(_getText,_setText)
def resizeToContent(self, recurse = True):
self.height = self.real_font.getHeight() + self.margins[1]*2
self.width = self.real_font.getWidth(text2gui(self.text)) + self.margins[0]*2
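# --- Usage sketch (editor's addition, hedged) ---
# BasicTextWidget is not used directly; concrete widgets subclass it and bind
# self.real_widget to a guichan widget exposing get/setCaption. A sketch, where
# the exact binding name is an assumption:
#
#   class Label(BasicTextWidget):
#       def __init__(self, **kwargs):
#           self.real_widget = fife.Label("")  # hypothetical guichan binding
#           super(Label, self).__init__(**kwargs)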
|
lgpl-2.1
| -1,168,638,644,344,881,700
| 32.516129
| 91
| 0.648598
| false
| 3.572621
| false
| false
| false
|
ingadhoc/account-payment
|
account_check/models/account_chart_template.py
|
1
|
3098
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields
import logging
_logger = logging.getLogger(__name__)
class AccountChartTemplate(models.Model):
_inherit = 'account.chart.template'
rejected_check_account_id = fields.Many2one(
'account.account.template',
'Rejected Check Account',
help='Rejection Checks account, for eg. "Rejected Checks"',
# domain=[('type', 'in', ['other'])],
)
deferred_check_account_id = fields.Many2one(
'account.account.template',
'Deferred Check Account',
help='Deferred Checks account, for eg. "Deferred Checks"',
# domain=[('type', 'in', ['other'])],
)
holding_check_account_id = fields.Many2one(
'account.account.template',
'Holding Check Account',
help='Holding Checks account for third checks, '
'for eg. "Holding Checks"',
# domain=[('type', 'in', ['other'])],
)
def _load_template(self, company, code_digits=None, account_ref=None, taxes_ref=None):
account_ref, taxes_ref = super()._load_template(
company, code_digits=code_digits, account_ref=account_ref, taxes_ref=taxes_ref)
for field in [
'rejected_check_account_id',
'deferred_check_account_id',
'holding_check_account_id']:
account_field = self[field]
# TODO we should send it in the context and overwrite with
# lower hierichy values
if account_field:
company[field] = account_ref[account_field.id]
return account_ref, taxes_ref
def _create_bank_journals(self, company, acc_template_ref):
"""
Bank - Cash journals are created with this method
Inherit this function in order to add checks to cash and bank
journals. This is because usually will be installed before chart loaded
and they will be disable by default
"""
res = super(
AccountChartTemplate, self)._create_bank_journals(
company, acc_template_ref)
        # create a journal for third-party checks
received_third_check = self.env.ref(
'account_check.account_payment_method_received_third_check')
delivered_third_check = self.env.ref(
'account_check.account_payment_method_delivered_third_check')
self.env['account.journal'].create({
'name': 'Cheques de Terceros',
'type': 'cash',
'company_id': company.id,
'inbound_payment_method_ids': [
(4, received_third_check.id, None)],
'outbound_payment_method_ids': [
(4, delivered_third_check.id, None)],
})
self.env['account.journal'].with_context(
force_company_id=company.id)._enable_issue_check_on_bank_journals()
return res
|
agpl-3.0
| 7,020,589,236,783,189,000
| 39.763158
| 91
| 0.573273
| false
| 4.243836
| false
| false
| false
|
deepmind/reverb
|
reverb/trajectory_writer.py
|
1
|
27672
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the TrajectoryWriter."""
import datetime
import itertools
from typing import Any, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
from reverb import errors
from reverb import pybind
import tree
class TrajectoryWriter:
"""The TrajectoryWriter is used to write data to tables at a Reverb server.
At a high level, the process of inserting trajectories can be summarized as:
* Structured data is appended to an internal buffer using the `append`
method and the caller receives a reference to each element (i.e leaf node)
in the original data.
* Compatible data referenced (i.e same dtype and compatible shapes) are
concatenated into `TrajectoryColumn`s which in turn are combined into a
trajectory and inserted into a table using the `create_item` method.
It is important to understand that the structure of the data provided to
`append` does NOT need to match the structure of the trajectory which the
sampler will receive when it samples the item. To illustrate, consider a
  scenario where we want to sample SARS (state-action-reward-state) style trajectories
of length 5. Furthermore, we would like a trajectory to start at every step.
It would look something like this:
```python
client = Client(...)
env = .... # Construct the environment
policy = .... # Construct the agent's policy
with client.trajectory_writer(num_keep_alive_refs=5) as writer:
for episode in range(NUM_EPISODES):
timestep = env.reset()
# You probably have strong opinions of whether the actions should be
# aligned with the step it originated from or the destination. In this
# example we'll align it with the destination state and thus we'll start
# off by appending the timestep WITHOUT an action.
writer.append({
'observation': timestep.observation,
})
while not timestep.last():
# Select the action according to your policy and act it out in the
# environment.
action = policy(timestep)
timestep = env.step(action)
# Now we have both an action and the state it resulted in. We append
# both of these together to the writer. This will result in them
# sharing the same index in `writer.history`.
writer.append({
'observation': timestep.observation,
'reward': timestep.reward,
'action': action,
})
# Once we have seen at least 5 timesteps (including the first one
# which does not have an aligned action) then we can start inserting
# items that reference the last 5 timesteps and the last 4 actions.
if writer.episode_steps >= 5:
trajectory = {
'states': writer.history['observation'][-5:],
'rewards': writer.history['reward'][-4:],
'actions': writer.history['action'][-4:],
}
writer.create_item(
table='my_table',
priority=calc_priority(trajectory),
trajectory=trajectory)
# Block until all pending items have been sent to the server and
# inserted into 'my_table'. This also clears the buffers so history will
# once again be empty and `writer.episode_steps` is 0.
writer.end_episode()
```
"""
def __init__(self, internal_writer: pybind.TrajectoryWriter):
"""Constructor of TrajectoryWriter (must only be called by `Client`)."""
self._writer = internal_writer
# The union of the structures of all data passed to `append`. The structure
    # grows every time the provided data contains one or more fields which were
# not present in any of the data seen before.
self._structure = None
# References to all data seen since the writer was constructed or last reset
# (through end_episode). The number of columns always matches the number of
# leaf nodes in `_structure` but the order is not (necessarily) the same as
# `tree.flatten(_structure)` since the structure may evolve over time.
# Instead the mapping is controlled by `_path_to_column_index`. See
# `_flatten` and `_unflatten` for more details.
self._column_history: List[_ColumnHistory] = []
# Mapping from structured paths (i.e as received from
# `tree.flatten_with_path`) to position in `_column_history`. This is used
# in `_flatten`.
self._path_to_column_index: Mapping[str, int] = {}
    # The inverse of `_path_to_column_index`. That is, the mapping describes the
# swaps required to go from the order of `column_history` (and the C++
# writer) to the order of a sequence which can be unflattened into
# `_structure`. This is used in `_unflatten`.
self._column_index_to_flat_structure_index: Mapping[int, int] = {}
self._path_to_column_config = {}
# Set when `append` called with `partial_step=True`. Remains set until
# `append` called with `partial_step=False`. This is used to control where
# new data references are added to the history (i.e whether a new step
# should be created).
self._last_step_is_open = False
def __enter__(self) -> 'TrajectoryWriter':
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type is None or errors.ReverbError not in exc_type.mro():
self.flush()
def __del__(self):
self.close()
@property
def history(self):
"""References to data, grouped by column and structured like appended data.
    Allows recently added data references to be accessed with list indexing
semantics. However, instead of returning the raw references, the result is
wrapped in a TrajectoryColumn object before being returned to the caller.
```python
writer = TrajectoryWriter(...)
# Add three steps worth of data.
writer.append({'a': 1, 'b': 100})
writer.append({'a': 2, 'b': 200})
writer.append({'a': 3, 'b': 300})
# Create a trajectory using the _ColumnHistory helpers.
from_history = {
'all_a': writer.history['a'][:],
'first_b': writer.history['b'][0],
'last_b': writer.history['b'][-1],
}
writer.create_item(table='name', priority=1.0, trajectory=from_history)
```
Raises:
RuntimeError: If `append` hasn't been called at least once before.
"""
if not self._column_history:
raise RuntimeError(
'history cannot be accessed before `append` is called at least once.')
return self._unflatten(self._column_history)
@property
def episode_steps(self) -> int:
"""Number of append calls since last `end_episode` call.
This does not count partial calls to append, i.e. ones with
`partial_step=True`.
"""
if not self._column_history:
return 0
else:
return len(self._column_history[0]) - int(self._last_step_is_open)
def configure(self, path: Tuple[Union[int, str], ...],
*,
num_keep_alive_refs: int,
max_chunk_length: Optional[int]):
"""Override chunking options for a single column.
Args:
path: Structured path to the column to configure.
num_keep_alive_refs: Override value for `num_keep_alive_refs` i.e the size
of the circular buffer of the most recently added data.
max_chunk_length: Override value for the chunk length used by this column.
When set to None, an auto tuned chunk length is used. When set to a
number, a constant chunk length is used.
Raises:
ValueError: If num_keep_alive_refs is < 1.
ValueError: If max_chunk_length set to a value < 1 or to a value > than
num_keep_alive_refs.
"""
if num_keep_alive_refs < 1:
raise ValueError(
f'num_keep_alive_refs ({num_keep_alive_refs}) must be a positive '
f'integer')
if max_chunk_length is not None and (
max_chunk_length < 1 or max_chunk_length > num_keep_alive_refs):
raise ValueError(
f'max_chunk_length ({max_chunk_length}) must be None or a positive '
f'integer <= num_keep_alive_refs ({num_keep_alive_refs})')
if max_chunk_length is None:
chunker_options = pybind.AutoTunedChunkerOptions(
num_keep_alive_refs=num_keep_alive_refs,
throughput_weight=1.0)
else:
chunker_options = pybind.ConstantChunkerOptions(
max_chunk_length=max_chunk_length,
num_keep_alive_refs=num_keep_alive_refs)
if path in self._path_to_column_index:
self._writer.ConfigureChunker(self._path_to_column_index[path],
chunker_options)
else:
self._path_to_column_config[path] = chunker_options
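  # Example (editor's addition): tighten chunking for a single column, using a
  # hypothetical column path ('observation',):
  #
  #   writer.configure(('observation',),
  #                    num_keep_alive_refs=10,
  #                    max_chunk_length=5)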
def append(self, data: Any, *, partial_step: bool = False):
"""Columnwise append of data leaf nodes to internal buffers.
If `data` includes fields or sub structures which haven't been present in
any previous calls then the types and shapes of the new fields are extracted
and used to validate future `append` calls. The structure of `history` is
also updated to include the union of the structure across all `append`
calls.
When new fields are added after the first step then the newly created
history field will be filled with `None` in all preceding positions. This
    results in equal indexing across columns. That is, `a[i]` and `b[i]`
    reference the same step in the sequence even if `b` was first observed
after `a` had already been seen.
It is possible to create a "step" using more than one `append` call by
setting the `partial_step` flag. Partial steps can be used when some parts
of the step becomes available only as a result of inserting (and learning
from) trajectories that include the fields available first (e.g learn from
the SARS trajectory to select the next action in an on-policy agent). In the
final `append` call of the step, `partial_step` must be set to False.
Failing to "close" the partial step will result in error as the same field
must NOT be provided more than once in the same step.
Args:
data: The (possibly nested) structure to make available for new items to
reference.
partial_step: If `True` then the step is not considered "done" with this
call. See above for more details. Defaults to `False`.
Raises:
ValueError: If the same column is provided more than once in the same
step.
"""
    # On the first step, initialize the structure from the shape of the data.
if self._structure is None:
self._update_structure(tree.map_structure(lambda _: None, data))
data_with_path_flat = tree.flatten_with_path(data)
try:
# Use our custom mapping to flatten the expanded structure into columns.
flat_column_data = self._reorder_like_flat_structure(data_with_path_flat)
except KeyError:
      # `data` contains fields which haven't been observed before so we need
      # to expand the spec using the union of the history and `data`.
self._update_structure(
_tree_union(self._structure,
tree.map_structure(lambda x: None, data)))
flat_column_data = self._reorder_like_flat_structure(data_with_path_flat)
# If the last step is still open then verify that already populated columns
# are None in the new `data`.
if self._last_step_is_open:
for i, (column, column_data) in enumerate(
zip(self._column_history, flat_column_data)):
if column_data is None or column.can_set_last:
continue
raise ValueError(
f'Field {self._get_path_for_column_index(i)} has already been set '
f'in the active step by previous (partial) append call and thus '
f'must be omitted or set to None but got: {column_data}')
# Flatten the data and pass it to the C++ writer for column wise append.
if partial_step:
flat_column_data_references = self._writer.AppendPartial(flat_column_data)
else:
flat_column_data_references = self._writer.Append(flat_column_data)
# Append references to respective columns. Note that we use the expanded
# structure in order to populate the columns missing from the data with
# None.
for column, data_reference in zip(self._column_history,
flat_column_data_references):
# If the last step is still open (i.e `partial_step` was set) then we
# populate that step instead of creating a new one.
if not self._last_step_is_open:
column.append(data_reference)
elif data_reference is not None:
column.set_last(data_reference)
# Save the flag so the next `append` call either populates the same step
# or begins a new step.
self._last_step_is_open = partial_step
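  # Example (editor's addition): building one step from two partial appends,
  # mirroring the docstring above (field names are hypothetical):
  #
  #   writer.append({'observation': obs}, partial_step=True)
  #   ...                                   # select action, step environment
  #   writer.append({'action': action, 'reward': reward})  # closes the step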
def create_item(self, table: str, priority: float, trajectory: Any):
"""Enqueue insertion of an item into `table` referencing `trajectory`.
Note! This method does NOT BLOCK and therefore is not impacted by the table
rate limiter. To prevent unwanted runahead, `flush` must be called.
Before creating an item, `trajectory` is validated.
* Only contain `TrajectoryColumn` objects.
* All data references must be alive (i.e not yet expired).
* Data references within a column must have the same dtype and shape.
Args:
table: Name of the table to insert the item into.
priority: The priority used for determining the sample probability of the
new item.
trajectory: A structure of `TrajectoryColumn` objects. The structure is
flattened before passed to the C++ writer.
Raises:
TypeError: If trajectory is invalid.
"""
flat_trajectory = tree.flatten(trajectory)
if not all(isinstance(col, TrajectoryColumn) for col in flat_trajectory):
raise TypeError(
f'All leaves of `trajectory` must be `TrajectoryColumn` but got '
f'{trajectory}')
# Pass the flatten trajectory to the C++ writer where it will be validated
    # and if successful then the item is created and enqueued for the background
# worker to send to the server.
self._writer.CreateItem(table, priority,
[list(column) for column in flat_trajectory],
[column.is_squeezed for column in flat_trajectory])
def flush(self,
block_until_num_items: int = 0,
timeout_ms: Optional[int] = None):
"""Block until all but `block_until_num_items` confirmed by the server.
There are two ways that an item could be "pending":
1. Some of the data elements referenced by the item have not yet been
finalized (and compressed) as a `ChunkData`.
2. The item has been written to the gRPC stream but the response
confirming the insertion has not yet been received.
Type 1 pending items are transformed into type 2 when flush is called by
forcing (premature) chunk finalization of the data elements referenced by
the items. This will allow the background worker to write the data and items
to the gRPC stream and turn them into type 2 pending items.
The time it takes for type 2 pending items to be confirmed is primarily
due to the state of the table rate limiter. After the items have been
written to the gRPC stream then all we can do is wait (GIL is not held).
Args:
block_until_num_items: If > 0 then this many pending items will be allowed
to remain as type 1. If the number of type 1 pending items is less than
`block_until_num_items` then we simply wait until the total number of
pending items is <= `block_until_num_items`.
timeout_ms: (optional, default is no timeout) Maximum time to block for
before unblocking and raising a `DeadlineExceededError` instead. Note
that although the block is interrupted, the insertion of the items will
proceed in the background.
Raises:
ValueError: If block_until_num_items < 0.
DeadlineExceededError: If operation did not complete before the timeout.
"""
if block_until_num_items < 0:
raise ValueError(
f'block_until_num_items must be >= 0, got {block_until_num_items}')
if timeout_ms is None:
timeout_ms = -1
try:
self._writer.Flush(block_until_num_items, timeout_ms)
except RuntimeError as e:
      if 'Timeout exceeded' in str(e) and timeout_ms != -1:
raise errors.DeadlineExceededError(
f'Flush call did not complete within provided timeout of '
f'{datetime.timedelta(milliseconds=timeout_ms)}')
raise
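  # Hedged example of `flush` (the values are arbitrary): block until at most
  # one item remains pending, giving up after five seconds.
  #
  #   writer.flush(block_until_num_items=1, timeout_ms=5000)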
def end_episode(self,
clear_buffers: bool = True,
timeout_ms: Optional[int] = None):
"""Flush all pending items and generate a new episode ID.
Args:
      clear_buffers: Whether the history should be cleared or not. Buffers
        should be kept (i.e. not cleared) only when trajectories spanning
        multiple episodes are used.
timeout_ms: (optional, default is no timeout) Maximum time to block for
before unblocking and raising a `DeadlineExceededError` instead. Note
that although the block is interrupted, the buffers and episode ID are
reset all the same and the insertion of the items will proceed in the
background thread.
Raises:
DeadlineExceededError: If operation did not complete before the timeout.
"""
try:
self._writer.EndEpisode(clear_buffers, timeout_ms)
except RuntimeError as e:
if 'Timeout exceeded' in str(e) and timeout_ms is not None:
raise errors.DeadlineExceededError(
f'End episode call did not complete within provided timeout of '
f'{datetime.timedelta(milliseconds=timeout_ms)}')
raise
if clear_buffers:
for column in self._column_history:
column.reset()
def close(self):
self._writer.Close()
def _reorder_like_flat_structure(self, data_with_path_flat):
flat_data = [None] * len(self._path_to_column_index)
for path, value in data_with_path_flat:
flat_data[self._path_to_column_index[path]] = value
return flat_data
def _unflatten(self, flat_data):
reordered_flat_data = [
flat_data[self._column_index_to_flat_structure_index[i]]
for i in range(len(flat_data))
]
return tree.unflatten_as(self._structure, reordered_flat_data)
def _get_path_for_column_index(self, column_index):
i = self._column_index_to_flat_structure_index[column_index]
return tree.flatten_with_path(self._structure)[i][0]
def _update_structure(self, new_structure: Any):
"""Replace the existing structure with a superset of the current one.
Since the structure is allowed to evolve over time we are unable to simply
map flattened data to column indices. For example, if the first step is
`{'a': 1, 'c': 101}` and the second step is `{'a': 2, 'b': 12, 'c': 102}`
then the flatten data would be `[1, 101]` and `[2, 12, 102]`. This will
result in invalid behaviour as the second column (index 1) would receive `c`
in the first step and `b` in the second.
To mitigate this we maintain an explicit mapping from path -> column. The
mapping is allowed to grow over time and would in the above example be
`{'a': 0, 'c': 1}` and `{'a': 0, 'b': 2, 'c': 1}` after the first and second
    step resp. Data would thus be flattened as `[1, 101]` and `[2, 102, 12]`,
    which means that the columns in the C++ layer only receive data from a
    single field in the structure even if it evolves over time.
Args:
new_structure: The new structure to use. Must be a superset of the
previous structure.
"""
new_structure_with_path_flat = tree.flatten_with_path(new_structure)
# Evolve the mapping from structure path to column index.
for path, _ in new_structure_with_path_flat:
if path not in self._path_to_column_index:
self._path_to_column_index[path] = len(self._path_to_column_index)
        # If an explicit config has been provided for the column then forward
        # it to the C++ writer so it will be applied when the column chunker
        # is created.
if path in self._path_to_column_config:
self._writer.ConfigureChunker(self._path_to_column_index[path],
self._path_to_column_config[path])
    # Recalculate the reverse mapping, i.e. column index to index within the
    # flattened structure.
self._column_index_to_flat_structure_index = {
i: self._path_to_column_index[path]
for i, (path, _) in enumerate(new_structure_with_path_flat)
}
    # New columns are always added to the back, so all we need to do to expand
    # the history structure is to append one column for every field added by
    # this `_update_structure` call. In order to align indexing across all
    # columns we init the new fields with None for all steps up until this one.
history_length = len(self._column_history[0]) if self._column_history else 0
while len(self._column_history) < len(new_structure_with_path_flat):
column_index = len(self._column_history)
self._column_history.append(
_ColumnHistory(new_structure_with_path_flat[column_index][0],
history_length))
# With the mapping and history updated the structure can be set.
self._structure = new_structure
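# Worked restatement of the `_update_structure` docstring example above
# (the values are illustrative only):
#
#   step 1: {'a': 1, 'c': 101}          -> mapping {'a': 0, 'c': 1}
#   step 2: {'a': 2, 'b': 12, 'c': 102} -> mapping {'a': 0, 'c': 1, 'b': 2}
#
# The flat data sent to the C++ layer is [1, 101] and then [2, 102, 12], so
# column 1 always carries `c` while the new column 2 carries `b` (its history
# is padded with None for step 1).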
class _ColumnHistory:
"""Utility class for building `TrajectoryColumn`s from structured history."""
def __init__(self,
path: Tuple[Union[str, int], ...],
history_padding: int = 0):
"""Constructor for _ColumnHistory.
Args:
path: A Tuple of strings and ints that represents which leaf-node this
column represents in TrajectoryWriter._structure.
history_padding: The number of Nones used to forward-pad the column's
history.
"""
self._path = path
self._data_references: Sequence[Optional[pybind.WeakCellRef]] = (
[None] * history_padding)
def append(self, ref: Optional[pybind.WeakCellRef]):
self._data_references.append(ref)
def reset(self):
self._data_references = []
def set_last(self, ref: pybind.WeakCellRef):
if not self._data_references:
raise RuntimeError('set_last called on empty history column')
if self._data_references[-1] is not None:
raise RuntimeError('set_last called on already set cell')
self._data_references[-1] = ref
@property
def can_set_last(self) -> bool:
    return bool(self._data_references) and self._data_references[-1] is None
def __len__(self) -> int:
return len(self._data_references)
def __iter__(self) -> Iterator[Optional[pybind.WeakCellRef]]:
return iter(self._data_references)
def __getitem__(self, val) -> 'TrajectoryColumn':
path = self._path + (val,)
if isinstance(val, int):
return TrajectoryColumn([self._data_references[val]],
squeeze=True,
path=path)
elif isinstance(val, slice):
return TrajectoryColumn(
self._data_references[val], path=path)
else:
raise TypeError(
f'_ColumnHistory indices must be integers, not {type(val)}')
def __str__(self):
name = f'{self.__class__.__module__}.{self.__class__.__name__}'
return f'{name}(path={self._path}, refs={self._data_references})'
class TrajectoryColumn:
"""Column used for building trajectories referenced by table items."""
def __init__(self,
data_references: Sequence[pybind.WeakCellRef],
*,
squeeze: bool = False,
path: Optional[Tuple[Union[str, int, slice], ...]] = None):
if squeeze and len(data_references) != 1:
raise ValueError(
          f'Columns must contain exactly one data reference when squeeze is '
          f'set, got {len(data_references)}')
if any(ref is None for ref in data_references):
raise ValueError('TrajectoryColumns cannot contain any None data '
f'references, got {data_references} for '
f'TrajectoryColumn at path {path}')
self._data_references = tuple(data_references)
self.is_squeezed = squeeze
def __len__(self) -> int:
return len(self._data_references)
def __iter__(self) -> Iterator[pybind.WeakCellRef]:
return iter(self._data_references)
def __getitem__(self, val) -> 'TrajectoryColumn':
if isinstance(val, int):
return TrajectoryColumn([self._data_references[val]], squeeze=True)
elif isinstance(val, slice):
return TrajectoryColumn(self._data_references[val])
else:
raise TypeError(
f'TrajectoryColumn indices must be integers or slices, '
f'not {type(val)}')
@property
def shape(self) -> Tuple[Optional[int], ...]:
if self.is_squeezed:
return self._data_references[0].shape
else:
return (len(self._data_references), *self._data_references[0].shape)
@property
def dtype(self) -> np.dtype:
return self._data_references[0].dtype
def numpy(self) -> np.ndarray:
"""Gets and stacks all the referenced data.
    Data is copied from buffers in the C++ layer and may involve decompression
    of already created chunks. This can be quite a memory-intensive operation
    when used on large arrays.
Returns:
All referenced data stacked in a single numpy array if column isn't
squeezed. If the column is squeezed then the value is returned without
stacking.
Raises:
RuntimeError: If any data reference has expired.
"""
if any(reference.expired for reference in self._data_references):
raise RuntimeError(
'Cannot convert TrajectoryColumn with expired data references to '
'numpy array.')
if self.is_squeezed:
return self._data_references[0].numpy()
return np.stack([ref.numpy() for ref in self._data_references])
def _tree_filter(source, filter_with_path_flat):
  """Extract the fields selected by `filter_with_path_flat` from `source`."""
  path_to_index = {
      path: i for i, (path, _) in enumerate(filter_with_path_flat)
}
flat_target = [None] * len(path_to_index)
for path, leaf in tree.flatten_with_path(source):
if path in path_to_index:
flat_target[path_to_index[path]] = leaf
return flat_target
def _is_named_tuple(x):
# Classes that look syntactically as if they inherit from `NamedTuple` in
# fact end up not doing so, so use this heuristic to detect them.
  return isinstance(x, tuple) and hasattr(x, '_fields')
def _tree_union(a, b):
"""Compute the disjunction of two trees with None leaves."""
if a is None:
return a
if _is_named_tuple(a):
return type(a)(**_tree_union(a._asdict(), b._asdict()))
  if isinstance(a, (list, tuple)):
    return type(a)(
        _tree_union(aa, bb) for aa, bb in itertools.zip_longest(a, b))
merged = {**a}
for k, v in b.items():
if k in a:
merged[k] = _tree_union(a[k], v)
else:
merged[k] = v
return type(a)(**merged)
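# A hedged sketch of `_tree_union` on plain dicts with None leaves, as used by
# `_update_structure` above (not part of the public API):
#
#   _tree_union({'a': None}, {'a': None, 'b': None})
#   -> {'a': None, 'b': None}
#   _tree_union({'a': {'x': None}}, {'a': {'x': None, 'y': None}})
#   -> {'a': {'x': None, 'y': None}}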
|
apache-2.0
| -4,714,772,191,559,620,000
| 39.279476
| 81
| 0.664751
| false
| 4.039118
| false
| false
| false
|
euronmetaliaj/MarketAnalyzer
|
core/social/Objects/rake.py
|
1
|
6665
|
# Implementation of RAKE - Rapid Automatic Keyword Extraction algorithm
# as described in:
# Rose, S., D. Engel, N. Cramer, and W. Cowley (2010).
# Automatic keyword extraction from individual documents.
# In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and Theory. John Wiley and Sons, Ltd.
import re
import operator
debug = False
test = True
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
@param text The text that must be split in to words.
@param min_word_return_size The minimum no of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
#leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = r'\b' + word + r'(?![\w-])' # added look ahead for hyphen
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
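# Hedged, doctest-style illustration of the pattern built above for a stop
# list containing "of" and "the" (not executed here):
#
#   >>> pattern = re.compile(r'\bof(?![\w-])|\bthe(?![\w-])', re.IGNORECASE)
#   >>> re.sub(pattern, '|', 'compatibility of the systems')
#   'compatibility | | systems'
#
# Splitting the result on '|' (as done below) yields the candidate phrases
# 'compatibility' and 'systems'.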
def generate_candidate_keywords(sentence_list, stopword_pattern):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "":
phrase_list.append(phrase)
return phrase_list
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
#if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree #orig.
#word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate word scores: score(w) = deg(w) / freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) #orig.
#word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp.
return word_score
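# Hand-worked example of the deg/freq scoring above, assuming the candidate
# phrases "linear constraints" and "linear diophantine equations":
#
#   freq(linear) = 2,      deg(linear) = 1 + 2 + freq = 5      -> score 2.5
#   freq(constraints) = 1, deg(constraints) = 1 + 1 = 2        -> score 2.0
#   freq(diophantine) = 1, deg(diophantine) = 2 + 1 = 3        -> score 3.0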
def generate_candidate_keyword_scores(phrase_list, word_score):
keyword_candidates = {}
for phrase in phrase_list:
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
class Rake(object):
def __init__(self, stop_words_path):
self.stop_words_path = stop_words_path
self.__stop_words_pattern = build_stop_word_regex(stop_words_path)
def run(self, text):
sentence_list = split_sentences(text)
phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores)
sorted_keywords = sorted(keyword_candidates.iteritems(), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
if test:
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
# Split text into sentences
sentenceList = split_sentences(text)
#stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
stoppath = "SmartStoplist.txt" #SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
stopwordpattern = build_stop_word_regex(stoppath)
# generate candidate keywords
phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
# calculate individual word scores
wordscores = calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
if debug: print keywordcandidates
sortedKeywords = sorted(keywordcandidates.iteritems(), key=operator.itemgetter(1), reverse=True)
if debug: print sortedKeywords
totalKeywords = len(sortedKeywords)
if debug: print totalKeywords
print sortedKeywords[0:(totalKeywords / 3)]
rake = Rake("SmartStoplist.txt")
keywords = rake.run(text)
print keywords
|
mit
| -2,379,889,619,676,009,000
| 38.678571
| 580
| 0.674269
| false
| 3.769796
| false
| false
| false
|
dsapandora/die_hard
|
backup.py
|
1
|
4670
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import urllib
import requests
import math
from flask import Flask, request
app = Flask(__name__)
API_KEY = 'ebc2ccbd44d15f282010c6f3514c5c02'
API_URL = 'http://api.openweathermap.org/data/2.5/weather?'
API_QUERY = 'lat={lat}&lon={lon}&appid={api}'
# SAMPLE REQUEST
# http://
# api.openweathermap.org/data/2.5/weather?q=London,uk&appid=ebc2ccbd44d15f282010c6f3514c5c02
# API_KEY='E8D05ADD-DF71-3D14-3794-93FAF8ED8F5'
# API_URL='https://api.airmap.io/data/v1/status'
"""
curl -v -L -G \
--header "X-API-Key: 'E8D05ADD-DF71-3D14-3794-93FAF8ED8F5'" \
-d "latitude=33.9425&longitude=-118.4081&unique_id=laxexample" \
https://api.airmap.io/data/v1/status
"""
"""
curl -v -L -G \
--header "X-API-Key: fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146" \
-d "latitude=8.985955&longitude=-79.529316&radius=100000&unique_id=colexample&weather=true" \
https://api.airmap.io/data/v1/status
"""
"""
curl -v -L -G \
--header "'X-api-key: fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146" \
-d "latitude=8.983258&longitude=-79.557281&radius=100000&unique_id=colexample&weather=true" \
https://api.airmap.io/data/v1/status
Airport Tocumen:
latitude=9.088791&longitude=-79.384632
Airport Gelabert:
8.983258, -79.557281
curl -G \
--header "X-API-Key: fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146" \
-d "latitude=8.985955&longitude=-79.529316&radius=100000&unique_id=colexample&weather=true" \
https://api.airmap.io/data/v1/status
"""
@app.route('/')
def hello_world():
return 'Flask Dockerized'
@app.route('/get_data')
def get_data():
R = 6378.1 #Radius of the Earth
brng = 1.57 #Bearing is 90 degrees converted to radians.
d = 15 #Distance in km
lat1 = math.radians(52.20472) #Current lat point converted to radians
lon1 = math.radians(0.14056) #Current long point converted to radians
lat2 = math.asin( math.sin(lat1)*math.cos(d/R) +
math.cos(lat1)*math.sin(d/R)*math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng)*math.sin(d/R)*math.cos(lat1),
math.cos(d/R)-math.sin(lat1)*math.sin(lat2))
lat2 = math.degrees(lat2)
lon2 = math.degrees(lon2)
print(lat2)
print(lon2)
return 'Get data route %s %s' % (lat2, lon2)
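# Editor's note: the spherical formulas above solve the "direct" geodesic
# problem - given a start point, a bearing and a distance, find the
# destination. Rough sanity check (hand-computed, approximate): 15 km due
# east of (52.20472, 0.14056) moves the longitude by about 15 / 68.2 = 0.22
# degrees, since one degree of longitude spans roughly 68 km at this latitude.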
@app.route('/get_something_else')
def get_something_else():
latitude = request.args.get('latitude')
longitude = request.args.get('longitude')
if latitude is None:
latitude = 8.985955
if longitude is None:
longitude = -79.529316
url = API_URL + API_QUERY.format(lat=latitude, lon=longitude, api=API_KEY)
values = urllib2.urlopen(url).read()
return values
@app.route('/get_flight_zones')
def get_flight_zones():
latitude = request.args.get('latitude')
longitude = request.args.get('longitude')
if latitude is None:
latitude = 8.985955
if longitude is None:
longitude = -79.529316
url = 'https://api.airmap.io/data/v1/status?radius=360&latitude=%s&longitude=%s&unique_id=sample&weather=true' % (latitude, longitude)
headers = { 'X-API-Key': 'fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146' }
req = requests.get(url,headers=headers)
    no_flight_near_me = map(lambda x: x['name'], req.json()['nearest_advisories'])
    # A Flask view must return a response, so return the advisory names.
    return str(no_flight_near_me)
@app.route('/get_weather_data')
def get_weather_data():
"""
Weather parameters of wind speed and direction, gust speed potential, dew point, temperature and visibility.
"""
latitude = request.args.get('latitude')
longitude = request.args.get('longitude')
if latitude is None:
latitude = 8.985955
if longitude is None:
longitude = -79.529316
url = 'https://api.airmap.io/data/v1/status?radius=360&latitude=%s&longitude=%s&unique_id=sample&weather=true' % (latitude, longitude)
headers = { 'X-API-Key': 'fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146' }
req = requests.get(url,headers=headers)
return str(req.json()['weather'])
#!/usr/bin/env python
# Haversine formula example in Python
# Author: Wayne Dyck
import math
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
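# Hedged usage sketch for the haversine helper above (the coordinates are
# illustrative; the result is an approximate great-circle distance):
#
#   london = (51.5074, -0.1278)
#   paris = (48.8566, 2.3522)
#   distance(london, paris)  # -> roughly 343 km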
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0')
|
gpl-3.0
| 4,456,546,718,283,175,000
| 30.993151
| 138
| 0.679872
| false
| 2.688543
| false
| false
| false
|
ayberkt/2048
|
game.py
|
1
|
3825
|
from random import randint
from view import GridView
class Grid(object):
def __init__(self):
self.matrix = [ [2, 0, 2, 0],
[0, 0, 0, 8],
[0, 2, 0, 0],
[0, 0, 2, 4]]
self.score = 0
print "Play with WASD!"
def begin(self):
'''Start the game.'''
self.grid_view = GridView(self)
self.grid_view.initUI(self.matrix)
self.grid_view.mainloop()
def get_column(self, nth):
'''Get column at index.'''
column = []
for row in self.matrix:
column.append(row[nth])
return column
def set_column(self, nth, column):
'''Replace a column at index "nth".'''
for i in range(4):
self.matrix[i][nth] = column[i]
def insert_random_num(self):
'''Insert a random number to the grid.'''
x = randint(0, 3)
y = randint(0, 3)
while not self.matrix[y][x] == 0:
x = randint(0, 3)
y = randint(0, 3)
self.matrix[y][x] = 2
def control_state(self):
print "Score: " + str(self.score)
self.insert_random_num()
self.grid_view.layout_matrix(self.matrix)
def slide(self, direction):
        '''Apply the corresponding shift to a column or row.
        Columns are treated as rows, thus sliding a column up is the
        same as shifting it left.
        Valid directions: "up", "down", "left" and "right".'''
if direction == "up":
for i in range(4):
column = self.get_column(i)
column = self.shift(column, "left")
self.set_column(i, column)
elif direction == "right":
for i in range(4):
row = self.matrix[i]
row = self.shift(row, "right")
self.matrix[i] = row
elif direction == "down":
for i in range(4):
column = self.get_column(i)
column = self.shift(column, "right")
self.set_column(i, column)
elif direction == "left":
for i in range(4):
row = self.matrix[i]
row = self.shift(row, "left")
self.matrix[i] = row
self.control_state()
def shift(self, array, direction):
        '''Shift an array left or right as specified by the "direction" arg.
        If the input array is [2, 2, 4, 8] the result is [4, 4, 8, 0]
        after a left-shift is applied (equal tiles merge, zeros pad the end).'''
# Direction should be specified as either left or right.
assert(direction == 'left' or direction == 'right')
if sum(array) == 0: return array
if direction == 'right': array = array[::-1]
array = filter(lambda x: x != 0, array)
for index in range(1, len(array)):
if array[index - 1] == array[index]:
array[index - 1] += array[index]
self.score += array[index - 1]
array[index] = 0
array = filter(lambda x: x != 0, array)
while len(array) < 4:
array.append(0)
if direction == 'left': return array
if direction == 'right': return array[::-1]
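    # Hand-checked walkthrough of shift() against the merge rules above
    # (the score increases by each merged value):
    #
    #   shift([2, 2, 4, 8], 'left')  -> [4, 4, 8, 0]  (score += 4)
    #   shift([2, 2, 2, 2], 'left')  -> [4, 4, 0, 0]  (score += 8)
    #   shift([2, 2, 4, 8], 'right') -> [0, 4, 4, 8]  (score += 4)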
def matrix_str(self):
'''Create a string representation of the matrix
in the current state. This method is to be used
for debugging purposes.'''
matrix_str = ""
for row in self.matrix:
row_str = ""
for num in row:
if num == 0:
row_str += " . "
else:
row_str += " " + str(num) + " "
row_str += "\n"
matrix_str += row_str
return matrix_str
if __name__ == "__main__":
game_grid = Grid()
game_grid.begin()
|
mit
| 2,469,207,557,474,849,300
| 29.6
| 75
| 0.491242
| false
| 3.947368
| false
| false
| false
|
zimonkaizoku/GMTcsh2dos
|
GMTcsh2dos.py
|
1
|
3970
|
#!/usr/bin/env python
################################################################################
# GMTcsh2dos.py
# -------------------------------------------------
# Version: 0.1
# Author: Simon Dreutter
# License: GNU Generic Public License v3.0 / 2015
# -------------------------------------------------
################################################################################
#
# This is a script for translating simple GMT (Generic Mapping Tools) Unix
# csh scripts into DOS batch files. It makes some simple changes to the
# syntax (comments, etc.) to ensure compatibility. It is not meant as a
# general Unix-to-DOS shell translator, since that is not possible!
#
# Run script like so:
# GMTcsh2dos.py <Inputfile>
#
################################################################################
# import modules
import sys
#=================================================
# open GMT csh script:
#=================================================
try:
filename = sys.argv[1]
print('\nInput file: ' + filename)
except IndexError:
    print('\nNo input file specified. Cancelled!\n')
sys.exit()
f = open(filename,'rb')
csh = f.read()
f.close()
#=================================================
# start with some simple replacement:
#=================================================
# ('\n','') for multiline commands
# ('\t','') for tabs inbetween command lines
# ('>!','>') for first time calling of the PS_FILE
# ('= ','=') to avoid spaces in the variable settings
# ('=>','= >') to recover '= >' in the -T option of grd2cpt
# ('rm -f','del') translate the Unix delete command to the DOS equivalent
lines = csh.replace('\\\n','').replace('>!','>').replace('= ','=').replace('=>','= >').replace('rm -f','del').split('\n')
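# Hedged before/after sketch of the replacement chain above (the csh line is
# made up purely for illustration):
#
#   csh:   gmt psbasemap -R0/10/0/10 -JX10c -B1 >! plot.ps
#   batch: gmt psbasemap -R0/10/0/10 -JX10c -B1 > plot.ps
#
# Continued lines ending in '\' are joined first, so multi-line commands
# collapse into a single batch line before the other substitutions run.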
#=================================================
# go on with some more complicated replacements:
#=================================================
# counter
i = 0
# list of script variables
var = []
# loop through all lines and do stuff
for line in lines:
# delete \t in lines that are not comments
if not line.startswith('#'):
lines[i] = line.replace('\t','')
line = lines[i]
# check for lines that contain a command and a following comment and
# get rid of the comment
if '#' in line and not line.startswith('#'):
lines[i] = line.split('#')[0]
line = lines[i]
# replace comment symbols ('#','REM ')
if line.startswith('#'):
lines[i] = line.replace('#','REM ',1)
line = lines[i]
    # look for variable settings and append them to the var list
if line.startswith('set'):
var.append(line.split('=')[0].split(' ')[1])
# loop through all variables in each line to change '$VAR' to '%VAR%'
for v in var:
v = '$'+v
if v in line:
lines[i] = line.replace(v,'%'+v[1:]+'%')
line = lines[i]
# DOS does not accept variables within " ", therefore get rid of them
if '"%' in line:
lines[i] = line.replace('"%','%')
line = lines[i]
if '%"' in line:
lines[i] = line.replace('%"','%')
line = lines[i]
# count up
i = i + 1
#=================================================
# write .bat file:
#=================================================
# file handling
filename = filename.split('.')[0] + '.bat'
f = open(filename,'wb')
# 'echo off' to make echos visible in DOS cmd
f.write('@echo off\r\n')
# write lines but skip initial '#! /bin/csh' line and 'Preview' command line
for line in lines:
if '! /bin' in line:
continue
if 'Preview' in line:
continue
f.write(line + '\r\n')
# 'echo on'
f.write('@echo on\r\n')
# close file
f.close()
#=================================================
# all done:
#=================================================
print('Output file: ' + filename)
print('\nAll Done!\n')
|
gpl-3.0
| -2,424,475,750,428,373,000
| 29.775194
| 121
| 0.455416
| false
| 4.059305
| false
| false
| false
|
Yelp/kafka-utils
|
kafka_utils/kafka_consumer_manager/commands/list_topics.py
|
1
|
2354
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import sys
import six
from .offset_manager import OffsetManagerBase
from kafka_utils.util.client import KafkaToolClient
class ListTopics(OffsetManagerBase):
@classmethod
def setup_subparser(cls, subparsers):
parser_list_topics = subparsers.add_parser(
"list_topics",
description="List topics by consumer group.",
add_help=False
)
parser_list_topics.add_argument(
"-h", "--help", action="help",
help="Show this help message and exit."
)
parser_list_topics.add_argument(
'groupid',
help="Consumer Group ID whose topics shall be fetched."
)
parser_list_topics.set_defaults(command=cls.run)
@classmethod
def run(cls, args, cluster_config):
# Setup the Kafka client
client = KafkaToolClient(cluster_config.broker_list)
client.load_metadata_for_topics()
topics_dict = cls.preprocess_args(
groupid=args.groupid,
topic=None,
partitions=None,
cluster_config=cluster_config,
client=client,
fail_on_error=False,
use_admin_client=args.use_admin_client,
)
if not topics_dict:
print("Consumer Group ID: {group} does not exist.".format(
group=args.groupid,
))
sys.exit(1)
print("Consumer Group ID: {groupid}".format(groupid=args.groupid))
for topic, partitions in six.iteritems(topics_dict):
print("\tTopic: {topic}".format(topic=topic))
print("\t\tPartitions: {partitions}".format(partitions=partitions))
|
apache-2.0
| 1,139,298,315,094,970,400
| 33.115942
| 79
| 0.639337
| false
| 4.151675
| false
| false
| false
|
Ogreman/django-termsearch
|
docs/conf.py
|
1
|
8138
|
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import termsearch
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dj-termsearch'
copyright = u'2014, James Cox'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = termsearch.__version__
# The full version, including alpha/beta/rc tags.
release = termsearch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dj-termsearchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'dj-termsearch.tex', u'dj-termsearch Documentation',
u'James Cox', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dj-termsearch', u'dj-termsearch Documentation',
[u'James Cox'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dj-termsearch', u'dj-termsearch Documentation',
u'James Cox', 'dj-termsearch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
| 2,248,664,516,863,073,500
| 31.043307
| 80
| 0.706439
| false
| 3.75023
| true
| false
| false
|
jlmadurga/django-telegram-bot
|
telegrambot/bot_views/generic/responses.py
|
1
|
1680
|
from django.template import RequestContext, TemplateDoesNotExist
from django.template.loader import get_template
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove
import ast
import logging
from django.http.request import HttpRequest
logger = logging.getLogger(__name__)
class TemplateResponse(object):
def __init__(self, template_name, ctx=None):
self.template_name = template_name
if ctx is None:
self.ctx = {}
else:
self.ctx = ctx
def render(self):
if not self.template_name:
return None
try:
logger.debug("Template name: %s" % self.template_name)
template = get_template(self.template_name)
except TemplateDoesNotExist:
logger.debug("Template not found: %s" % self.template_name)
return None
        # TODO: Avoid passing a null HttpRequest to context processors
ctx = RequestContext(HttpRequest(), self.ctx)
return template.render(ctx)
class TextResponse(TemplateResponse):
def __init__(self, template_text, ctx=None):
super(TextResponse, self).__init__(template_text, ctx)
class KeyboardResponse(TemplateResponse):
def __init__(self, template_keyboard, ctx=None):
super(KeyboardResponse, self).__init__(template_keyboard, ctx)
def render(self):
keyboard = super(KeyboardResponse, self).render()
if keyboard:
keyboard = ast.literal_eval(keyboard)
keyboard = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
else:
keyboard = ReplyKeyboardRemove()
return keyboard
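# Hedged example of a keyboard template consumed by KeyboardResponse above:
# the rendered template text must be a Python literal list of button rows,
# e.g. a template containing
#
#   [["Yes", "No"], ["Cancel"]]
#
# is parsed by ast.literal_eval into rows for ReplyKeyboardMarkup.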
|
bsd-3-clause
| -3,643,803,183,915,614,000
| 33.306122
| 74
| 0.641667
| false
| 4.60274
| false
| false
| false
|
tangentlabs/django-oscar-fancypages
|
oscar_sandbox/sandbox/settings.py
|
1
|
6442
|
# Django settings for sandbox project.
import os
import oscar_fancypages.utils as ofp
PROJECT_DIR = os.path.dirname(__file__)
location = lambda x: os.path.join(os.path.dirname(os.path.realpath(__file__)), "../%s" % x)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
USE_LESS = True
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'db.sqlite3'),
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Australia/Melbourne'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = location('public/media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
location('static/'),
] + ofp.get_oscar_fancypages_paths('static')
STATIC_ROOT = location('public')
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'sba9ti)x&^fkod-g91@^_yi6y_#&3mo#m5@n)i&k+0h=+zsfkb'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.core.context_processors.metadata',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
'fancypages.middleware.EditorMiddleware',
)
ROOT_URLCONF = 'sandbox.ofp_urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sandbox.wsgi.application'
# Compressor and pre-compiler settings for django-compressor
COMPRESS_ENABLED = DEBUG
COMPRESS_OUTPUT_DIR = 'cache'
COMPRESS_OFFLINE = False
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee --compile --stdio'),
('text/less', 'lessc {infile} {outfile}'),
)
if DEBUG:
COMPRESS_JS_FILTERS = []
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = [
location('templates'),
os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'),
OSCAR_MAIN_TEMPLATE_DIR,
] + ofp.get_oscar_fancypages_paths('templates')
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'debug_toolbar',
]
OFP_APPS = ofp.get_required_apps() + ofp.get_oscar_fancypages_apps()
from oscar import get_core_apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + OFP_APPS + get_core_apps()
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.Emailbackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/accounts/'
APPEND_SLASH = True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# Oscar settings
from oscar.defaults import *
from oscar_fancypages.defaults import *
OSCAR_ALLOW_ANON_CHECKOUT = True
OSCAR_SHOP_NAME = 'FancyPages Sandbox'
OSCAR_SHOP_TAGLINE = 'Make your pages sparkle and shine!'
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
INTERNAL_IPS = ('127.0.0.1',)
|
bsd-3-clause
| -5,179,257,276,437,302,000
| 28.550459
| 91
| 0.705061
| false
| 3.417507
| false
| false
| false
|
CZ-NIC/foris
|
foris/config_handlers/wan.py
|
1
|
17358
|
# Foris - web administration interface for OpenWrt based on NETCONF
# Copyright (C) 2017, 2020 CZ.NIC, z.s.p.o. <http://www.nic.cz>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .base import BaseConfigHandler
from foris import fapi, validators
from foris.state import current_state
from foris.form import Checkbox, Dropdown, Textbox, Number, PasswordWithHide
from foris.utils.translators import gettext_dummy as gettext, _
class WanHandler(BaseConfigHandler):
userfriendly_title = gettext("WAN")
def __init__(self, *args, **kwargs):
# Do not display "none" options for WAN protocol if hide_no_wan is True
self.hide_no_wan = kwargs.pop("hide_no_wan", False)
self.status_data = current_state.backend.perform("wan", "get_wan_status")
self.backend_data = current_state.backend.perform("wan", "get_settings")
super(WanHandler, self).__init__(*args, **kwargs)
@staticmethod
def _convert_backend_data_to_form_data(data):
res = {}
# WAN
# Convert none (initial setup) to dhcp (default)
res["proto"] = (
"dhcp"
if data["wan_settings"]["wan_type"] == "none"
else data["wan_settings"]["wan_type"]
)
if res["proto"] == "dhcp":
res["hostname"] = data["wan_settings"].get("wan_dhcp", {}).get("hostname", "")
elif res["proto"] == "static":
res["ipaddr"] = data["wan_settings"]["wan_static"]["ip"]
res["netmask"] = data["wan_settings"]["wan_static"]["netmask"]
res["gateway"] = data["wan_settings"]["wan_static"]["gateway"]
res["ipv4_dns1"] = data["wan_settings"]["wan_static"].get("dns1", "")
res["ipv4_dns2"] = data["wan_settings"]["wan_static"].get("dns2", "")
elif res["proto"] == "pppoe":
res["username"] = data["wan_settings"]["wan_pppoe"]["username"]
res["password"] = data["wan_settings"]["wan_pppoe"]["password"]
# WAN6
res["wan6_proto"] = data["wan6_settings"]["wan6_type"]
if res["wan6_proto"] == "static":
res["ip6addr"] = data["wan6_settings"]["wan6_static"]["ip"]
res["ip6prefix"] = data["wan6_settings"]["wan6_static"]["network"]
res["ip6gw"] = data["wan6_settings"]["wan6_static"]["gateway"]
res["ipv6_dns1"] = data["wan6_settings"]["wan6_static"].get("dns1", "")
res["ipv6_dns2"] = data["wan6_settings"]["wan6_static"].get("dns2", "")
elif res["wan6_proto"] == "dhcpv6":
res["ip6duid"] = data["wan6_settings"]["wan6_dhcpv6"]["duid"]
elif res["wan6_proto"] == "6to4":
res["6to4_ipaddr"] = data["wan6_settings"]["wan6_6to4"]["ipv4_address"]
elif res["wan6_proto"] == "6in4":
res["6in4_mtu"] = data["wan6_settings"]["wan6_6in4"]["mtu"]
res["6in4_server_ipv4"] = data["wan6_settings"]["wan6_6in4"]["server_ipv4"]
res["6in4_ipv6_prefix"] = data["wan6_settings"]["wan6_6in4"]["ipv6_prefix"]
res["6in4_dynamic_enabled"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"enabled"
]
if res["6in4_dynamic_enabled"]:
res["6in4_tunnel_id"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"tunnel_id"
]
res["6in4_username"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"username"
]
res["6in4_key"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"password_or_key"
]
# MAC
res["custom_mac"] = data["mac_settings"]["custom_mac_enabled"]
res["macaddr"] = data["mac_settings"].get("custom_mac", "")
return res
@staticmethod
def _convert_form_data_to_backend_data(data):
res = {"wan_settings": {}, "wan6_settings": {}, "mac_settings": {}}
# WAN
res["wan_settings"]["wan_type"] = data["proto"]
if data["proto"] == "dhcp":
hostname = data.get("hostname", False)
res["wan_settings"]["wan_dhcp"] = {"hostname": hostname} if hostname else {}
elif data["proto"] == "static":
res["wan_settings"]["wan_static"] = {}
res["wan_settings"]["wan_static"]["ip"] = data["ipaddr"]
res["wan_settings"]["wan_static"]["netmask"] = data["netmask"]
res["wan_settings"]["wan_static"]["gateway"] = data["gateway"]
dns1 = data.get("ipv4_dns1", None)
dns2 = data.get("ipv4_dns2", None)
res["wan_settings"]["wan_static"].update(
{k: v for k, v in {"dns1": dns1, "dns2": dns2}.items() if v}
)
elif data["proto"] == "pppoe":
res["wan_settings"]["wan_pppoe"] = {}
res["wan_settings"]["wan_pppoe"]["username"] = data["username"]
res["wan_settings"]["wan_pppoe"]["password"] = data["password"]
# WAN6
res["wan6_settings"]["wan6_type"] = data["wan6_proto"]
if data["wan6_proto"] == "static":
res["wan6_settings"]["wan6_static"] = {}
res["wan6_settings"]["wan6_static"]["ip"] = data["ip6addr"]
res["wan6_settings"]["wan6_static"]["network"] = data["ip6prefix"]
res["wan6_settings"]["wan6_static"]["gateway"] = data["ip6gw"]
dns1 = data.get("ipv6_dns1", None)
dns2 = data.get("ipv6_dns2", None)
res["wan6_settings"]["wan6_static"].update(
{k: v for k, v in {"dns1": dns1, "dns2": dns2}.items() if v}
)
if data["wan6_proto"] == "dhcpv6":
res["wan6_settings"]["wan6_dhcpv6"] = {"duid": data.get("ip6duid", "")}
if data["wan6_proto"] == "6to4":
res["wan6_settings"]["wan6_6to4"] = {"ipv4_address": data.get("6to4_ipaddr", "")}
if data["wan6_proto"] == "6in4":
dynamic = {"enabled": data.get("6in4_dynamic_enabled", False)}
if dynamic["enabled"]:
dynamic["tunnel_id"] = data.get("6in4_tunnel_id")
dynamic["username"] = data.get("6in4_username")
dynamic["password_or_key"] = data.get("6in4_key")
res["wan6_settings"]["wan6_6in4"] = {
"mtu": int(data.get("6in4_mtu")),
"ipv6_prefix": data.get("6in4_ipv6_prefix"),
"server_ipv4": data.get("6in4_server_ipv4"),
"dynamic_ipv4": dynamic,
}
# MAC
res["mac_settings"] = (
{"custom_mac_enabled": True, "custom_mac": data["macaddr"]}
if "custom_mac" in data and data["custom_mac"]
else {"custom_mac_enabled": False}
)
return res
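    # Hedged illustration of the form -> backend mapping above for a static
    # IPv4 setup (all field values are placeholders):
    #
    #   {"proto": "static", "ipaddr": "192.0.2.10",
    #    "netmask": "255.255.255.0", "gateway": "192.0.2.1",
    #    "wan6_proto": "none", "custom_mac": False}
    #   maps to
    #   {"wan_settings": {"wan_type": "static",
    #                     "wan_static": {"ip": "192.0.2.10",
    #                                    "netmask": "255.255.255.0",
    #                                    "gateway": "192.0.2.1"}},
    #    "wan6_settings": {"wan6_type": "none"},
    #    "mac_settings": {"custom_mac_enabled": False}}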
def get_form(self):
data = WanHandler._convert_backend_data_to_form_data(self.backend_data)
if self.data:
# Update from post
data.update(self.data)
# WAN
wan_form = fapi.ForisForm("wan", data)
wan_main = wan_form.add_section(
name="set_wan",
title=_(self.userfriendly_title),
description=_(
"Here you specify your WAN port settings. Usually, you can leave these "
"options untouched unless instructed otherwise by your internet service "
"provider. Also, in case there is a cable or DSL modem connecting your "
"router to the network, it is usually not necessary to change this "
"setting."
),
)
WAN_DHCP = "dhcp"
WAN_STATIC = "static"
WAN_PPPOE = "pppoe"
WAN_OPTIONS = (
(WAN_DHCP, _("DHCP (automatic configuration)")),
(WAN_STATIC, _("Static IP address (manual configuration)")),
(WAN_PPPOE, _("PPPoE (for DSL bridges, Modem Turris, etc.)")),
)
WAN6_NONE = "none"
WAN6_DHCP = "dhcpv6"
WAN6_STATIC = "static"
WAN6_6TO4 = "6to4"
WAN6_6IN4 = "6in4"
WAN6_OPTIONS = (
(WAN6_DHCP, _("DHCPv6 (automatic configuration)")),
(WAN6_STATIC, _("Static IP address (manual configuration)")),
(WAN6_6TO4, _("6to4 (public IPv4 address required)")),
(WAN6_6IN4, _("6in4 (public IPv4 address required)")),
)
if not self.hide_no_wan:
WAN6_OPTIONS = ((WAN6_NONE, _("Disable IPv6")),) + WAN6_OPTIONS
# protocol
wan_main.add_field(
Dropdown, name="proto", label=_("IPv4 protocol"), args=WAN_OPTIONS, default=WAN_DHCP
)
# static ipv4
wan_main.add_field(
Textbox,
name="ipaddr",
label=_("IP address"),
required=True,
validators=validators.IPv4(),
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox,
name="netmask",
label=_("Network mask"),
required=True,
validators=validators.IPv4Netmask(),
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox, name="gateway", label=_("Gateway"), required=True, validators=validators.IPv4()
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox,
name="hostname",
label=_("DHCP hostname"),
validators=validators.Domain(),
hint=_("Hostname which will be provided to DHCP server."),
).requires("proto", WAN_DHCP)
# DNS servers
wan_main.add_field(
Textbox,
name="ipv4_dns1",
label=_("DNS server 1 (IPv4)"),
validators=validators.IPv4(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox,
name="ipv4_dns2",
label=_("DNS server 2 (IPv4)"),
validators=validators.IPv4(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("proto", WAN_STATIC)
# xDSL settings
wan_main.add_field(
Textbox, name="username", label=_("PAP/CHAP username"), required=True,
).requires("proto", WAN_PPPOE)
wan_main.add_field(
PasswordWithHide, name="password", label=_("PAP/CHAP password"), required=True,
).requires("proto", WAN_PPPOE)
# IPv6 configuration
wan_main.add_field(
Dropdown,
name="wan6_proto",
label=_("IPv6 protocol"),
args=WAN6_OPTIONS,
default=WAN6_NONE,
)
wan_main.add_field(
Textbox,
name="ip6addr",
label=_("IPv6 address"),
validators=validators.IPv6Prefix(),
required=True,
hint=_(
"IPv6 address and prefix length for WAN interface, " "e.g. 2001:db8:be13:37da::1/64"
),
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ip6gw",
label=_("IPv6 gateway"),
validators=validators.IPv6(),
required=True,
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ip6prefix",
label=_("IPv6 prefix"),
validators=validators.IPv6Prefix(),
hint=_("Address range for local network, " "e.g. 2001:db8:be13:37da::/64"),
).requires("wan6_proto", WAN6_STATIC)
# DNS servers
wan_main.add_field(
Textbox,
name="ipv6_dns1",
label=_("DNS server 1 (IPv6)"),
validators=validators.IPv6(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ipv6_dns2",
label=_("DNS server 2 (IPv6)"),
validators=validators.IPv6(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ip6duid",
label=_("Custom DUID"),
validators=validators.Duid(),
placeholder=self.status_data["last_seen_duid"],
hint=_("DUID which will be provided to the DHCPv6 server."),
).requires("wan6_proto", WAN6_DHCP)
wan_main.add_field(
Textbox,
name="6to4_ipaddr",
label=_("Public IPv4"),
validators=validators.IPv4(),
hint=_(
"In order to use 6to4 protocol, you might need to specify your public IPv4 "
"address manually (e.g. when your WAN interface has a private address which "
"is mapped to public IP)."
),
placeholder=_("use autodetection"),
required=False,
).requires("wan6_proto", WAN6_6TO4)
wan_main.add_field(
Textbox,
name="6in4_server_ipv4",
label=_("Provider IPv4"),
validators=validators.IPv4(),
hint=_("This address will be used as a endpoint of the tunnel on the provider's side."),
required=True,
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Textbox,
name="6in4_ipv6_prefix",
label=_("Routed IPv6 prefix"),
validators=validators.IPv6Prefix(),
hint=_("IPv6 addresses which will be routed to your network."),
required=True,
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Number,
name="6in4_mtu",
label=_("MTU"),
validators=validators.InRange(1280, 1500),
hint=_("Maximum Transmission Unit in the tunnel (in bytes)."),
required=True,
default="1480",
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Checkbox,
name="6in4_dynamic_enabled",
label=_("Dynamic IPv4 handling"),
hint=_(
"Some tunnel providers allow you to have public dynamic IPv4. "
"Note that you need to fill in some extra fields to make it work."
),
default=False,
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Textbox,
name="6in4_tunnel_id",
label=_("Tunnel ID"),
validators=validators.NotEmpty(),
hint=_("ID of your tunnel which was assigned to you by the provider."),
required=True,
).requires("6in4_dynamic_enabled", True)
wan_main.add_field(
Textbox,
name="6in4_username",
label=_("Username"),
validators=validators.NotEmpty(),
hint=_("Username which will be used to provide credentials to your tunnel provider."),
required=True,
).requires("6in4_dynamic_enabled", True)
wan_main.add_field(
Textbox,
name="6in4_key",
label=_("Key"),
validators=validators.NotEmpty(),
hint=_("Key which will be used to provide credentials to your tunnel provider."),
required=True,
).requires("6in4_dynamic_enabled", True)
# custom MAC
wan_main.add_field(
Checkbox,
name="custom_mac",
label=_("Custom MAC address"),
hint=_(
"Useful in cases, when a specific MAC address is required by "
"your internet service provider."
),
)
wan_main.add_field(
Textbox,
name="macaddr",
label=_("MAC address"),
validators=validators.MacAddress(),
required=True,
hint=_("Colon is used as a separator, for example 00:11:22:33:44:55"),
).requires("custom_mac", True)
def wan_form_cb(data):
backend_data = WanHandler._convert_form_data_to_backend_data(data)
res = current_state.backend.perform("wan", "update_settings", backend_data)
return "save_result", res # store {"result": ...} to be used later...
wan_form.add_callback(wan_form_cb)
return wan_form
|
gpl-3.0
| -9,020,859,925,960,720,000
| 39.842353
| 100
| 0.532204
| false
| 3.808249
| true
| false
| false
|
BorgERP/borg-erp-6of3
|
verticals/garage61/acy_vat_number_truncate/partner.py
|
1
|
1631
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
from tools.translate import _
import tools
import os
class res_partner(osv.osv):
_inherit = 'res.partner'
def _vat_truncate(self,cr,uid,ids,name,arg,context={}):
res={}
for partner in self.browse(cr,uid,ids,context):
if partner.country == partner.company_id.partner_id.country:
res[partner.id] = partner.vat[2:]
else:
res[partner.id] = partner.vat
return res
_columns = {
'vat_truncate': fields.function(_vat_truncate, method=True, type='char', size=32, string='VAT Truncate', readonly=True),
}
res_partner()
|
agpl-3.0
| 3,920,011,529,964,214,000
| 36.953488
| 128
| 0.600858
| false
| 4.203608
| false
| false
| false
|
feend78/evennia
|
evennia/commands/cmdset.py
|
1
|
23699
|
"""
A Command Set (CmdSet) holds a set of commands. The Cmdsets can be
merged and combined to create new sets of commands in a
non-destructive way. This makes them very powerful for implementing
custom game states where different commands (or different variations
of commands) are available to the accounts depending on circumstance.
The available merge operations are partly borrowed from mathematical
Set theory.
* Union - The two command sets are merged so that as many commands as
  possible from each cmdset end up in the merged cmdset. Same-name
commands are merged by priority. This is the most common default.
Ex: A1,A3 + B1,B2,B4,B5 = A1,B2,A3,B4,B5
* Intersect - Only commands found in *both* cmdsets (i.e. which have
same names) end up in the merged cmdset, with the higher-priority
cmdset replacing the lower one. Ex: A1,A3 + B1,B2,B4,B5 = A1
* Replace - The commands of this cmdset completely replaces the
lower-priority cmdset's commands, regardless of if same-name commands
exist. Ex: A1,A3 + B1,B2,B4,B5 = A1,A3
* Remove - This removes the relevant commands from the
lower-priority cmdset completely. They are not replaced with
anything, so this in effect uses the high-priority cmdset as a filter
to affect the low-priority cmdset. Ex: A1,A3 + B1,B2,B4,B5 = B2,B4,B5
"""
from future.utils import listvalues, with_metaclass
from weakref import WeakKeyDictionary
from django.utils.translation import ugettext as _
from evennia.utils.utils import inherits_from, is_iter
__all__ = ("CmdSet",)
class _CmdSetMeta(type):
"""
This metaclass makes some minor on-the-fly convenience fixes to
the cmdset class.
"""
def __init__(cls, *args, **kwargs):
"""
Fixes some things in the cmdclass
"""
# by default we key the cmdset the same as the
# name of its class.
if not hasattr(cls, 'key') or not cls.key:
cls.key = cls.__name__
cls.path = "%s.%s" % (cls.__module__, cls.__name__)
if not isinstance(cls.key_mergetypes, dict):
cls.key_mergetypes = {}
super(_CmdSetMeta, cls).__init__(*args, **kwargs)
class CmdSet(with_metaclass(_CmdSetMeta, object)):
"""
This class describes a unique cmdset that understands priorities.
CmdSets can be merged and made to perform various set operations
on each other. CmdSets have priorities that affect which of their
    contained commands gets used.
In the examples, cmdset A always have higher priority than cmdset B.
key - the name of the cmdset. This can be used on its own for game
operations
mergetype (partly from Set theory):
Union - The two command sets are merged so that as many
        commands as possible from each cmdset end up in the
merged cmdset. Same-name commands are merged by
priority. This is the most common default.
Ex: A1,A3 + B1,B2,B4,B5 = A1,B2,A3,B4,B5
Intersect - Only commands found in *both* cmdsets
(i.e. which have same names) end up in the merged
cmdset, with the higher-priority cmdset replacing the
lower one. Ex: A1,A3 + B1,B2,B4,B5 = A1
Replace - The commands of this cmdset completely replaces
the lower-priority cmdset's commands, regardless
of if same-name commands exist.
Ex: A1,A3 + B1,B2,B4,B5 = A1,A3
Remove - This removes the relevant commands from the
lower-priority cmdset completely. They are not
        replaced with anything, so this in effect uses the
high-priority cmdset as a filter to affect the
low-priority cmdset.
Ex: A1,A3 + B1,B2,B4,B5 = B2,B4,B5
Note: Commands longer than 2 characters and starting
        with double underscores, like '__noinput_command'
are considered 'system commands' and are
        exempt from all merge operations - they are
ALWAYS included across mergers and only affected
if same-named system commands replace them.
priority- All cmdsets are always merged in pairs of two so that
the higher set's mergetype is applied to the
lower-priority cmdset. Default commands have priority 0,
high-priority ones like Exits and Channels have 10 and 9.
Priorities can be negative as well to give default
commands preference.
duplicates - determines what happens when two sets of equal
priority merge. Default has the first of them in the
merger (i.e. A above) automatically taking
        precedence. But if duplicates is true, the
result will be a merger with more than one of each
name match. This will usually lead to the account
receiving a multiple-match error higher up the road,
but can be good for things like cmdsets on non-account
objects in a room, to allow the system to warn that
more than one 'ball' in the room has the same 'kick'
command defined on it, so it may offer a chance to
select which ball to kick ... Allowing duplicates
only makes sense for Union and Intersect, the setting
is ignored for the other mergetypes.
key_mergetype (dict) - allows the cmdset to define a unique
mergetype for particular cmdsets. Format is
{CmdSetkeystring:mergetype}. Priorities still apply.
        Example: {'Myevilcmdset': 'Replace'}, which would make
sure for this set to always use 'Replace' on
Myevilcmdset no matter what overall mergetype this set
has.
no_objs - don't include any commands from nearby objects
when searching for suitable commands
no_exits - ignore the names of exits when matching against
commands
no_channels - ignore the name of channels when matching against
commands (WARNING- this is dangerous since the
account can then not even ask staff for help if
something goes wrong)
"""
key = "Unnamed CmdSet"
mergetype = "Union"
priority = 0
# These flags, if set to None, will allow "pass-through" of lower-prio settings
# of True/False. If set to True/False, will override lower-prio settings.
no_exits = None
no_objs = None
no_channels = None
# same as above, but if left at None in the final merged set, the
# cmdhandler will auto-assume True for Objects and stay False for all
# other entities.
duplicates = None
permanent = False
key_mergetypes = {}
errmessage = ""
# pre-store properties to duplicate straight off
to_duplicate = ("key", "cmdsetobj", "no_exits", "no_objs",
"no_channels", "permanent", "mergetype",
"priority", "duplicates", "errmessage")
def __init__(self, cmdsetobj=None, key=None):
"""
Creates a new CmdSet instance.
Args:
cmdsetobj (Session, Account, Object, optional): This is the database object
to which this particular instance of cmdset is related. It
is often a character but may also be a regular object, Account
or Session.
            key (str, optional): The identifier for this cmdset. This
                helps when you want to selectively remove cmdsets.
"""
if key:
self.key = key
self.commands = []
self.system_commands = []
self.actual_mergetype = self.mergetype
self.cmdsetobj = cmdsetobj
# this is set only on merged sets, in cmdhandler.py, in order to
# track, list and debug mergers correctly.
self.merged_from = []
# initialize system
self.at_cmdset_creation()
self._contains_cache = WeakKeyDictionary() # {}
# Priority-sensitive merge operations for cmdsets
def _union(self, cmdset_a, cmdset_b):
"""
Merge two sets using union merger
Args:
cmdset_a (Cmdset): Cmdset given higher priority in the case of a tie.
cmdset_b (Cmdset): Cmdset given lower priority in the case of a tie.
Returns:
cmdset_c (Cmdset): The result of A U B operation.
Notes:
Union, C = A U B, means that C gets all elements from both A and B.
"""
cmdset_c = cmdset_a._duplicate()
# we make copies, not refs by use of [:]
cmdset_c.commands = cmdset_a.commands[:]
if cmdset_a.duplicates and cmdset_a.priority == cmdset_b.priority:
cmdset_c.commands.extend(cmdset_b.commands)
else:
cmdset_c.commands.extend([cmd for cmd in cmdset_b
if cmd not in cmdset_a])
return cmdset_c
def _intersect(self, cmdset_a, cmdset_b):
"""
Merge two sets using intersection merger
Args:
cmdset_a (Cmdset): Cmdset given higher priority in the case of a tie.
cmdset_b (Cmdset): Cmdset given lower priority in the case of a tie.
Returns:
cmdset_c (Cmdset): The result of A (intersect) B operation.
Notes:
Intersection, C = A (intersect) B, means that C only gets the
parts of A and B that are the same (that is, the commands
of each set having the same name. Only the one of these
having the higher prio ends up in C).
"""
cmdset_c = cmdset_a._duplicate()
if cmdset_a.duplicates and cmdset_a.priority == cmdset_b.priority:
for cmd in [cmd for cmd in cmdset_a if cmd in cmdset_b]:
cmdset_c.add(cmd)
cmdset_c.add(cmdset_b.get(cmd))
else:
cmdset_c.commands = [cmd for cmd in cmdset_a if cmd in cmdset_b]
return cmdset_c
def _replace(self, cmdset_a, cmdset_b):
"""
Replace the contents of one set with another
Args:
cmdset_a (Cmdset): Cmdset replacing
cmdset_b (Cmdset): Cmdset to replace
Returns:
            cmdset_c (Cmdset): This is identical to cmdset_a.
Notes:
C = A, where B is ignored.
"""
cmdset_c = cmdset_a._duplicate()
cmdset_c.commands = cmdset_a.commands[:]
return cmdset_c
def _remove(self, cmdset_a, cmdset_b):
"""
Filter a set by another.
Args:
cmdset_a (Cmdset): Cmdset acting as a removal filter.
cmdset_b (Cmdset): Cmdset to filter
Returns:
cmdset_c (Cmdset): B, with all matching commands from A removed.
Notes:
C = B - A, where A is used to remove the commands of B.
"""
cmdset_c = cmdset_a._duplicate()
cmdset_c.commands = [cmd for cmd in cmdset_b if cmd not in cmdset_a]
return cmdset_c
def _instantiate(self, cmd):
"""
        Checks whether the object is a command class rather than an
        instantiated command and, if so, instantiates it. Other types,
        like strings, are passed through.
Args:
cmd (any): Entity to analyze.
Returns:
result (any): An instantiated Command or the input unmodified.
"""
try:
return cmd()
except TypeError:
return cmd
def _duplicate(self):
"""
Returns a new cmdset with the same settings as this one (no
actual commands are copied over)
Returns:
cmdset (Cmdset): A copy of the current cmdset.
"""
cmdset = CmdSet()
for key, val in ((key, getattr(self, key)) for key in self.to_duplicate):
if val != getattr(cmdset, key):
# only copy if different from default; avoid turning
# class-vars into instance vars
setattr(cmdset, key, val)
cmdset.key_mergetypes = self.key_mergetypes.copy()
return cmdset
def __str__(self):
"""
Show all commands in cmdset when printing it.
Returns:
commands (str): Representation of commands in Cmdset.
"""
return ", ".join([str(cmd) for cmd in sorted(self.commands, key=lambda o:o.key)])
def __iter__(self):
"""
Allows for things like 'for cmd in cmdset':
Returns:
iterable (iter): Commands in Cmdset.
"""
return iter(self.commands)
def __contains__(self, othercmd):
"""
Returns True if this cmdset contains the given command (as
defined by command name and aliases). This allows for things
like 'if cmd in cmdset'
"""
ret = self._contains_cache.get(othercmd)
if ret is None:
ret = othercmd in self.commands
self._contains_cache[othercmd] = ret
return ret
def __add__(self, cmdset_a):
"""
Merge this cmdset (B) with another cmdset (A) using the + operator,
C = B + A
Here, we (by convention) say that 'A is merged onto B to form
C'. The actual merge operation used in the 'addition' depends
on which priorities A and B have. The one of the two with the
highest priority will apply and give its properties to C. In
the case of a tie, A takes priority and replaces the
same-named commands in B unless A has the 'duplicate' variable
set (which means both sets' commands are kept).
"""
# It's okay to merge with None
if not cmdset_a:
return self
sys_commands_a = cmdset_a.get_system_cmds()
sys_commands_b = self.get_system_cmds()
if self.priority <= cmdset_a.priority:
# A higher or equal priority to B
# preserve system __commands
sys_commands = sys_commands_a + [cmd for cmd in sys_commands_b
if cmd not in sys_commands_a]
mergetype = cmdset_a.key_mergetypes.get(self.key, cmdset_a.mergetype)
if mergetype == "Intersect":
cmdset_c = self._intersect(cmdset_a, self)
elif mergetype == "Replace":
cmdset_c = self._replace(cmdset_a, self)
elif mergetype == "Remove":
cmdset_c = self._remove(cmdset_a, self)
else: # Union
cmdset_c = self._union(cmdset_a, self)
# pass through options whenever they are set, unless the merging or higher-prio
# set changes the setting (i.e. has a non-None value). We don't pass through
# the duplicates setting; that is per-merge
cmdset_c.no_channels = self.no_channels if cmdset_a.no_channels is None else cmdset_a.no_channels
cmdset_c.no_exits = self.no_exits if cmdset_a.no_exits is None else cmdset_a.no_exits
cmdset_c.no_objs = self.no_objs if cmdset_a.no_objs is None else cmdset_a.no_objs
else:
# B higher priority than A
            # preserve system __commands
sys_commands = sys_commands_b + [cmd for cmd in sys_commands_a
if cmd not in sys_commands_b]
mergetype = self.key_mergetypes.get(cmdset_a.key, self.mergetype)
if mergetype == "Intersect":
cmdset_c = self._intersect(self, cmdset_a)
elif mergetype == "Replace":
cmdset_c = self._replace(self, cmdset_a)
elif mergetype == "Remove":
cmdset_c = self._remove(self, cmdset_a)
else: # Union
cmdset_c = self._union(self, cmdset_a)
# pass through options whenever they are set, unless the higher-prio
# set changes the setting (i.e. has a non-None value). We don't pass through
# the duplicates setting; that is per-merge
cmdset_c.no_channels = cmdset_a.no_channels if self.no_channels is None else self.no_channels
cmdset_c.no_exits = cmdset_a.no_exits if self.no_exits is None else self.no_exits
cmdset_c.no_objs = cmdset_a.no_objs if self.no_objs is None else self.no_objs
# we store actual_mergetype since key_mergetypes
# might be different from the main mergetype.
# This is used for diagnosis.
cmdset_c.actual_mergetype = mergetype
# print "__add__ for %s (prio %i) called with %s (prio %i)." % (self.key, self.priority, cmdset_a.key, cmdset_a.priority)
# return the system commands to the cmdset
cmdset_c.add(sys_commands)
return cmdset_c
def add(self, cmd):
"""
        Add a new command, a list of commands or another cmdset to this
        CmdSet. Note that this is *not* a merge operation (that is
        handled by the + operator).
Args:
cmd (Command, list, Cmdset): This allows for adding one or
more commands to this Cmdset in one go. If another Cmdset
is given, all its commands will be added.
Notes:
If cmd already exists in set, it will replace the old one
(no priority checking etc happens here). This is very useful
            when overloading default commands.
If cmd is another cmdset class or -instance, the commands of
            that command set are added to this one, as if they were part of
the original cmdset definition. No merging or priority checks
are made, rather later added commands will simply replace
existing ones to make a unique set.
"""
if inherits_from(cmd, "evennia.commands.cmdset.CmdSet"):
# cmd is a command set so merge all commands in that set
# to this one. We raise a visible error if we created
# an infinite loop (adding cmdset to itself somehow)
try:
cmd = self._instantiate(cmd)
except RuntimeError:
string = "Adding cmdset %(cmd)s to %(class)s lead to an "
string += "infinite loop. When adding a cmdset to another, "
string += "make sure they are not themself cyclically added to "
string += "the new cmdset somewhere in the chain."
raise RuntimeError(_(string) % {"cmd": cmd,
"class": self.__class__})
cmds = cmd.commands
elif is_iter(cmd):
cmds = [self._instantiate(c) for c in cmd]
else:
cmds = [self._instantiate(cmd)]
commands = self.commands
system_commands = self.system_commands
for cmd in cmds:
# add all commands
if not hasattr(cmd, 'obj'):
cmd.obj = self.cmdsetobj
try:
ic = commands.index(cmd)
commands[ic] = cmd # replace
except ValueError:
commands.append(cmd)
            # extra pass to make sure there are no duplicates
self.commands = list(set(commands))
# add system_command to separate list as well,
# for quick look-up
if cmd.key.startswith("__"):
try:
ic = system_commands.index(cmd)
system_commands[ic] = cmd # replace
except ValueError:
system_commands.append(cmd)
def remove(self, cmd):
"""
Remove a command instance from the cmdset.
Args:
cmd (Command or str): Either the Command object to remove
or the key of such a command.
"""
cmd = self._instantiate(cmd)
if cmd.key.startswith("__"):
try:
ic = self.system_commands.index(cmd)
del self.system_commands[ic]
except ValueError:
# ignore error
pass
else:
self.commands = [oldcmd for oldcmd in self.commands if oldcmd != cmd]
def get(self, cmd):
"""
Get a command from the cmdset. This is mostly useful to
check if the command is part of this cmdset or not.
Args:
cmd (Command or str): Either the Command object or its key.
Returns:
cmd (Command): The first matching Command in the set.
"""
cmd = self._instantiate(cmd)
for thiscmd in self.commands:
if thiscmd == cmd:
return thiscmd
return None
def count(self):
"""
Number of commands in set.
Returns:
N (int): Number of commands in this Cmdset.
"""
return len(self.commands)
def get_system_cmds(self):
"""
Get system commands in cmdset
Returns:
sys_cmds (list): The system commands in the set.
Notes:
As far as the Cmdset is concerned, system commands are any
commands with a key starting with double underscore __.
            These are exempt from merge operations.
"""
return self.system_commands
def make_unique(self, caller):
"""
Remove duplicate command-keys (unsafe)
Args:
caller (object): Commands on this object will
get preference in the duplicate removal.
Notes:
This is an unsafe command meant to clean out a cmdset of
doublet commands after it has been created. It is useful
for commands inheriting cmdsets from the cmdhandler where
            obj-based cmdsets are always added twice. Doublets will
be weeded out with preference to commands defined on
caller, otherwise just by first-come-first-served.
"""
unique = {}
for cmd in self.commands:
if cmd.key in unique:
ocmd = unique[cmd.key]
if (hasattr(cmd, 'obj') and cmd.obj == caller) and not \
(hasattr(ocmd, 'obj') and ocmd.obj == caller):
unique[cmd.key] = cmd
else:
unique[cmd.key] = cmd
self.commands = listvalues(unique)
def get_all_cmd_keys_and_aliases(self, caller=None):
"""
Collects keys/aliases from commands
Args:
caller (Object, optional): If set, this is used to check access permissions
on each command. Only commands that pass are returned.
Returns:
names (list): A list of all command keys and aliases in this cmdset. If `caller`
was given, this list will only contain commands to which `caller` passed
the `call` locktype check.
"""
names = []
        if caller:
            for cmd in self.commands:
                if cmd.access(caller):
                    names.extend(cmd._keyaliases)
        else:
            for cmd in self.commands:
                names.extend(cmd._keyaliases)
return names
def at_cmdset_creation(self):
"""
Hook method - this should be overloaded in the inheriting
class, and should take care of populating the cmdset by use of
self.add().
"""
pass
|
bsd-3-clause
| -331,896,046,485,190,660
| 37.347896
| 130
| 0.581417
| false
| 4.242571
| false
| false
| false
|
endee1/gtv
|
script.gtvtvguide/ResetDatabase.py
|
1
|
2238
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Sean Poyser and Richard Dean (write2dixie@gmail.com)
#
# Modified for FTV Guide (09/2014 onwards)
# by Thomas Geppert [bluezed] - bluezed.apps@gmail.com
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import os
import xbmc
import xbmcgui
import xbmcaddon
def deleteDB():
try:
xbmc.log("[script.gtvtvguide] Deleting database...", xbmc.LOGDEBUG)
dbPath = xbmc.translatePath(xbmcaddon.Addon(id = 'script.gtvtvguide').getAddonInfo('profile'))
dbPath = os.path.join(dbPath, 'source.db')
delete_file(dbPath)
passed = not os.path.exists(dbPath)
if passed:
xbmc.log("[script.gtvtvguide] Deleting database...PASSED", xbmc.LOGDEBUG)
else:
xbmc.log("[script.gtvtvguide] Deleting database...FAILED", xbmc.LOGDEBUG)
return passed
    except Exception:
xbmc.log('[script.gtvtvguide] Deleting database...EXCEPTION', xbmc.LOGDEBUG)
return False
def delete_file(filename):
tries = 10
while os.path.exists(filename) and tries > 0:
try:
os.remove(filename)
break
        except OSError:
tries -= 1
if __name__ == '__main__':
if deleteDB():
d = xbmcgui.Dialog()
d.ok('gtvTV Guide', 'The database has been successfully deleted.', 'It will be re-created next time you start the guide')
else:
d = xbmcgui.Dialog()
d.ok('gtvTV Guide', 'Failed to delete database.', 'Database may be locked,', 'please restart XBMC and try again')
|
gpl-3.0
| -9,127,050,276,214,440,000
| 32.909091
| 129
| 0.668007
| false
| 3.73
| false
| false
| false
|
pombredanne/invenio
|
modules/webjournal/lib/webjournal_washer.py
|
1
|
4833
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal input parameters washing related functions
"""
import time
import re
from invenio.webjournal_config import \
InvenioWebJournalIssueNumberBadlyFormedError, \
InvenioWebJournalNoArticleNumberError, \
InvenioWebJournalArchiveDateWronglyFormedError, \
InvenioWebJournalNoPopupRecordError, \
InvenioWebJournalNoCategoryError
from invenio.webjournal_utils import \
get_current_issue, \
guess_journal_name, \
get_journal_categories, \
get_journal_nb_issues_per_year
from invenio.config import CFG_SITE_LANG
# precompiled patterns for the parameters
issue_number_pattern = re.compile("^\d{1,3}/\d{4}$")
def wash_journal_language(ln):
"""
Washes the language parameter. If there is a language, return this,
otherwise return CFG_SITE_LANG constant
"""
if ln == "":
return CFG_SITE_LANG
else:
return ln
def wash_journal_name(ln, journal_name, guess=True):
"""
Washes the journal name parameter. In case of non-empty string,
returns it, otherwise redirects to a guessing function.
If 'guess' is True the function tries to fix the capitalization of
the journal name.
"""
if guess or not journal_name:
return guess_journal_name(ln, journal_name)
else:
return journal_name
def wash_issue_number(ln, journal_name, issue_number):
"""
Washes an issue number to fit the pattern ww/YYYY, e.g. 50/2007
w/YYYY is also accepted and transformed to 0w/YYYY, e.g. 2/2007 -> 02/2007
If no issue number is found, tries to get the current issue
"""
if issue_number == "":
return get_current_issue(ln, journal_name)
else:
issue_number_match = issue_number_pattern.match(issue_number)
if issue_number_match:
issue_number = issue_number_match.group()
number, year = issue_number.split('/')
number_issues_per_year = get_journal_nb_issues_per_year(journal_name)
precision = len(str(number_issues_per_year))
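            # zero-pad the issue number to the width of the journal's yearly
            # issue count, e.g. 2/2007 -> 02/2007 for up to 99 issues per year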
return ("%0" + str(precision) + "i/%s") % (int(number), year)
else:
raise InvenioWebJournalIssueNumberBadlyFormedError(ln,
issue_number)
def wash_category(ln, category, journal_name, issue):
"""
Washes a category name.
"""
categories = get_journal_categories(journal_name, issue=None)
if category in categories:
return category
elif category == "" and len(categories) > 0:
return categories[0]
else:
raise InvenioWebJournalNoCategoryError(ln,
category,
categories)
def wash_article_number(ln, number, journal_name):
"""
    Washes an article number. First checks that it is non-empty, then that
    it is convertible to int. If all checks pass, returns the number,
    otherwise raises an exception.
"""
if number == "":
raise InvenioWebJournalNoArticleNumberError(ln, journal_name)
try:
int(number)
    except (ValueError, TypeError):
raise InvenioWebJournalNoArticleNumberError(ln, journal_name)
return number
def wash_popup_record(ln, record, journal_name):
"""
"""
if record == "":
raise InvenioWebJournalNoPopupRecordError(ln, journal_name,
"no recid")
try:
int(record)
    except (ValueError, TypeError):
raise InvenioWebJournalNoPopupRecordError(ln, journal_name,
record)
return record
def wash_archive_date(ln, journal_name, archive_date):
"""
Washes an archive date to the form dd/mm/yyyy or empty.
"""
if archive_date == "":
return ""
try:
time.strptime(archive_date, "%d/%m/%Y")
    except (ValueError, TypeError):
raise InvenioWebJournalArchiveDateWronglyFormedError(ln,
archive_date)
return archive_date
|
gpl-2.0
| 580,070,241,764,031,600
| 34.8
| 81
| 0.634802
| false
| 4.130769
| false
| false
| false
|
lsaffre/lino
|
lino/utils/cycler.py
|
1
|
1933
|
# -*- coding: UTF-8 -*-
# Copyright 2013-2014 by Rumma & Ko Ltd.
# License: BSD, see LICENSE for more details.
"""
Turns a list of items into an endless loop.
Useful when generating demo fixtures.
>>> from lino.utils import Cycler
>>> def myfunc():
... yield "a"
... yield "b"
... yield "c"
>>> c = Cycler(myfunc())
>>> s = ""
>>> for i in range(10):
... s += c.pop()
>>> print (s)
abcabcabca
An empty Cycler or a Cycler on an empty list will endlessly pop None values:
>>> c = Cycler()
>>> print (c.pop(), c.pop(), c.pop())
None None None
>>> c = Cycler([])
>>> print (c.pop(), c.pop(), c.pop())
None None None
>>> c = Cycler(None)
>>> print (c.pop(), c.pop(), c.pop())
None None None
"""
from __future__ import unicode_literals
from __future__ import print_function
from builtins import object
class Cycler(object):
def __init__(self, *args):
"""
If there is exactly one argument, then this must be an iterable
and will be used as the list of items to cycle on.
If there is more than one positional argument, then these
arguments themselves will be the list of items.
"""
if len(args) == 0:
self.items = []
elif len(args) == 1:
if args[0] is None:
self.items = []
else:
self.items = list(args[0])
else:
self.items = args
self.current = 0
def pop(self):
if len(self.items) == 0:
return None
item = self.items[self.current]
self.current += 1
if self.current >= len(self.items):
self.current = 0
if isinstance(item, Cycler):
return item.pop()
return item
def __len__(self):
return len(self.items)
def reset(self):
self.current = 0
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
bsd-2-clause
| 4,371,008,635,208,604,000
| 21.476744
| 76
| 0.54837
| false
| 3.546789
| false
| false
| false
|
sbremer/hybrid_rs
|
hybrid_model/models/sigmoid_item_asymfactoring.py
|
1
|
3419
|
import numpy as np
from keras.layers import Embedding, Input, Flatten, Dense
from keras.layers.merge import Concatenate, Dot, Add
from keras.models import Model
from keras.regularizers import l2
from util.layers_custom import BiasLayer
from hybrid_model.models.abstract import AbstractModelCF, bias_init
class SigmoidItemAsymFactoring(AbstractModelCF):
def __init__(self, n_users, n_items, config=None):
super().__init__(n_users, n_items, config)
self.implicit = np.zeros((self.n_users, self.n_items))
# Defaults
default = {'n_factors': 40, 'reg_bias': 0.00005, 'reg_latent': 0.00003, 'implicit_thresh': 4.0,
'implicit_thresh_crosstrain': 4.75}
default.update(self.config)
self.config = default
n_factors = self.config['n_factors']
reg_bias = l2(self.config['reg_bias'])
reg_latent = l2(self.config['reg_latent'])
self.implicit_thresh = self.config.get('implicit_thresh', 4.0)
self.implicit_thresh_crosstrain = self.config.get('implicit_thresh_crosstrain', 4.75)
input_u = Input((1,))
input_i = Input((1,))
vec_i = Embedding(self.n_items, n_factors, input_length=1, embeddings_regularizer=reg_latent)(input_i)
vec_i_r = Flatten()(vec_i)
vec_implicit = Embedding(self.n_users, self.n_items, input_length=1, trainable=False, name='implicit')(
input_u)
implicit_factors = Dense(n_factors, kernel_initializer='normal', activation='linear',
kernel_regularizer=reg_latent)(vec_implicit)
implicit_factors = Flatten()(implicit_factors)
mf = Dot(1)([implicit_factors, vec_i_r])
bias_u = Embedding(self.n_users, 1, input_length=1, embeddings_initializer='zeros',
embeddings_regularizer=reg_bias)(input_u)
bias_u_r = Flatten()(bias_u)
bias_i = Embedding(self.n_items, 1, input_length=1, embeddings_initializer='zeros',
embeddings_regularizer=reg_bias)(input_i)
bias_i_r = Flatten()(bias_i)
added = Concatenate()([bias_u_r, bias_i_r, mf])
mf_out = BiasLayer(bias_initializer=bias_init, name='bias', activation='sigmoid')(added)
self.model = Model(inputs=[input_u, input_i], outputs=mf_out)
self.compile()
def recompute_implicit(self, x, y, transformed=False, crosstrain=False):
if transformed:
if crosstrain:
thresh = self.transformation.transform(self.implicit_thresh_crosstrain)
else:
thresh = self.transformation.transform(self.implicit_thresh)
else:
if crosstrain:
thresh = self.implicit_thresh_crosstrain
else:
thresh = self.implicit_thresh
inds_u, inds_i = x
# Use ratings over the threshold as implicit feedback
for u, i, r in zip(inds_u, inds_i, y):
if r >= thresh:
self.implicit[u, i] = 1.0
# Normalize using sqrt (ref. SVD++ paper)
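        # i.e. each user's implicit vector is scaled by 1/sqrt(max(1, |N(u)|)),
        # where N(u) is the set of items with implicit feedback from user u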
implicit_norm = self.implicit / np.sqrt(np.maximum(1, np.sum(self.implicit, axis=1)[:, None]))
self.model.get_layer('implicit').set_weights([implicit_norm])
def fit(self, x_train, y_train, **kwargs):
self.recompute_implicit(x_train, y_train)
return super().fit(x_train, y_train, **kwargs)
|
apache-2.0
| -9,212,830,444,832,515,000
| 37.41573
| 111
| 0.61246
| false
| 3.554054
| true
| false
| false
|
kyle8998/Practice-Coding-Questions
|
leetcode/23-Hard-Merge-K-Sorted-Lists/answer.py
|
1
|
2603
|
#!/usr/bin/env python3
#-------------------------------------------------------------------------------
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#-------------------------------------------------------------------------------
# Merge Sort Solution
#-------------------------------------------------------------------------------
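# Pairwise divide-and-conquer merging runs in O(N log k) time for N total
# nodes across k lists, versus O(N*k) for the scan-all-heads approach below.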
class Solution:
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if not lists: return None
def mergeTwoLists(node1, node2):
dummy = ListNode(0)
cur, cur1, cur2 = dummy, node1, node2
while cur1 and cur2:
cur.next = cur1 if cur1.val < cur2.val else cur2
if cur.next == cur1:
cur1 = cur1.next
else:
cur2 = cur2.next
cur = cur.next
cur.next = cur1 or cur2
return [dummy.next]
def mergelists(Lists):
if len(Lists) == 1:
return Lists
elif len(Lists) == 2:
return mergeTwoLists(Lists[0], Lists[1])
else:
low, high = 0, len(Lists)
mid = (low+high)//2
return mergeTwoLists(mergelists(Lists[low:mid])[0], mergelists(Lists[mid:high])[0])
return mergelists(lists)[0]
#-------------------------------------------------------------------------------
# First Solution (Time Limit Exceeded)
#-------------------------------------------------------------------------------
class Solution:
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if not lists:
return None
for i in range(len(lists)-1, -1, -1):
if not lists[i]:
lists.pop(i)
dummy = ListNode(None)
curr = dummy
while lists:
smallest = float('inf')
idx = 0
for i in range(len(lists)-1, -1, -1):
if lists[i] and lists[i].val < smallest:
smallest = lists[i].val
idx = i
curr.next = ListNode(smallest)
curr = curr.next
lists[idx] = lists[idx].next
for i in range(len(lists)-1, -1, -1):
if not lists[i]:
lists.pop(i)
return dummy.next
#-------------------------------------------------------------------------------
|
unlicense
| -1,056,980,143,340,566,400
| 33.25
| 99
| 0.384172
| false
| 4.65653
| false
| false
| false
|
fkie-cad/FACT_core
|
src/plugins/analysis/qemu_exec/routes/routes.py
|
1
|
3219
|
import os
from contextlib import suppress
from flask import render_template_string
from flask_restx import Resource, Namespace
from helperFunctions.database import ConnectTo
from helperFunctions.fileSystem import get_src_dir
from storage.db_interface_frontend import FrontEndDbInterface
from web_interface.components.component_base import ComponentBase
from web_interface.rest.helper import error_message, success_message
from web_interface.security.decorator import roles_accepted
from web_interface.security.privileges import PRIVILEGES
from ..code.qemu_exec import AnalysisPlugin
def get_analysis_results_for_included_uid(uid, config): # pylint: disable=invalid-name
results = {}
with ConnectTo(FrontEndDbInterface, config) as db:
this_fo = db.get_object(uid)
if this_fo is not None:
for parent_uid in _get_parent_uids_from_virtual_path(this_fo):
parent_fo = db.get_object(parent_uid)
parent_results = _get_results_from_parent_fo(parent_fo, uid)
if parent_results:
results[parent_uid] = parent_results
return results
def _get_parent_uids_from_virtual_path(file_object):
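    # virtual file paths appear to have the form "root_uid|...|parent_uid|file_uid",
    # so the second-to-last element of each path is the direct parent
    # (assumption based on the split below)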
result = set()
for path_list in file_object.virtual_file_path.values():
for virtual_path in path_list:
with suppress(IndexError):
result.add(virtual_path.split("|")[-2])
return result
def _get_results_from_parent_fo(parent_fo, uid):
if parent_fo is not None and \
AnalysisPlugin.NAME in parent_fo.processed_analysis and \
'files' in parent_fo.processed_analysis[AnalysisPlugin.NAME] and \
uid in parent_fo.processed_analysis[AnalysisPlugin.NAME]['files']:
return parent_fo.processed_analysis[AnalysisPlugin.NAME]['files'][uid]
return None
class PluginRoutes(ComponentBase):
def _init_component(self):
self._app.add_url_rule('/plugins/qemu_exec/ajax/<uid>', 'plugins/qemu_exec/ajax/<uid>', self._get_analysis_results_of_parent_fo)
@roles_accepted(*PRIVILEGES['view_analysis'])
def _get_analysis_results_of_parent_fo(self, uid):
results = get_analysis_results_for_included_uid(uid, self._config)
return render_template_string(self._load_view(), results=results)
@staticmethod
def _load_view():
path = os.path.join(get_src_dir(), 'plugins/analysis/{}/routes/ajax_view.html'.format(AnalysisPlugin.NAME))
with open(path, "r") as fp:
return fp.read()
api = Namespace('/plugins/qemu_exec/rest')
@api.hide
class QemuExecRoutesRest(Resource):
ENDPOINTS = [('/plugins/qemu_exec/rest/<uid>', ['GET'])]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = kwargs.get('config', None)
@roles_accepted(*PRIVILEGES['view_analysis'])
def get(self, uid):
results = get_analysis_results_for_included_uid(uid, self.config)
endpoint = self.ENDPOINTS[0][0]
        if not results:
            return error_message('no results found for uid {}'.format(uid), endpoint, request_data={'uid': uid})
return success_message({AnalysisPlugin.NAME: results}, endpoint, request_data={'uid': uid})
|
gpl-3.0
| 604,691,004,250,413,800
| 37.783133
| 136
| 0.679714
| false
| 3.721387
| true
| false
| false
|
ericwhyne/datapop
|
datapop-publish.py
|
1
|
1446
|
#!/usr/bin/python
import sqlite3
import datapop
import sys
import codecs
import re
import time
current_milli_time = lambda: int(round(time.time() * 1000))
outfilename = 'index.html'
interval = 3 * 60 * 60 * 1000
start_time = current_milli_time() - interval
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
databasefile = 'links.db'
# Connect to local database
conn = sqlite3.connect(databasefile)
c = conn.cursor()
urls = []
query = 'SELECT url, count(url), sum(retweets), sum(favorites), sum(followers) FROM twitterlinks where timestamp_ms > ' + str(start_time)+ ' group by url ORDER BY count(url) desc limit 50'
print query
for row in c.execute(query):
(url, count, retweets, favorites, followers) = row
urls.append({'url': url, 'count': count, 'retweets': retweets, 'favorites': favorites, 'followers': followers})
conn.close()
content = []
for url in urls:
title = datapop.fetch_title(url['url'])
if title:
print url['count'], url['retweets'], url['favorites'], url['followers'], "\t", title, url['url']
title = re.sub('\|','',title)
content.append(str(url['count']) + ' | ' + title + ' | ' + "<a href='" + url['url'] + "'>" + url['url'] + "</a>")
print "\n\nWriting to file..."
outfile = codecs.open(outfilename,'w',encoding='utf8')
outfile.write("<html><h2>What's Popular in the Data World</h2><br>\n")
outfile.write("<br>\n".join(content))
outfile.write("</html>")
|
apache-2.0
| 6,614,672,527,995,097,000
| 35.15
| 188
| 0.644537
| false
| 3.242152
| false
| false
| false
|
sbobovyc/GameTools
|
TSW/src/idx.py
|
1
|
6707
|
"""
Copyright (C) 2013 Stanislav Bobovych
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import struct
class IDX_bundle_entry:
def __init__(self):
self.RDB_type = None
self.RDB_id = None
def unpack(self, file_pointer, verbose=False):
#print hex(file_pointer.tell())
        self.RDB_type, self.RDB_id = struct.unpack("<II", file_pointer.read(8))
        if verbose:
            print "\tRDB Type: %i RDB ID: %i" % (self.RDB_type, self.RDB_id)
        return self  # return self so callers can append the unpacked entry
class IDX_bundle_data:
def __init__(self):
self.name_length = None
self.name = None
self.num_entries = None
self.bundle_entries = []
def unpack(self, file_pointer, verbose=False):
self.name_length, = struct.unpack("<I", file_pointer.read(4))
self.name = file_pointer.read(self.name_length)
self.num_entries, = struct.unpack("<I", file_pointer.read(4))
self.num_entries /= 256
if verbose:
print "Bundle name:", self.name, "Entry count: ", self.num_entries
        for entry in range(0, self.num_entries):
            self.bundle_entries.append(IDX_bundle_entry().unpack(file_pointer, verbose))
        return self  # return self so callers can append the unpacked bundle
class IDX_bundles:
def __init__(self):
self.num_bundles = None
self.bundle_data = []
def unpack(self, file_pointer, verbose=False):
self.num_bundles, = struct.unpack("<I", file_pointer.read(4))
if verbose:
print "Number of bundles", self.num_bundles
for bundle in range(0, self.num_bundles):
self.bundle_data.append(IDX_bundle_data().unpack(file_pointer, verbose))
        file_pointer.read(1)
        return self  # return self so the caller's assignment gets the object
class IDX_entry_details:
def __init__(self):
self.RDB_file_number = None
self.unknown1 = None #Flags?
self.unknown2 = None #????
self.unknown3 = None #????
self.rdbdata_offset = None
self.entry_length = None
self.md5hash = None
def unpack(self, file_pointer, verbose=False):
self.RDB_file_number, self.unknown1, self.unknown2, self.unknown3 = struct.unpack("BBBB", file_pointer.read(4))
self.rdbdata_offset, = struct.unpack("<I", file_pointer.read(4))
self.entry_length, = struct.unpack("<I", file_pointer.read(4))
# unpack md5 hash
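        # the 128-bit digest is stored as two big-endian 64-bit words;
        # shift the high word and OR in the low word to rebuild one integer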
self.md5hash, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash = self.md5hash << 64
md5hash_lower, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash |= md5hash_lower
if verbose:
print "\tRDB file number: %i" % (self.RDB_file_number)
print "\tFlags???: 0x%x" % (self.unknown1)
print "\tUnknown: 0x%x" % (self.unknown2)
print "\tUnknown: 0x%x" % (self.unknown3)
print "\tOffset in rdbdata file: 0x%x" % (self.rdbdata_offset)
print "\tLength of entry data: %i" % (self.entry_length)
print "\tMD5:", str(hex(self.md5hash)).strip('L')
return self
class IDX_index:
def __init__(self):
self.RDB_type = None
self.RDB_id = None
def unpack(self, file_pointer, verbose=False):
self.RDB_type, self.RDB_id = struct.unpack("<II", file_pointer.read(8))
if verbose:
print "\tRDB Type: %i RDB ID: %i" % (self.RDB_type, self.RDB_id)
return self
class IDX_index_header:
def __init__(self):
self.magic = None # IBDR
self.version = None # 0x07
self.md5hash = None
self.num_indeces = None
def unpack(self, file_pointer, dest_filepath, verbose=False):
self.magic, = struct.unpack("4s", file_pointer.read(4))
self.version, = struct.unpack("<I", file_pointer.read(4))
# unpack md5 hash
self.md5hash, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash = self.md5hash << 64
md5hash_lower, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash |= md5hash_lower
self.num_indeces, = struct.unpack("<I", file_pointer.read(4))
if verbose:
print "Magic: ", self.magic
print "Version: ", self.version
print "MD5 of index data: ", str(hex(self.md5hash)).strip('L')
print "Number of indeces: ", self.num_indeces
class IDX_index_file:
def __init__(self, filepath=None):
self.filepath = filepath
self.header = None
self.indeces = []
self.entry_details = []
self.bundles = None
if self.filepath != None:
self.open(filepath)
def open(self, filepath=None):
if filepath == None and self.filepath == None:
print "File path is empty"
return
if self.filepath == None:
self.filepath = filepath
def dump(self, dest_filepath=os.getcwd(), verbose=False):
with open(self.filepath, "rb") as f:
self.header = IDX_index_header()
self.header.unpack(f, dest_filepath, verbose)
for index in range(0, self.header.num_indeces):
if verbose:
print "\tIndex: ", index
self.indeces.append(IDX_index().unpack(f, verbose))
for index in range(0, self.header.num_indeces):
if verbose:
print "Index: ", index
self.entry_details.append(IDX_entry_details().unpack(f, verbose))
self.bundles = IDX_bundles().unpack(f, verbose)
def get_indeces(self, RDB_type):
id2index = {}
for i in range(0, self.header.num_indeces):
if self.indeces[i].RDB_type == RDB_type:
id2index[self.indeces[i].RDB_id] = i
return id2index
def get_entry_details(self, index):
entry_detail = self.entry_details[index]
filename = "%02i.rdbdata" % (entry_detail.RDB_file_number)
return (filename, entry_detail.rdbdata_offset, entry_detail.entry_length)
if __name__ == "__main__":
filepath = sys.argv[1]
idx = IDX_index_file(filepath)
idx.dump(verbose=True)
|
gpl-3.0
| 6,742,526,433,551,087,000
| 36.892655
| 119
| 0.591472
| false
| 3.50418
| false
| false
| false
|
Lysxia/dissemin
|
papers/utils.py
|
1
|
16403
|
# -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from __future__ import unicode_literals
import re
import hashlib
import datetime
import unicode_tex
import unicodedata
from unidecode import unidecode
from lxml.html.clean import Cleaner
from lxml.html import fromstring, _transform_result
from lxml import etree
from io import StringIO
from titlecase import titlecase
### General string utilities ###
filter_punctuation_alphanum_regex = re.compile(r'.*\w')
def filter_punctuation(lst):
"""
:param lst: list of strings
:returns: all the strings that contain at least one alphanumeric character
>>> filter_punctuation([u'abc',u'ab.',u'/,',u'a-b',u'#=', u'0'])
[u'abc', u'ab.', u'a-b', u'0']
"""
return filter(lambda x: filter_punctuation_alphanum_regex.match(x) is not None,
lst)
def nocomma(lst):
"""
Join fields using ',' ensuring that it does not appear in the fields
This is used to output similarity graphs to be visualized with Gephi.
:param lst: list of strings
:returns: these strings joined by commas, ensuring they do not contain
commas themselves
>>> nocomma([u'a',u'b',u'cd'])
u'a,b,cd'
>>> nocomma([u'a,',u'b'])
u'a,b'
>>> nocomma([u'abc',u'',u'\\n',u'def'])
u'abc, , ,def'
"""
lst = map(lambda x: str(x).replace(',','').replace('\n',''), lst)
lst = [x or ' ' for x in lst]
return ','.join(lst)
def ulower(s):
"""
Converts to unicode and lowercase.
:param s: a string
:return: unicode(s).lower()
>>> ulower('abSc')
u'absc'
>>> ulower(None)
u'none'
>>> ulower(89)
u'89'
"""
return unicode(s).lower()
def nstrip(s):
"""
Just like unicode.strip(), but works for None too.
>>> nstrip(None) is None
True
>>> nstrip(u'aa')
u'aa'
>>> nstrip(u' aa \\n')
u'aa'
"""
return s.strip() if s else None
def remove_diacritics(s):
"""
Removes diacritics using the `unidecode` package.
    :param s: a str or unicode string
:returns: if str: the same string. if unicode: the unidecoded string.
>>> remove_diacritics(u'aéèï')
'aeei'
>>> remove_diacritics(u'aéè'.encode('utf-8'))
'a\\xc3\\xa9\\xc3\\xa8'
"""
return unidecode(s) if type(s) == unicode else s
def iunaccent(s):
"""
Removes diacritics and case.
>>> iunaccent(u'BÉPO forever')
'bepo forever'
"""
return remove_diacritics(s).lower()
tokenize_space_re = re.compile(r'\s+')
def tokenize(l):
"""
A (very very simple) tokenizer.
>>> tokenize(u'Hello world!')
[u'Hello', u'world!']
>>> tokenize(u'99\\tbottles\\nof beeron \\tThe Wall')
[u'99', u'bottles', u'of', u'beeron', u'The', u'Wall']
"""
return tokenize_space_re.split(l)
def maybe_recapitalize_title(title):
"""
Recapitalize a title if it is mostly uppercase
(number of uppercase letters > number of lowercase letters)
>>> maybe_recapitalize_title(u'THIS IS CALLED SCREAMING')
u'This Is Called Screaming'
>>> maybe_recapitalize_title(u'This is just a normal title')
u'This is just a normal title'
>>> maybe_recapitalize_title(u'THIS IS JUST QUITE Awkward')
u'THIS IS JUST QUITE Awkward'
"""
nb_upper, nb_lower = 0, 0
for letter in title:
if letter.isupper():
nb_upper += 1
elif letter.islower():
nb_lower += 1
if nb_upper > nb_lower:
return titlecase(title)
else:
return title
## HTML sanitizing for the title
overescaped_re = re.compile(r'&#(\d+);')
unicode4_re = re.compile(r'(\\u[0-9A-Z]{4})(?![0-9A-Z])')
whitespace_re = re.compile(r'\s+')
html_cleaner = Cleaner()
html_cleaner.allow_tags = ['sub','sup','b','span']
html_cleaner.remove_unknown_tags = False
html_killer = Cleaner()
html_killer.allow_tags = ['div']
html_killer.remove_unknown_tags = False
latexmath_re = re.compile(r'\$(\S[^$]*?\S|\S)\$')
def remove_latex_math_dollars(string):
"""
Removes LaTeX dollar tags.
>>> remove_latex_math_dollars(u'This is $\\\\beta$-reduction explained')
u'This is \\\\beta-reduction explained'
>>> remove_latex_math_dollars(u'Compare $\\\\frac{2}{3}$ to $\\\\pi$')
u'Compare \\\\frac{2}{3} to \\\\pi'
>>> remove_latex_math_dollars(u'Click here to win $100')
u'Click here to win $100'
>>> remove_latex_math_dollars(u'What do you prefer, $50 or $100?')
u'What do you prefer, $50 or $100?'
"""
return latexmath_re.sub(r'\1', string)
latex_command_re = re.compile(r'(?P<command>\\([a-zA-Z]+|[.=\'\`"])({[^}]*})*)(?P<letter>[a-zA-Z])?')
def unescape_latex(s):
"""
Replaces LaTeX symbols by their unicode counterparts using
the `unicode_tex` package.
>>> unescape_latex(u'the $\\\\alpha$-rays of $\\\\Sigma$-algebras')
u'the $\\u03b1$-rays of $\\u03a3$-algebras'
    >>> unescape_latex(u'$\\\\textit{K}$ -trivial')
    u'$\\\\textit{K}$ -trivial'
"""
def conditional_replace(fragment):
cmd = fragment.group('command')
letter = fragment.group('letter') or ''
rep = unicode_tex.tex_to_unicode_map.get(cmd) or cmd
        # We invert the order to handle accents.
if cmd == r"\'" or cmd == r"\`":
# We normalize back to the normal form to get only one unicode character.
return unicodedata.normalize('NFC', letter + rep)
else:
# Let's just concat.
return rep + letter
return latex_command_re.sub(conditional_replace, s)
latex_one_character_braces_re = re.compile(r'(^|(^|[^\\])\b(\w+)){(.)}', re.UNICODE)
latex_full_line_braces_re = re.compile(r'^{(.*)}$')
latex_word_braces_re = re.compile(r'(^|\s){(\w+)}($|\s)', re.UNICODE)
def remove_latex_braces(s):
"""
Removes spurious braces such as in "Th{é}odore" or "a {CADE} conference"
This should be run *after* unescape_latex
>>> remove_latex_braces(u'Th{é}odore')
u'Th\\xe9odore'
>>> remove_latex_braces(u'the {CADE} conference')
u'the CADE conference'
>>> remove_latex_braces(u'consider 2^{a+b}')
u'consider 2^{a+b}'
>>> remove_latex_braces(u'{why these braces?}')
u'why these braces?'
"""
s = latex_full_line_braces_re.sub(r'\1', s)
s = latex_word_braces_re.sub(r'\1\2\3', s)
s = latex_one_character_braces_re.sub(r'\1\4', s)
s = latex_one_character_braces_re.sub(r'\1\4', s)
s = latex_one_character_braces_re.sub(r'\1\4', s)
return s
def sanitize_html(s):
"""
Removes most HTML tags, keeping the harmless ones.
This also renders some LaTeX characters with `unescape_latex`,
fixes overescaped HTML characters, and a few other fixes.
>>> sanitize_html('My title<sub>is</sub><a href="http://dissem.in"><sup>nice</sup></a>')
u'My title<sub>is</sub><sup>nice</sup>'
>>> sanitize_html('$\\\\alpha$-conversion')
u'$\\u03b1$-conversion'
>>> sanitize_html('$$\\\\eta + \\\\omega$$')
u'$\\u03b7 + \\u03c9$'
"""
s = overescaped_re.sub(r'&#\1;', s)
s = unicode4_re.sub(lambda x: x.group(1).decode('unicode-escape'), s)
s = whitespace_re.sub(r' ', s)
s = unescape_latex(s)
s = kill_double_dollars(s)
orig = html_cleaner.clean_html('<span>'+s+'</span>')
return orig[6:-7] # We cut the <span />
def kill_html(s):
"""
Removes every tag except <div> (but there are no
<div> in titles as sanitize_html removes them)
>>> kill_html('My title<sub>is</sub><a href="http://dissem.in"><sup>nice</sup> </a>')
u'My titleisnice'
"""
orig = html_killer.clean_html('<div>'+s+'</div>')
return orig[5:-6].strip() # We cut the <div />
latex_double_dollar_re = re.compile(r'\$\$([^\$]*?)\$\$')
def kill_double_dollars(s):
"""
Removes double dollars (they generate line breaks with MathJax)
This is included in the sanitize_html function.
>>> kill_double_dollars('This equation $$\\\\mathrm{P} = \\\\mathrm{NP}$$ breaks my design')
u'This equation $\\\\mathrm{P} = \\\\mathrm{NP}$ breaks my design'
"""
s = latex_double_dollar_re.sub(r'$\1$', s)
return s
def urlize(val):
"""
Ensures a would-be URL actually starts with "http://" or "https://".
:param val: the URL
:returns: the cleaned URL
>>> urlize(u'gnu.org')
u'http://gnu.org'
>>> urlize(None) is None
True
>>> urlize(u'https://gnu.org')
u'https://gnu.org'
"""
if val and not val.startswith('http://') and not val.startswith('https://'):
val = 'http://'+val
return val
#### JSON utilities !
def jpath(path, js, default=None):
"""
XPath for JSON!
:param path: a list of keys to follow in the tree of dicts, written in a string,
separated by forward slashes
:param default: the default value to return when the key is not found
>>> jpath(u'message/items', {u'message':{u'items':u'hello'}})
u'hello'
"""
def _walk(lst, js):
if js is None:
return default
if lst == []:
return js
else:
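            # descend one level; use {} as the fallback for intermediate keys
            # so that `default` is only returned at the final key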
return _walk(lst[1:], js.get(lst[0],{} if len(lst) > 1 else default))
r = _walk(path.split('/'), js)
return r
def remove_nones(dct):
"""
Return a dict, without the None values
>>> remove_nones({u'orcid':None,u'wtf':u'pl'})
{u'wtf': u'pl'}
>>> remove_nones({u'orcid':u'blah',u'hey':u'you'})
{u'orcid': u'blah', u'hey': u'you'}
>>> remove_nones({None:1})
{None: 1}
"""
return dict(filter(lambda (k,v): v is not None, dct.items()))
### Partial date representation
def try_date(year, month, day):
try:
return datetime.date(year=year, month=month, day=day)
except ValueError:
return None
def parse_int(val, default):
"""
Returns an int or a default value if parsing the int failed.
>>> parse_int(90, None)
90
>>> parse_int(None, 90)
90
>>> parse_int('est', 8)
8
"""
try:
return int(val)
except ValueError:
return default
except TypeError:
return default
def date_from_dateparts(dateparts):
"""
Constructs a date from a list of at most 3 integers.
>>> date_from_dateparts([])
datetime.date(1970, 1, 1)
>>> date_from_dateparts([2015])
datetime.date(2015, 1, 1)
>>> date_from_dateparts([2015,02])
datetime.date(2015, 2, 1)
>>> date_from_dateparts([2015,02,16])
datetime.date(2015, 2, 16)
>>> date_from_dateparts([2015,02,16])
datetime.date(2015, 2, 16)
>>> date_from_dateparts([2015,02,35])
Traceback (most recent call last):
...
ValueError: day is out of range for month
"""
year = 1970 if len(dateparts) < 1 else parse_int(dateparts[0], 1970)
    month = 1 if len(dateparts) < 2 else parse_int(dateparts[1], 1)
    day = 1 if len(dateparts) < 3 else parse_int(dateparts[2], 1)
return datetime.date(year=year, month=month, day=day)
def tolerant_datestamp_to_datetime(datestamp):
"""A datestamp to datetime that's more tolerant of diverse inputs.
Taken from pyoai.
>>> tolerant_datestamp_to_datetime('2016-02-11T18:34:12Z')
datetime.datetime(2016, 2, 11, 18, 34, 12)
>>> tolerant_datestamp_to_datetime('2016-02-11')
datetime.datetime(2016, 2, 11, 0, 0)
>>> tolerant_datestamp_to_datetime('2016-02')
datetime.datetime(2016, 2, 1, 0, 0)
>>> tolerant_datestamp_to_datetime('2016')
datetime.datetime(2016, 1, 1, 0, 0)
>>> tolerant_datestamp_to_datetime('2016-02-11T18:34:12') # Z needed
Traceback (most recent call last):
...
ValueError: Invalid datestamp: 2016-02-11T18:34:12
>>> tolerant_datestamp_to_datetime('2016-02-11-3') # too many numbers
Traceback (most recent call last):
...
ValueError: Invalid datestamp: 2016-02-11-3
>>> tolerant_datestamp_to_datetime('2016-02-11T18:37:09:38') # too many numbers
Traceback (most recent call last):
...
ValueError: Invalid datestamp: 2016-02-11T18:37:09:38
"""
splitted = datestamp.split('T')
if len(splitted) == 2:
d, t = splitted
# if no Z is present, raise error
if t[-1] != 'Z':
raise ValueError("Invalid datestamp: "+str(datestamp))
# split off Z at the end
t = t[:-1]
else:
d = splitted[0]
t = '00:00:00'
d_splitted = d.split('-')
if len(d_splitted) == 3:
YYYY, MM, DD = d_splitted
elif len(d_splitted) == 2:
YYYY, MM = d_splitted
DD = '01'
elif len(d_splitted) == 1:
YYYY = d_splitted[0]
MM = '01'
DD = '01'
else:
raise ValueError("Invalid datestamp: "+str(datestamp))
t_splitted = t.split(':')
if len(t_splitted) == 3:
hh, mm, ss = t_splitted
else:
raise ValueError("Invalid datestamp: "+str(datestamp))
return datetime.datetime(
int(YYYY), int(MM), int(DD), int(hh), int(mm), int(ss))
def datetime_to_date(dt):
"""
Converts a datetime or date object to a date object.
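    >>> datetime_to_date(datetime.datetime(2016, 2, 11, 18, 34, 12))
    datetime.date(2016, 2, 11)
    >>> datetime_to_date(datetime.date(2016, 2, 11))
    datetime.date(2016, 2, 11)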
"""
if type(dt) == datetime.datetime:
return dt.date()
elif type(dt) == datetime.date:
return dt
raise ValueError("Invalid date or datetime")
### ORCiD utilities ###
orcid_re = re.compile(r'^(http://orcid.org/)?([0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{3}[X0-9])$')
def validate_orcid(orcid):
"""
:returns: a cleaned ORCiD if the argument represents a valid ORCiD, None otherwise
This does not check that the id actually exists on orcid.org,
only checks that it is syntactically valid (including the checksum).
See http://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier
See the test suite for a more complete set of examples
>>> validate_orcid(u' 0000-0001-8633-6098\\n')
u'0000-0001-8633-6098'
"""
if not orcid:
return
try:
orcid = unicode(orcid).strip()
    except (ValueError, TypeError):
return
match = orcid_re.match(orcid)
if not match:
return
orcid = match.group(2)
nums = orcid.replace('-','')
total = 0
for i in range(15):
total = (total + int(nums[i])) * 2
checkdigit = (12 - (total % 11)) % 11
checkchar = str(checkdigit) if checkdigit != 10 else 'X'
if nums[-1] == checkchar:
return orcid
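# Illustrative sketch (not part of the original API): the loop in
# validate_orcid above implements the ISO 7064 11-2 check digit that ends
# every ORCID. A standalone version over the first 15 digits:
#   def orcid_check_char(digits15):
#       total = 0
#       for d in digits15:
#           total = (total + int(d)) * 2
#       check = (12 - total % 11) % 11
#       return 'X' if check == 10 else str(check)
#   orcid_check_char('000000018633609')  # -> '8', as in 0000-0001-8633-6098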
def affiliation_is_greater(a, b):
"""
Compares to affiliation values. Returns True
when the first contains more information than
the second
>>> affiliation_is_greater(None, None)
False
>>> affiliation_is_greater(None, 'UPenn')
False
>>> affiliation_is_greater('UPenn', None)
True
>>> affiliation_is_greater('0000-0001-8633-6098', 'Ecole normale superieure, Paris')
True
>>> affiliation_is_greater('Ecole normale superieure', 'Upenn')
True
"""
if a is None:
return False
if b is None:
return True
oa, ob = validate_orcid(a), validate_orcid(b)
if oa and not ob:
return True
if ob and not oa:
return False
return len(a) > len(b)
# List utilities
def index_of(elem, choices):
"""
Returns the index of elem (understood as a code) in the list of choices,
where choices are expected to be pairs of (code,verbose_description).
>>> index_of(42, [])
0
>>> index_of('ok', [('ok','This is ok'),('nok','This is definitely not OK')])
0
>>> index_of('nok', [('ok','This is ok'),('nok','This is definitely not OK')])
1
"""
    for idx, (code, lbl) in enumerate(choices):
        if code == elem:
            return idx
    return 0
|
agpl-3.0
| 3,605,507,531,110,764,000
| 29.530726
| 101
| 0.604636
| false
| 3.163226
| true
| false
| false
|
Rhoana/membrane_cnn
|
assess_thresh_smooth.py
|
1
|
2798
|
import mahotas
import scipy.ndimage
import scipy.misc
import numpy as np
import gzip
import cPickle
import glob
import os
import h5py
#param_path = 'D:/dev/Rhoana/membrane_cnn/results/good3/'
param_path = 'D:/dev/Rhoana/membrane_cnn/results/stumpin/'
param_files = glob.glob(param_path + "*.h5")
target_boundaries = mahotas.imread(param_path + 'boundaries.png') > 0
offset_max = 32
target_boundaries = target_boundaries[offset_max:-offset_max,offset_max:-offset_max]
for param_file in param_files:
if param_file.find('.ot.h5') != -1:
continue
print param_file
#net_output_file = param_file.replace('.h5','\\0005_classify_output_layer6_0.tif')
net_output_file = param_file.replace('.h5','\\0100_classify_output_layer6_0.tif')
net_output = mahotas.imread(net_output_file)
net_output = np.float32(net_output) / np.max(net_output)
offset_file = param_file.replace('.h5', '.ot.h5')
h5off = h5py.File(offset_file, 'r')
best_offset = h5off['/best_offset'][...]
h5off.close()
xoffset, yoffset = best_offset
best_score = 0
best_thresh = 0
best_sigma = 0
best_result = None
offset_output = np.roll(net_output, xoffset, axis=0)
offset_output = np.roll(offset_output, yoffset, axis=1)
#Crop
offset_output = offset_output[offset_max:-offset_max,offset_max:-offset_max]
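    # Grid search: sweep the Gaussian smoothing sigma and the binarisation
    # threshold, keeping the (sigma, thresh) pair with the highest F-score
    # (F = 2PR / (P + R)) against the reference boundary image.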
    for smooth_sigma in np.arange(0, 3, 0.1):
smooth_output = scipy.ndimage.filters.gaussian_filter(offset_output, smooth_sigma)
        for thresh in np.arange(0.1, 1, 0.1):
result = smooth_output > thresh
if np.sum(result) == 0:
continue
true_positives = np.sum(np.logical_and(result == 0, target_boundaries == 0))
false_positives = np.sum(np.logical_and(result == 0, target_boundaries > 0))
true_negatives = np.sum(np.logical_and(result > 0, target_boundaries > 0))
false_negatives = np.sum(np.logical_and(result > 0, target_boundaries == 0))
precision = float(true_positives) / float(true_positives + false_positives)
recall = float(true_positives) / float(true_positives + false_negatives)
Fscore = 2 * precision * recall / (precision + recall)
if Fscore > best_score:
best_score = Fscore
best_thresh = thresh
best_sigma = smooth_sigma
best_result = result
print 'Best score of {0} for sigma {1}, thresh {2}.'.format(best_score, best_sigma, best_thresh)
output_file = param_file.replace('.h5', '.sm.ot.h5')
h5out = h5py.File(output_file, 'w')
h5out['/best_score'] = best_score
h5out['/best_offset'] = best_offset
h5out['/best_thresh'] = best_thresh
h5out['/best_sigma'] = best_sigma
h5out.close()
|
bsd-3-clause
| 1,006,724,249,235,405,300
| 31.534884
| 100
| 0.632595
| false
| 3.190422
| false
| false
| false
|
teknolab/django.org.tr
|
apps/events/migrations/0001_initial.py
|
1
|
5772
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Event'
db.create_table('events_event', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('teaser', self.gf('django.db.models.fields.CharField')(max_length=250)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('start', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('end', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('locations', self.gf('django.db.models.fields.TextField')(max_length=250, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('events', ['Event'])
def backwards(self, orm):
# Deleting model 'Event'
db.delete_table('events_event')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'ordering': "('-start',)", 'object_name': 'Event'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locations': ('django.db.models.fields.TextField', [], {'max_length': '250', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'teaser': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['events']
|
bsd-3-clause
| -6,493,933,952,741,872,000
| 65.344828
| 182
| 0.560464
| false
| 3.709512
| false
| false
| false
|
mago1chi/cTPR
|
calc_raw_lda_result.py
|
1
|
7204
|
import psycopg2
import os, sys
TOPIC_NUM_LIST = [30, 100, 200, 500]
if len(sys.argv) == 1:
    print("Enter the number of topics.")
    exit()
topic_num = int(sys.argv[1])
if topic_num not in TOPIC_NUM_LIST:
    print("Valid topic numbers are: ", end="")
    for each in TOPIC_NUM_LIST:
        print("{0} ".format(each), end="")
    print()
    exit()
DBPATH = "dbname=image_tagging host=localhost user=postgres"
con = psycopg2.connect(DBPATH)
concur = con.cursor()
concur.execute('''select distinct a.tweet_id from answer as a, answer_all as b
where a.tweet_id=b.tweet_id''')
tweet_id_list = [x for x in map(lambda y: y[0], concur.fetchall())]
lda_score = {}
except_score = {}
histgram_dic = {}
query = "select distinct tag from exp_rawlda{0} where tweet_id=%s".format(topic_num)
for each_tweet_id in tweet_id_list:
concur.execute(query, (each_tweet_id,))
tag_set = { x for x in map(lambda y: y[0], concur.fetchall()) }
concur.execute('''select distinct tag from answer where tweet_id=%s''', (each_tweet_id,))
except_tag_set = { x for x in map(lambda y: y[0], concur.fetchall()) } - tag_set
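    # except_tag_set (built above) holds tags present in the human answers
    # but absent from the LDA output, i.e. the tags the method removed.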
good_num = 0
bad_num = 0
for each_tag in tag_set:
concur.execute('''select score from answer
where tweet_id=%s and tag=%s''', (each_tweet_id, each_tag))
score = concur.fetchone()[0]
        if score == 1:
            good_num += 1
        else:
            bad_num += 1
    if bad_num not in histgram_dic:
histgram_dic[bad_num] = 1
else:
histgram_dic[bad_num] += 1
except_good_num = 0
except_bad_num = 0
for each_tag in except_tag_set:
concur.execute('''select score from answer
where tweet_id=%s and tag=%s''', (each_tweet_id, each_tag))
score = concur.fetchone()[0]
        if score == 1:
            except_good_num += 1
        else:
            except_bad_num += 1
lda_score[each_tweet_id] = {'good_num': good_num, 'bad_num': bad_num}
except_score[each_tweet_id] = {'good_num': except_good_num, 'bad_num': except_bad_num}
good_rate_sum = 0
good_only_num = 0
bad_only_num = 0
good_sum = 0
bad_sum = 0
zero_num = 0
for each_tweet_id, value in lda_score.items():
each_good_num = value['good_num']
each_bad_num = value['bad_num']
good_sum += each_good_num
bad_sum += each_bad_num
    if each_good_num > 0 and each_bad_num == 0:
good_only_num += 1
    if each_good_num == 0 and each_bad_num > 0:
bad_only_num += 1
if each_good_num + each_bad_num == 0:
zero_num += 1
else:
good_rate_sum += each_good_num / (each_good_num + each_bad_num)
good_rate = round(good_rate_sum / (len(lda_score) - zero_num), 3)
total_good_rate = round(good_sum / (good_sum + bad_sum), 3)
except_good_sum = 0
except_bad_sum = 0
except_bad_rate_sum = 0
zero_num = 0
for each_tweet_id, value in except_score.items():
each_good_num = value['good_num']
each_bad_num = value['bad_num']
except_good_sum += each_good_num
except_bad_sum += each_bad_num
    if each_good_num + each_bad_num == 0:
zero_num += 1
else:
except_bad_rate_sum += each_bad_num / (each_good_num + each_bad_num)
except_bad_rate = round(except_bad_rate_sum / (len(except_score)-zero_num), 3)
remain_bad_rate = round(bad_sum / (bad_sum + except_bad_sum), 3)
total_tag_num = good_sum + bad_sum + except_good_sum + except_bad_sum
good_only_rate = round(good_only_num / len(lda_score), 3)
good_and_bad_rate = round((len(lda_score) - bad_only_num - good_only_num) / len(lda_score), 3)
bad_only_rate = 1.0 - good_only_rate - good_and_bad_rate
print('''Ratio of images with only correct tags: {0} (n = {1})
Ratio of images with both correct and noise tags: {2}
Ratio of images with only noise tags: {3}
Average correct-tag ratio per image: {4}
Assigned tags that were correct: {5} / {6} = {7}
Noise tags that could not be removed: {8} / {9} = {10}
Total number of tags: {11}
'''.format(good_only_rate, len(lda_score), good_and_bad_rate, bad_only_rate, good_rate, good_sum, good_sum+bad_sum, \
           total_good_rate, bad_sum, bad_sum+except_bad_sum, remain_bad_rate, total_tag_num))
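# The report blocks below give both micro-averaged metrics (computed from
# corpus-wide tag counts, labelled "overall") and macro-averaged ones
# (per-image precision/recall averaged over images).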
good_recall_rate_sum = 0
fmeasure_sum = 0
zero_num = 0
for each_tweet_id in tweet_id_list:
each_good_num = lda_score[each_tweet_id]['good_num']
each_bad_num = lda_score[each_tweet_id]['bad_num']
each_except_good_num = except_score[each_tweet_id]['good_num']
    if each_good_num + each_except_good_num == 0:
zero_num += 1
else:
if each_good_num + each_bad_num != 0:
precision = each_good_num / (each_good_num + each_bad_num)
else:
precision = 0
if each_good_num + each_except_good_num != 0:
recall = each_good_num / (each_good_num + each_except_good_num)
else:
recall = 0
good_recall_rate_sum += recall
if precision + recall != 0:
fmeasure_sum += 2*precision*recall / (precision + recall)
ave_recall_rate = round(good_recall_rate_sum / (len(lda_score)-zero_num), 3)
total_recall = round(good_sum / (good_sum+except_good_sum), 3)
good_fmeasure = round(2*total_good_rate*total_recall / (total_good_rate + total_recall), 3)
ave_good_fmeasure = round(fmeasure_sum / (len(tweet_id_list)-zero_num), 3)
print('''Correct tags
Overall precision: {0}
Overall recall: {1}
F-measure: {2}
Average precision: {3}
Average recall: {4}
F-measure (averaged): {5}
'''.format(total_good_rate, total_recall, good_fmeasure, good_rate, ave_recall_rate, ave_good_fmeasure))
except_bad_recall_rate_sum = 0
removed_fmeasure_sum = 0
zero_num = 0
for each_tweet_id in tweet_id_list:
each_bad_num = lda_score[each_tweet_id]['bad_num']
each_except_good_num = except_score[each_tweet_id]['good_num']
each_except_bad_num = except_score[each_tweet_id]['bad_num']
    if each_bad_num + each_except_bad_num == 0:
zero_num += 1
else:
if each_except_good_num + each_except_bad_num != 0:
precision = each_except_bad_num / (each_except_good_num + each_except_bad_num)
else:
precision = 0
if each_bad_num + each_except_bad_num != 0:
recall = each_except_bad_num / (each_bad_num + each_except_bad_num)
else:
recall = 0
except_bad_recall_rate_sum += recall
if precision + recall != 0:
removed_fmeasure_sum += 2*precision*recall / (precision + recall)
ave_bad_recall_rate = round(except_bad_recall_rate_sum / (len(lda_score)-zero_num), 3)
removed_bad_precision = round(except_bad_sum / (except_good_sum + except_bad_sum), 3)
removed_bad_recall = round(except_bad_sum / (bad_sum + except_bad_sum), 3)
removed_bad_fmeasure = round(2*removed_bad_precision*removed_bad_recall / (removed_bad_precision + removed_bad_recall), 3)
ave_removed_bad_fmeasure = round(removed_fmeasure_sum / (len(tweet_id_list)-zero_num), 3)
print('''Removed noise tags
Overall precision: {0}
Overall recall: {1}
F-measure: {2}
Average precision: {3}
Average recall: {4}
F-measure (averaged): {5}
'''.format(removed_bad_precision, removed_bad_recall, removed_bad_fmeasure, except_bad_rate, ave_bad_recall_rate, ave_removed_bad_fmeasure))
print("提案手法適用後のノイズ数分布(トピック数:{0})".format(topic_num))
print("ノイズ数,画像数")
for k, v in histgram_dic.items():
print("{0},{1}".format(k, v))
|
gpl-2.0
| 9,060,584,416,003,994,000
| 28.017094
| 140
| 0.644772
| false
| 2.456585
| false
| false
| false
|
mfherbst/spack
|
lib/spack/llnl/util/filesystem.py
|
1
|
37492
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import collections
import errno
import hashlib
import fileinput
import glob
import grp
import numbers
import os
import pwd
import re
import shutil
import stat
import sys
import tempfile
from contextlib import contextmanager
import six
from llnl.util import tty
from llnl.util.lang import dedupe
from spack.util.executable import Executable
__all__ = [
'FileFilter',
'FileList',
'HeaderList',
'LibraryList',
'ancestor',
'can_access',
'change_sed_delimiter',
'copy_mode',
'filter_file',
'find',
'find_headers',
'find_libraries',
'find_system_libraries',
'fix_darwin_install_name',
'force_remove',
'force_symlink',
'copy',
'install',
'copy_tree',
'install_tree',
'is_exe',
'join_path',
'mkdirp',
'remove_dead_links',
'remove_if_dead_link',
'remove_linked_tree',
'set_executable',
'set_install_permissions',
'touch',
'touchp',
'traverse_tree',
'unset_executable_mode',
'working_dir'
]
def path_contains_subdirectory(path, root):
norm_root = os.path.abspath(root).rstrip(os.path.sep) + os.path.sep
norm_path = os.path.abspath(path).rstrip(os.path.sep) + os.path.sep
return norm_path.startswith(norm_root)
def same_path(path1, path2):
norm1 = os.path.abspath(path1).rstrip(os.path.sep)
norm2 = os.path.abspath(path2).rstrip(os.path.sep)
return norm1 == norm2
def filter_file(regex, repl, *filenames, **kwargs):
r"""Like sed, but uses python regular expressions.
Filters every line of each file through regex and replaces the file
with a filtered version. Preserves mode of filtered files.
As with re.sub, ``repl`` can be either a string or a callable.
If it is a callable, it is passed the match object and should
return a suitable replacement string. If it is a string, it
can contain ``\1``, ``\2``, etc. to represent back-substitution
as sed would allow.
Parameters:
regex (str): The regular expression to search for
repl (str): The string to replace matches with
*filenames: One or more files to search and replace
Keyword Arguments:
        string (bool): Treat regex as a plain string. Default is False
backup (bool): Make backup file(s) suffixed with ``~``. Default is True
ignore_absent (bool): Ignore any files that don't exist.
Default is False
"""
string = kwargs.get('string', False)
backup = kwargs.get('backup', True)
ignore_absent = kwargs.get('ignore_absent', False)
# Allow strings to use \1, \2, etc. for replacement, like sed
if not callable(repl):
unescaped = repl.replace(r'\\', '\\')
def replace_groups_with_groupid(m):
def groupid_to_group(x):
return m.group(int(x.group(1)))
return re.sub(r'\\([1-9])', groupid_to_group, unescaped)
repl = replace_groups_with_groupid
if string:
regex = re.escape(regex)
for filename in filenames:
msg = 'FILTER FILE: {0} [replacing "{1}"]'
tty.debug(msg.format(filename, regex))
backup_filename = filename + "~"
if ignore_absent and not os.path.exists(filename):
msg = 'FILTER FILE: file "{0}" not found. Skipping to next file.'
tty.debug(msg.format(filename))
continue
# Create backup file. Don't overwrite an existing backup
# file in case this file is being filtered multiple times.
if not os.path.exists(backup_filename):
shutil.copy(filename, backup_filename)
try:
for line in fileinput.input(filename, inplace=True):
print(re.sub(regex, repl, line.rstrip('\n')))
except BaseException:
# clean up the original file on failure.
shutil.move(backup_filename, filename)
raise
finally:
if not backup and os.path.exists(backup_filename):
os.remove(backup_filename)
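# Illustrative usage (a sketch, not part of the original module): replace a
# compiler assignment in a Makefile, keeping 'Makefile~' as a backup:
#   filter_file(r'^CC\s*=.*', 'CC = cc', 'Makefile')
# Pass backup=False to drop the backup file once filtering succeeds.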
class FileFilter(object):
"""Convenience class for calling ``filter_file`` a lot."""
def __init__(self, *filenames):
self.filenames = filenames
def filter(self, regex, repl, **kwargs):
return filter_file(regex, repl, *self.filenames, **kwargs)
def change_sed_delimiter(old_delim, new_delim, *filenames):
"""Find all sed search/replace commands and change the delimiter.
e.g., if the file contains seds that look like ``'s///'``, you can
call ``change_sed_delimiter('/', '@', file)`` to change the
delimiter to ``'@'``.
Note that this routine will fail if the delimiter is ``'`` or ``"``.
Handling those is left for future work.
Parameters:
old_delim (str): The delimiter to search for
new_delim (str): The delimiter to replace with
*filenames: One or more files to search and replace
"""
assert(len(old_delim) == 1)
assert(len(new_delim) == 1)
# TODO: handle these cases one day?
assert(old_delim != '"')
assert(old_delim != "'")
assert(new_delim != '"')
assert(new_delim != "'")
whole_lines = "^s@([^@]*)@(.*)@[gIp]$"
whole_lines = whole_lines.replace('@', old_delim)
single_quoted = r"'s@((?:\\'|[^@'])*)@((?:\\'|[^'])*)@[gIp]?'"
single_quoted = single_quoted.replace('@', old_delim)
double_quoted = r'"s@((?:\\"|[^@"])*)@((?:\\"|[^"])*)@[gIp]?"'
double_quoted = double_quoted.replace('@', old_delim)
repl = r's@\1@\2@g'
repl = repl.replace('@', new_delim)
for f in filenames:
filter_file(whole_lines, repl, f)
filter_file(single_quoted, "'%s'" % repl, f)
filter_file(double_quoted, '"%s"' % repl, f)
def set_install_permissions(path):
"""Set appropriate permissions on the installed file."""
# If this points to a file maintained in a Spack prefix, it is assumed that
# this function will be invoked on the target. If the file is outside a
# Spack-maintained prefix, the permissions should not be modified.
if os.path.islink(path):
return
if os.path.isdir(path):
os.chmod(path, 0o755)
else:
os.chmod(path, 0o644)
def group_ids(uid=None):
"""Get group ids that a uid is a member of.
Arguments:
uid (int): id of user, or None for current user
Returns:
(list of int): gids of groups the user is a member of
"""
if uid is None:
uid = os.getuid()
user = pwd.getpwuid(uid).pw_name
return [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
def copy_mode(src, dest):
"""Set the mode of dest to that of src unless it is a link.
"""
if os.path.islink(dest):
return
src_mode = os.stat(src).st_mode
dest_mode = os.stat(dest).st_mode
if src_mode & stat.S_IXUSR:
dest_mode |= stat.S_IXUSR
if src_mode & stat.S_IXGRP:
dest_mode |= stat.S_IXGRP
if src_mode & stat.S_IXOTH:
dest_mode |= stat.S_IXOTH
os.chmod(dest, dest_mode)
def unset_executable_mode(path):
mode = os.stat(path).st_mode
mode &= ~stat.S_IXUSR
mode &= ~stat.S_IXGRP
mode &= ~stat.S_IXOTH
os.chmod(path, mode)
def copy(src, dest, _permissions=False):
"""Copies the file *src* to the file or directory *dest*.
If *dest* specifies a directory, the file will be copied into *dest*
using the base filename from *src*.
Parameters:
src (str): the file to copy
dest (str): the destination file or directory
_permissions (bool): for internal use only
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
# Expand dest to its eventual full path if it is a directory.
if os.path.isdir(dest):
dest = join_path(dest, os.path.basename(src))
shutil.copy(src, dest)
if _permissions:
set_install_permissions(dest)
copy_mode(src, dest)
def install(src, dest):
"""Installs the file *src* to the file or directory *dest*.
Same as :py:func:`copy` with the addition of setting proper
permissions on the installed file.
Parameters:
src (str): the file to install
dest (str): the destination file or directory
"""
copy(src, dest, _permissions=True)
def copy_tree(src, dest, symlinks=True, _permissions=False):
"""Recursively copy an entire directory tree rooted at *src*.
If the destination directory *dest* does not already exist, it will
be created as well as missing parent directories.
If *symlinks* is true, symbolic links in the source tree are represented
as symbolic links in the new tree and the metadata of the original links
will be copied as far as the platform allows; if false, the contents and
metadata of the linked files are copied to the new tree.
Parameters:
src (str): the directory to copy
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
_permissions (bool): for internal use only
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
mkdirp(dest)
for s, d in traverse_tree(src, dest, order='pre', follow_nonexisting=True):
if symlinks and os.path.islink(s):
# Note that this won't rewrite absolute links into the old
# root to point at the new root. Should we handle that case?
target = os.readlink(s)
os.symlink(os.path.abspath(target), d)
elif os.path.isdir(s):
mkdirp(d)
else:
shutil.copyfile(s, d)
if _permissions:
set_install_permissions(d)
copy_mode(s, d)
def install_tree(src, dest, symlinks=True):
"""Recursively install an entire directory tree rooted at *src*.
Same as :py:func:`copy_tree` with the addition of setting proper
permissions on the installed files and directories.
Parameters:
src (str): the directory to install
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
"""
copy_tree(src, dest, symlinks, _permissions=True)
def is_exe(path):
"""True if path is an executable file."""
return os.path.isfile(path) and os.access(path, os.X_OK)
def get_filetype(path_name):
"""
Return the output of file path_name as a string to identify file type.
"""
file = Executable('file')
file.add_default_env('LC_ALL', 'C')
output = file('-b', '-h', '%s' % path_name,
output=str, error=str)
return output.strip()
def mkdirp(*paths):
"""Creates a directory, as well as parent directories if needed."""
for path in paths:
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise e
elif not os.path.isdir(path):
raise OSError(errno.EEXIST, "File already exists", path)
def force_remove(*paths):
"""Remove files without printing errors. Like ``rm -f``, does NOT
remove directories."""
for path in paths:
try:
os.remove(path)
except OSError:
pass
@contextmanager
def working_dir(dirname, **kwargs):
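    """Context manager that changes into ``dirname`` for the duration of
    the block and restores the previous working directory afterwards.
    With ``create=True`` the directory is created first if needed.
    """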
if kwargs.get('create', False):
mkdirp(dirname)
orig_dir = os.getcwd()
os.chdir(dirname)
yield
os.chdir(orig_dir)
@contextmanager
def replace_directory_transaction(directory_name, tmp_root=None):
"""Moves a directory to a temporary space. If the operations executed
within the context manager don't raise an exception, the directory is
deleted. If there is an exception, the move is undone.
Args:
directory_name (path): absolute path of the directory name
tmp_root (path): absolute path of the parent directory where to create
the temporary
Returns:
temporary directory where ``directory_name`` has been moved
"""
# Check the input is indeed a directory with absolute path.
# Raise before anything is done to avoid moving the wrong directory
assert os.path.isdir(directory_name), \
'"directory_name" must be a valid directory'
assert os.path.isabs(directory_name), \
'"directory_name" must contain an absolute path'
directory_basename = os.path.basename(directory_name)
if tmp_root is not None:
assert os.path.isabs(tmp_root)
tmp_dir = tempfile.mkdtemp(dir=tmp_root)
tty.debug('TEMPORARY DIRECTORY CREATED [{0}]'.format(tmp_dir))
shutil.move(src=directory_name, dst=tmp_dir)
tty.debug('DIRECTORY MOVED [src={0}, dest={1}]'.format(
directory_name, tmp_dir
))
try:
yield tmp_dir
except (Exception, KeyboardInterrupt, SystemExit):
# Delete what was there, before copying back the original content
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
shutil.move(
src=os.path.join(tmp_dir, directory_basename),
dst=os.path.dirname(directory_name)
)
tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))
msg = 'the transactional move of "{0}" failed.'
raise RuntimeError(msg.format(directory_name))
else:
# Otherwise delete the temporary directory
shutil.rmtree(tmp_dir)
tty.debug('TEMPORARY DIRECTORY DELETED [{0}]'.format(tmp_dir))
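# Hedged usage sketch (path and rebuild step are illustrative, not from this
# module):
#   with replace_directory_transaction('/opt/sw/pkg') as backup_dir:
#       rebuild_prefix('/opt/sw/pkg')  # hypothetical operation
# On success the moved-aside copy under backup_dir is deleted; on any
# exception the original directory is restored before RuntimeError is raised.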
def hash_directory(directory):
"""Hashes recursively the content of a directory.
Args:
directory (path): path to a directory to be hashed
Returns:
hash of the directory content
"""
assert os.path.isdir(directory), '"directory" must be a directory!'
md5_hash = hashlib.md5()
# Adapted from https://stackoverflow.com/a/3431835/771663
for root, dirs, files in os.walk(directory):
for name in sorted(files):
filename = os.path.join(root, name)
# TODO: if caching big files becomes an issue, convert this to
# TODO: read in chunks. Currently it's used only for testing
# TODO: purposes.
with open(filename, 'rb') as f:
md5_hash.update(f.read())
return md5_hash.hexdigest()
def touch(path):
"""Creates an empty file at the specified path."""
perms = (os.O_WRONLY | os.O_CREAT | os.O_NONBLOCK | os.O_NOCTTY)
fd = None
try:
fd = os.open(path, perms)
os.utime(path, None)
finally:
if fd is not None:
os.close(fd)
def touchp(path):
"""Like ``touch``, but creates any parent directories needed for the file.
"""
mkdirp(os.path.dirname(path))
touch(path)
def force_symlink(src, dest):
try:
os.symlink(src, dest)
except OSError:
os.remove(dest)
os.symlink(src, dest)
def join_path(prefix, *args):
path = str(prefix)
for elt in args:
path = os.path.join(path, str(elt))
return path
def ancestor(dir, n=1):
"""Get the nth ancestor of a directory."""
parent = os.path.abspath(dir)
for i in range(n):
parent = os.path.dirname(parent)
return parent
def can_access(file_name):
"""True if we have read/write access to the file."""
return os.access(file_name, os.R_OK | os.W_OK)
def traverse_tree(source_root, dest_root, rel_path='', **kwargs):
"""Traverse two filesystem trees simultaneously.
Walks the LinkTree directory in pre or post order. Yields each
file in the source directory with a matching path from the dest
directory, along with whether the file is a directory.
e.g., for this tree::
root/
a/
file1
file2
b/
file3
When called on dest, this yields::
('root', 'dest')
('root/a', 'dest/a')
('root/a/file1', 'dest/a/file1')
('root/a/file2', 'dest/a/file2')
('root/b', 'dest/b')
('root/b/file3', 'dest/b/file3')
Keyword Arguments:
order (str): Whether to do pre- or post-order traversal. Accepted
values are 'pre' and 'post'
ignore (str): Predicate indicating which files to ignore
follow_nonexisting (bool): Whether to descend into directories in
            ``src`` that do not exist in ``dest``. Default is True
follow_links (bool): Whether to descend into symlinks in ``src``
"""
follow_nonexisting = kwargs.get('follow_nonexisting', True)
    follow_links = kwargs.get('follow_links', False)
# Yield in pre or post order?
order = kwargs.get('order', 'pre')
if order not in ('pre', 'post'):
raise ValueError("Order must be 'pre' or 'post'.")
# List of relative paths to ignore under the src root.
ignore = kwargs.get('ignore', lambda filename: False)
# Don't descend into ignored directories
if ignore(rel_path):
return
source_path = os.path.join(source_root, rel_path)
dest_path = os.path.join(dest_root, rel_path)
# preorder yields directories before children
if order == 'pre':
yield (source_path, dest_path)
for f in os.listdir(source_path):
source_child = os.path.join(source_path, f)
dest_child = os.path.join(dest_path, f)
rel_child = os.path.join(rel_path, f)
# Treat as a directory
if os.path.isdir(source_child) and (
follow_links or not os.path.islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
tuples = traverse_tree(
source_root, dest_root, rel_child, **kwargs)
for t in tuples:
yield t
# Treat as a file.
elif not ignore(os.path.join(rel_path, f)):
yield (source_child, dest_child)
if order == 'post':
yield (source_path, dest_path)
def set_executable(path):
mode = os.stat(path).st_mode
if mode & stat.S_IRUSR:
mode |= stat.S_IXUSR
if mode & stat.S_IRGRP:
mode |= stat.S_IXGRP
if mode & stat.S_IROTH:
mode |= stat.S_IXOTH
os.chmod(path, mode)
def remove_dead_links(root):
"""Removes any dead link that is present in root.
Parameters:
root (str): path where to search for dead links
"""
for file in os.listdir(root):
path = join_path(root, file)
remove_if_dead_link(path)
def remove_if_dead_link(path):
"""Removes the argument if it is a dead link.
Parameters:
path (str): The potential dead link
"""
if os.path.islink(path):
real_path = os.path.realpath(path)
if not os.path.exists(real_path):
os.unlink(path)
def remove_linked_tree(path):
"""Removes a directory and its contents.
If the directory is a symlink, follows the link and removes the real
directory before removing the link.
Parameters:
path (str): Directory to be removed
"""
if os.path.exists(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), True)
os.unlink(path)
else:
shutil.rmtree(path, True)
def fix_darwin_install_name(path):
"""Fix install name of dynamic libraries on Darwin to have full path.
There are two parts of this task:
1. Use ``install_name('-id', ...)`` to change install name of a single lib
2. Use ``install_name('-change', ...)`` to change the cross linking between
libs. The function assumes that all libraries are in one folder and
currently won't follow subfolders.
Parameters:
path (str): directory in which .dylib files are located
"""
libs = glob.glob(join_path(path, "*.dylib"))
for lib in libs:
# fix install name first:
install_name_tool = Executable('install_name_tool')
install_name_tool('-id', lib, lib)
otool = Executable('otool')
long_deps = otool('-L', lib, output=str).split('\n')
deps = [dep.partition(' ')[0][1::] for dep in long_deps[2:-1]]
# fix all dependencies:
for dep in deps:
for loc in libs:
# We really want to check for either
# dep == os.path.basename(loc) or
# dep == join_path(builddir, os.path.basename(loc)),
# but we don't know builddir (nor how symbolic links look
# in builddir). We thus only compare the basenames.
if os.path.basename(dep) == os.path.basename(loc):
install_name_tool('-change', dep, loc, lib)
break
def find(root, files, recursive=True):
"""Search for ``files`` starting from the ``root`` directory.
Like GNU/BSD find but written entirely in Python.
Examples:
.. code-block:: console
$ find /usr -name python
is equivalent to:
>>> find('/usr', 'python')
.. code-block:: console
$ find /usr/local/bin -maxdepth 1 -name python
is equivalent to:
>>> find('/usr/local/bin', 'python', recursive=False)
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
root (str): The root directory to start searching from
files (str or collections.Sequence): Library name(s) to search for
        recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to True.
Returns:
list of strings: The files that have been found
"""
if isinstance(files, six.string_types):
files = [files]
if recursive:
return _find_recursive(root, files)
else:
return _find_non_recursive(root, files)
def _find_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict. The idea is that
# we want to poke the filesystem as little as possible, but still maintain
# stability in the order of the answer. Thus we are recording each library
# found in a key, and reconstructing the stable order later.
found_files = collections.defaultdict(list)
# Make the path absolute to have os.walk also return an absolute path
root = os.path.abspath(root)
for path, _, list_files in os.walk(root):
for search_file in search_files:
matches = glob.glob(os.path.join(path, search_file))
matches = [os.path.join(path, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
def _find_non_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict as os.list_dir
# can return files in any order (does not preserve stability)
found_files = collections.defaultdict(list)
# Make the path absolute to have absolute path returned
root = os.path.abspath(root)
for search_file in search_files:
matches = glob.glob(os.path.join(root, search_file))
matches = [os.path.join(root, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
# Utilities for libraries and headers
class FileList(collections.Sequence):
"""Sequence of absolute paths to files.
Provides a few convenience methods to manipulate file paths.
"""
def __init__(self, files):
if isinstance(files, six.string_types):
files = [files]
self.files = list(dedupe(files))
@property
def directories(self):
"""Stable de-duplication of the directories where the files reside.
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/libc.a'])
>>> l.directories
['/dir1', '/dir2']
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.directories
['/dir1', '/dir2']
Returns:
list of strings: A list of directories
"""
return list(dedupe(
os.path.dirname(x) for x in self.files if os.path.dirname(x)
))
@property
def basenames(self):
"""Stable de-duplication of the base-names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.a'])
>>> l.basenames
['liba.a', 'libb.a']
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.basenames
['a.h', 'b.h']
Returns:
list of strings: A list of base-names
"""
return list(dedupe(os.path.basename(x) for x in self.files))
def __getitem__(self, item):
cls = type(self)
if isinstance(item, numbers.Integral):
return self.files[item]
return cls(self.files[item])
def __add__(self, other):
return self.__class__(dedupe(self.files + list(other)))
def __radd__(self, other):
return self.__add__(other)
def __eq__(self, other):
return self.files == other.files
def __len__(self):
return len(self.files)
def joined(self, separator=' '):
return separator.join(self.files)
def __repr__(self):
return self.__class__.__name__ + '(' + repr(self.files) + ')'
def __str__(self):
return self.joined()
class HeaderList(FileList):
"""Sequence of absolute paths to headers.
Provides a few convenience methods to manipulate header paths and get
commonly used compiler flags or names.
"""
def __init__(self, files):
super(HeaderList, self).__init__(files)
self._macro_definitions = []
@property
def headers(self):
"""Stable de-duplication of the headers.
Returns:
list of strings: A list of header files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of header names in the list without extensions
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.names
['a', 'b']
Returns:
list of strings: A list of files without extensions
"""
names = []
for x in self.basenames:
name = x
# Valid extensions include: ['.cuh', '.hpp', '.hh', '.h']
for ext in ['.cuh', '.hpp', '.hh', '.h']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def include_flags(self):
"""Include flags
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.include_flags
'-I/dir1 -I/dir2'
Returns:
str: A joined list of include flags
"""
return ' '.join(['-I' + x for x in self.directories])
@property
def macro_definitions(self):
"""Macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.add_macro('-DBOOST_LIB_NAME=boost_regex')
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.macro_definitions
'-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK'
Returns:
str: A joined list of macro definitions
"""
return ' '.join(self._macro_definitions)
@property
def cpp_flags(self):
"""Include flags + macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.cpp_flags
'-I/dir1 -I/dir2'
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.cpp_flags
'-I/dir1 -I/dir2 -DBOOST_DYN_LINK'
Returns:
str: A joined list of include flags and macro definitions
"""
cpp_flags = self.include_flags
if self.macro_definitions:
cpp_flags += ' ' + self.macro_definitions
return cpp_flags
def add_macro(self, macro):
"""Add a macro definition
Parameters:
macro (str): The macro to add
"""
self._macro_definitions.append(macro)
def find_headers(headers, root, recursive=False):
"""Returns an iterable object containing a list of full paths to
headers if found.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
headers (str or list of str): Header name(s) to search for
root (str): The root directory to start searching from
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
HeaderList: The headers that have been found
"""
if isinstance(headers, six.string_types):
headers = [headers]
elif not isinstance(headers, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_headers.__name__, type(headers))
raise TypeError(message)
# Construct the right suffix for the headers
suffix = 'h'
# List of headers we are searching with suffixes
headers = ['{0}.{1}'.format(header, suffix) for header in headers]
return HeaderList(find(root, headers, recursive))
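# Hedged usage sketch (path and header name are illustrative):
#   hdrs = find_headers('hdf5', '/opt/hdf5/include', recursive=True)
#   hdrs.include_flags  # e.g. '-I/opt/hdf5/include'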
class LibraryList(FileList):
"""Sequence of absolute paths to libraries
Provides a few convenience methods to manipulate library paths and get
commonly used compiler flags or names
"""
@property
def libraries(self):
"""Stable de-duplication of library files.
Returns:
list of strings: A list of library files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of library names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.so'])
>>> l.names
['a', 'b']
Returns:
list of strings: A list of library names
"""
names = []
for x in self.basenames:
name = x
if x.startswith('lib'):
name = x[3:]
# Valid extensions include: ['.dylib', '.so', '.a']
for ext in ['.dylib', '.so', '.a']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def search_flags(self):
"""Search flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.search_flags
'-L/dir1 -L/dir2'
Returns:
str: A joined list of search flags
"""
return ' '.join(['-L' + x for x in self.directories])
@property
def link_flags(self):
"""Link flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.link_flags
'-la -lb'
Returns:
str: A joined list of link flags
"""
return ' '.join(['-l' + name for name in self.names])
@property
def ld_flags(self):
"""Search flags + link flags
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.ld_flags
'-L/dir1 -L/dir2 -la -lb'
Returns:
str: A joined list of search flags and link flags
"""
return self.search_flags + ' ' + self.link_flags
def find_system_libraries(libraries, shared=True):
"""Searches the usual system library locations for ``libraries``.
Search order is as follows:
1. ``/lib64``
2. ``/lib``
3. ``/usr/lib64``
4. ``/usr/lib``
5. ``/usr/local/lib64``
6. ``/usr/local/lib``
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_system_libraries.__name__,
type(libraries))
raise TypeError(message)
libraries_found = []
search_locations = [
'/lib64',
'/lib',
'/usr/lib64',
'/usr/lib',
'/usr/local/lib64',
'/usr/local/lib',
]
for library in libraries:
for root in search_locations:
result = find_libraries(library, root, shared, recursive=True)
if result:
libraries_found += result
break
return libraries_found
def find_libraries(libraries, root, shared=True, recursive=False):
"""Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
root (str): The root directory to start searching from
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_libraries.__name__, type(libraries))
raise TypeError(message)
# Construct the right suffix for the library
if shared is True:
suffix = 'dylib' if sys.platform == 'darwin' else 'so'
else:
suffix = 'a'
# List of libraries we are searching with suffixes
libraries = ['{0}.{1}'.format(lib, suffix) for lib in libraries]
return LibraryList(find(root, libraries, recursive))
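# Hedged usage sketch (paths and library names are illustrative):
#   libs = find_libraries(['libhdf5', 'libhdf5_hl'], root='/opt/hdf5',
#                         shared=True, recursive=True)
#   libs.ld_flags  # e.g. '-L/opt/hdf5/lib -lhdf5 -lhdf5_hl'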
|
lgpl-2.1
| 536,191,456,906,617,700
| 29.985124
| 79
| 0.591219
| false
| 3.925864
| false
| false
| false
|
matejcik/weblate
|
weblate/trans/mixins.py
|
1
|
5486
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from django.core.urlresolvers import reverse
from weblate.logger import LOGGER
class PercentMixin(object):
"""
    Defines an API for getting the percentage status of translations.
"""
_percents = None
def get_percents(self):
"""
Returns percentages of translation status.
"""
if self._percents is None:
self._percents = self._get_percents()
return self._percents
def _get_percents(self):
"""
Returns percentages of translation status.
"""
raise NotImplementedError()
def get_translated_percent(self):
"""
Returns percent of translated strings.
"""
return self.get_percents()[0]
def get_untranslated_percent(self):
"""
Returns percent of untranslated strings.
"""
return 100 - self.get_percents()[0]
def get_fuzzy_percent(self):
"""
Returns percent of fuzzy strings.
"""
return self.get_percents()[1]
def get_failing_checks_percent(self):
"""
Returns percentage of failed checks.
"""
return self.get_percents()[2]
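# Minimal subclass sketch (illustrative only): a concrete class supplies
# _get_percents() returning (translated, fuzzy, failing-checks) percentages
# and inherits the cached accessors above.
#   class DummyStats(PercentMixin):
#       def _get_percents(self):
#           return (75.0, 10.0, 5.0)
#   DummyStats().get_untranslated_percent()  # -> 25.0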
class URLMixin(object):
"""
    Mixin providing a standard shortcut API for a few standard URLs.
"""
def _reverse_url_name(self):
"""
Returns base name for URL reversing.
"""
raise NotImplementedError()
def _reverse_url_kwargs(self):
"""
Returns kwargs for URL reversing.
"""
raise NotImplementedError()
def reverse_url(self, name=None):
"""
Generic reverser for URL.
"""
if name is None:
urlname = self._reverse_url_name()
else:
urlname = '%s_%s' % (
name,
self._reverse_url_name()
)
return reverse(
urlname,
kwargs=self._reverse_url_kwargs()
)
def get_absolute_url(self):
return self.reverse_url()
def get_commit_url(self):
return self.reverse_url('commit')
def get_update_url(self):
return self.reverse_url('update')
def get_push_url(self):
return self.reverse_url('push')
def get_reset_url(self):
return self.reverse_url('reset')
def get_lock_url(self):
return self.reverse_url('lock')
def get_unlock_url(self):
return self.reverse_url('unlock')
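# Illustrative note: reverse_url('commit') resolves the URL pattern named
# 'commit_<base>', where <base> is whatever _reverse_url_name() returns in
# the concrete class (e.g. a base of 'translation' yields
# 'commit_translation').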
class LoggerMixin(object):
"""
Mixin with logging.
"""
@property
def log_prefix(self):
return 'default: '
def log_debug(self, msg, *args):
return LOGGER.debug(
self.log_prefix + msg, *args
)
def log_info(self, msg, *args):
return LOGGER.info(
self.log_prefix + msg, *args
)
def log_warning(self, msg, *args):
return LOGGER.warning(
self.log_prefix + msg, *args
)
def log_error(self, msg, *args):
return LOGGER.error(
self.log_prefix + msg, *args
)
class PathMixin(LoggerMixin):
"""
Mixin for path manipulations.
"""
_dir_path = None
_linked_subproject = None
def _get_path(self):
"""
Actual calculation of path.
"""
raise NotImplementedError()
def get_path(self):
"""
Return path to directory.
        Caching is really necessary for linked projects, otherwise
        we end up fetching the linked subproject again and again.
"""
if self._dir_path is None:
self._dir_path = self._get_path()
return self._dir_path
def check_rename(self, old):
"""
        Detects slug changes and possibly renames the underlying directory.
"""
# No moving for links
if getattr(self, 'is_repo_link', False):
return
old_path = old.get_path()
# Invalidate path cache (otherwise we would still get old path)
self._dir_path = None
new_path = self.get_path()
if old_path != new_path:
self.log_info(
'path changed from %s to %s', old_path, new_path
)
if os.path.exists(old_path) and not os.path.exists(new_path):
self.log_info(
'renaming "%s" to "%s"', old_path, new_path
)
os.rename(old_path, new_path)
# Clean subproject cache on rename
self._linked_subproject = None
def create_path(self):
"""
Create filesystem directory for storing data
"""
path = self.get_path()
if not os.path.exists(path):
os.makedirs(path)
|
gpl-3.0
| -2,377,593,369,573,738,500
| 24.621495
| 73
| 0.572132
| false
| 4.125658
| false
| false
| false
|
bsipocz/astropy
|
astropy/_erfa/erfa_generator.py
|
1
|
27369
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module's main purpose is to act as a script to create new versions
of ufunc.c when ERFA is updated (or this generator is enhanced).
`Jinja2 <http://jinja.pocoo.org/>`_ must be installed for this
module/script to function.
Note that this does *not* currently automate the process of creating structs
or dtypes for those structs. They should be added manually in the template file.
"""
import re
import os.path
from collections import OrderedDict
from distutils.version import LooseVersion
import numpy
# Note: once we support only numpy >=1.16, all things related to "d3_fix"
# can be removed, here and in the templates (core.py.templ and ufunc.c.templ).
# NOTE: we define this variable here instead of importing from astropy to
# ensure that running this script does not require importing astropy.
NUMPY_LT_1_16 = LooseVersion(numpy.__version__) < '1.16'
DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0],
'../../cextern/erfa')
DEFAULT_TEMPLATE_LOC = os.path.split(__file__)[0]
NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])").replace(r'\.\*', '.*').replace(r'\<', '(').replace(r'\>', ')'))
class FunctionDoc:
def __init__(self, doc):
self.doc = doc.replace("**", " ").replace("/*\n", "").replace("*/", "")
self.__input = None
self.__output = None
self.__ret_info = None
def _get_arg_doc_list(self, doc_lines):
"""Parse input/output doc section lines, getting arguments from them.
Ensure all elements of eraASTROM and eraLDBODY are left out, as those
are not input or output arguments themselves. Also remove the nb
argument in from of eraLDBODY, as we infer nb from the python array.
"""
doc_list = []
skip = []
for d in doc_lines:
arg_doc = ArgumentDoc(d)
if arg_doc.name is not None:
if skip:
if skip[0] == arg_doc.name:
skip.pop(0)
continue
else:
raise RuntimeError("We whould be skipping {} "
"but {} encountered."
.format(skip[0], arg_doc.name))
if arg_doc.type.startswith('eraLDBODY'):
# Special-case LDBODY: for those, the previous argument
# is always the number of bodies, but we don't need it
# as an input argument for the ufunc since we're going
# to determine this from the array itself. Also skip
# the description of its contents; those are not arguments.
doc_list.pop()
skip = ['bm', 'dl', 'pv']
elif arg_doc.type.startswith('eraASTROM'):
# Special-case ASTROM: need to skip the description
# of its contents; those are not arguments.
skip = ['pmt', 'eb', 'eh', 'em', 'v', 'bm1',
'bpn', 'along', 'xpl', 'ypl', 'sphi',
'cphi', 'diurab', 'eral', 'refa', 'refb']
doc_list.append(arg_doc)
return doc_list
@property
def input(self):
if self.__input is None:
self.__input = []
for regex in ("Given([^\n]*):\n(.+?) \n",
"Given and returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__input += self._get_arg_doc_list(doc_lines)
return self.__input
@property
def output(self):
if self.__output is None:
self.__output = []
for regex in ("Given and returned([^\n]*):\n(.+?) \n",
"Returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__output += self._get_arg_doc_list(doc_lines)
return self.__output
@property
def ret_info(self):
if self.__ret_info is None:
ret_info = []
result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?) \n", self.doc, re.DOTALL)
if result is not None:
ret_info.append(ReturnDoc(result.group(2)))
if len(ret_info) == 0:
self.__ret_info = ''
elif len(ret_info) == 1:
self.__ret_info = ret_info[0]
else:
raise ValueError("Multiple C return sections found in this doc:\n" + self.doc)
return self.__ret_info
def __repr__(self):
return self.doc.replace(" \n", "\n")
class ArgumentDoc:
def __init__(self, doc):
match = re.search("^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)", doc)
if match is not None:
self.name = match.group(1)
self.type = match.group(2)
self.doc = match.group(3)
else:
self.name = None
self.type = None
self.doc = None
def __repr__(self):
return f" {self.name:15} {self.type:15} {self.doc}"
class Variable:
"""Properties shared by Argument and Return."""
@property
def npy_type(self):
"""Predefined type used by numpy ufuncs to indicate a given ctype.
Eg., NPY_DOUBLE for double.
"""
return "NPY_" + self.ctype.upper()
@property
def dtype(self):
"""Name of dtype corresponding to the ctype.
Specifically,
double : dt_double
int : dt_int
double[3] : dt_double
double[2][3] : dt_pv
double[2] : dt_pvdpv
double[3][3] : dt_double
int[4] : dt_ymdf | dt_hmsf | dt_dmsf, depending on name
eraASTROM: dt_eraASTROM
eraLDBODY: dt_eraLDBODY
char : dt_sign
char[] : dt_type
The corresponding dtypes are defined in ufunc.c, where they are
used for the loop definitions. In core.py, they are also used
to view-cast regular arrays to these structured dtypes.
"""
if self.ctype == 'const char':
return 'dt_type'
elif self.ctype == 'char':
return 'dt_sign'
elif self.ctype == 'int' and self.shape == (4,):
return 'dt_' + self.name[1:]
elif self.ctype == 'double' and self.shape == (3,):
return 'dt_double'
elif self.ctype == 'double' and self.shape == (2, 3):
return 'dt_pv'
elif self.ctype == 'double' and self.shape == (2,):
return 'dt_pvdpv'
elif self.ctype == 'double' and self.shape == (3, 3):
return 'dt_double'
elif not self.shape:
return 'dt_' + self.ctype
else:
raise ValueError("ctype {} with shape {} not recognized."
.format(self.ctype, self.shape))
@property
def view_dtype(self):
"""Name of dtype corresponding to the ctype for viewing back as array.
E.g., dt_double for double, dt_double33 for double[3][3].
The types are defined in core.py, where they are used for view-casts
of structured results as regular arrays.
"""
if self.ctype == 'const char':
return 'dt_bytes12'
elif self.ctype == 'char':
return 'dt_bytes1'
else:
raise ValueError('Only char ctype should need view back!')
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
size = 1
for s in self.shape:
size *= s
return size
@property
def cshape(self):
return ''.join([f'[{s}]' for s in self.shape])
@property
def signature_shape(self):
if self.ctype == 'eraLDBODY':
return '(n)'
elif self.ctype == 'double' and self.shape == (3,):
return '(d3)' if NUMPY_LT_1_16 else '(3)'
elif self.ctype == 'double' and self.shape == (3, 3):
return '(d3, d3)' if NUMPY_LT_1_16 else '(3, 3)'
else:
return '()'
class Argument(Variable):
def __init__(self, definition, doc):
self.definition = definition
self.doc = doc
self.__inout_state = None
self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1)
if "*" == ptr_name_arr[0]:
self.is_ptr = True
name_arr = ptr_name_arr[1:]
else:
self.is_ptr = False
name_arr = ptr_name_arr
if "[]" in ptr_name_arr:
self.is_ptr = True
name_arr = name_arr[:-2]
if "[" in name_arr:
self.name, arr = name_arr.split("[", 1)
self.shape = tuple([int(size) for size in arr[:-1].split("][")])
else:
self.name = name_arr
self.shape = ()
@property
def inout_state(self):
if self.__inout_state is None:
self.__inout_state = ''
for i in self.doc.input:
if self.name in i.name.split(','):
self.__inout_state = 'in'
for o in self.doc.output:
if self.name in o.name.split(','):
if self.__inout_state == 'in':
self.__inout_state = 'inout'
else:
self.__inout_state = 'out'
return self.__inout_state
@property
def name_for_call(self):
"""How the argument should be used in the call to the ERFA function.
This takes care of ensuring that inputs are passed by value,
as well as adding back the number of bodies for any LDBODY argument.
The latter presumes that in the ufunc inner loops, that number is
called 'nb'.
"""
if self.ctype == 'eraLDBODY':
assert self.name == 'b'
return 'nb, _' + self.name
elif self.is_ptr:
return '_'+self.name
else:
return '*_'+self.name
def __repr__(self):
return f"Argument('{self.definition}', name='{self.name}', ctype='{self.ctype}', inout_state='{self.inout_state}')"
class ReturnDoc:
def __init__(self, doc):
self.doc = doc
self.infoline = doc.split('\n')[0].strip()
self.type = self.infoline.split()[0]
self.descr = self.infoline.split()[1]
if self.descr.startswith('status'):
self.statuscodes = statuscodes = {}
code = None
for line in doc[doc.index(':')+1:].split('\n'):
ls = line.strip()
if ls != '':
if ' = ' in ls:
code, msg = ls.split(' = ')
if code != 'else':
code = int(code)
statuscodes[code] = msg
elif code is not None:
statuscodes[code] += ls
else:
self.statuscodes = None
def __repr__(self):
return f"Return value, type={self.type:15}, {self.descr}, {self.doc}"
class Return(Variable):
def __init__(self, ctype, doc):
self.name = 'c_retval'
self.inout_state = 'stat' if ctype == 'int' else 'ret'
self.ctype = ctype
self.shape = ()
self.doc = doc
def __repr__(self):
return f"Return(name='{self.name}', ctype='{self.ctype}', inout_state='{self.inout_state}')"
@property
def doc_info(self):
return self.doc.ret_info
class Function:
"""
A class representing a C function.
Parameters
----------
name : str
The name of the function
source_path : str
Either a directory, which means look for the function in a
stand-alone file (like for the standard ERFA distribution), or a
file, which means look for the function in that file (as for the
astropy-packaged single-file erfa.c).
match_line : str, optional
If given, searching of the source file will skip until it finds
a line matching this string, and start from there.
"""
def __init__(self, name, source_path, match_line=None):
self.name = name
self.pyname = name.split('era')[-1].lower()
self.filename = self.pyname+".c"
if os.path.isdir(source_path):
self.filepath = os.path.join(os.path.normpath(source_path), self.filename)
else:
self.filepath = source_path
with open(self.filepath) as f:
if match_line:
line = f.readline()
while line != '':
if line.startswith(match_line):
filecontents = '\n' + line + f.read()
break
line = f.readline()
else:
msg = ('Could not find the match_line "{0}" in '
'the source file "{1}"')
raise ValueError(msg.format(match_line, self.filepath))
else:
filecontents = f.read()
pattern = fr"\n([^\n]+{name} ?\([^)]+\)).+?(/\*.+?\*/)"
p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE)
search = p.search(filecontents)
self.cfunc = " ".join(search.group(1).split())
self.doc = FunctionDoc(search.group(2))
self.args = []
for arg in re.search(r"\(([^)]+)\)", self.cfunc).group(1).split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.search(f"^(.*){name}", self.cfunc).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def args_by_inout(self, inout_filter, prop=None, join=None):
"""
Gives all of the arguments and/or returned values, depending on whether
they are inputs, outputs, etc.
The value for `inout_filter` should be a string containing anything
that arguments' `inout_state` attribute produces. Currently, that can be:
* "in" : input
* "out" : output
* "inout" : something that's could be input or output (e.g. a struct)
* "ret" : the return value of the C function
* "stat" : the return value of the C function if it is a status code
It can also be a "|"-separated string giving inout states to OR
together.
"""
result = []
for arg in self.args:
if arg.inout_state in inout_filter.split('|'):
if prop is None:
result.append(arg)
else:
result.append(getattr(arg, prop))
if join is not None:
return join.join(result)
else:
return result
@property
def user_dtype(self):
"""The non-standard dtype, if any, needed by this function's ufunc.
This would be any structured array for any input or output, but
we give preference to LDBODY, since that also decides that the ufunc
should be a generalized ufunc.
"""
user_dtype = None
for arg in self.args_by_inout('in|inout|out'):
if arg.ctype == 'eraLDBODY':
return arg.dtype
elif user_dtype is None and arg.dtype not in ('dt_double',
'dt_int'):
user_dtype = arg.dtype
return user_dtype
@property
def signature(self):
"""Possible signature, if this function should be a gufunc."""
if all(arg.signature_shape == '()'
for arg in self.args_by_inout('in|inout|out')):
return None
return '->'.join(
[','.join([arg.signature_shape for arg in args])
for args in (self.args_by_inout('in|inout'),
self.args_by_inout('inout|out|ret|stat'))])
def _d3_fix_arg_and_index(self):
if not any('d3' in arg.signature_shape
for arg in self.args_by_inout('in|inout')):
for j, arg in enumerate(self.args_by_inout('out')):
if 'd3' in arg.signature_shape:
return j, arg
return None, None
@property
def d3_fix_op_index(self):
"""Whether only output arguments have a d3 dimension."""
index = self._d3_fix_arg_and_index()[0]
if index is not None:
len_in = len(list(self.args_by_inout('in')))
len_inout = len(list(self.args_by_inout('inout')))
index += len_in + 2 * len_inout
return index
@property
def d3_fix_arg(self):
"""Whether only output arguments have a d3 dimension."""
return self._d3_fix_arg_and_index()[1]
@property
def python_call(self):
outnames = [arg.name for arg in self.args_by_inout('inout|out|stat|ret')]
argnames = [arg.name for arg in self.args_by_inout('in|inout')]
argnames += [arg.name for arg in self.args_by_inout('inout')]
d3fix_index = self._d3_fix_arg_and_index()[0]
if d3fix_index is not None:
argnames += ['None'] * d3fix_index + [self.d3_fix_arg.name]
return '{out} = {func}({args})'.format(out=', '.join(outnames),
func='ufunc.' + self.pyname,
args=', '.join(argnames))
def __repr__(self):
return f"Function(name='{self.name}', pyname='{self.pyname}', filename='{self.filename}', filepath='{self.filepath}')"
class Constant:
def __init__(self, name, value, doc):
self.name = name.replace("ERFA_", "")
self.value = value.replace("ERFA_", "")
self.doc = doc
class ExtraFunction(Function):
"""
An "extra" function - e.g. one not following the SOFA/ERFA standard format.
Parameters
----------
cname : str
The name of the function in C
prototype : str
The prototype for the function (usually derived from the header)
pathfordoc : str
The path to a file that contains the prototype, with the documentation
as a multiline string *before* it.
"""
def __init__(self, cname, prototype, pathfordoc):
self.name = cname
self.pyname = cname.split('era')[-1].lower()
self.filepath, self.filename = os.path.split(pathfordoc)
self.prototype = prototype.strip()
if prototype.endswith('{') or prototype.endswith(';'):
self.prototype = prototype[:-1].strip()
incomment = False
lastcomment = None
with open(pathfordoc, 'r') as f:
for l in f:
if incomment:
if l.lstrip().startswith('*/'):
incomment = False
lastcomment = ''.join(lastcomment)
else:
if l.startswith('**'):
l = l[2:]
lastcomment.append(l)
else:
if l.lstrip().startswith('/*'):
incomment = True
lastcomment = []
if l.startswith(self.prototype):
self.doc = lastcomment
break
else:
raise ValueError('Did not find prototype {} in file '
'{}'.format(self.prototype, pathfordoc))
self.args = []
argset = re.search(fr"{self.name}\(([^)]+)?\)",
self.prototype).group(1)
if argset is not None:
for arg in argset.split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.match(f"^(.*){self.name}",
self.prototype).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def __repr__(self):
r = super().__repr__()
if r.startswith('Function'):
r = 'Extra' + r
return r
def main(srcdir=DEFAULT_ERFA_LOC, outfn='core.py', ufuncfn='ufunc.c',
templateloc=DEFAULT_TEMPLATE_LOC, extra='erfa_additions.h',
verbose=True):
from jinja2 import Environment, FileSystemLoader
if verbose:
print_ = lambda *args, **kwargs: print(*args, **kwargs)
else:
print_ = lambda *args, **kwargs: None
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(templateloc))
def prefix(a_list, pre):
return [pre+f'{an_element}' for an_element in a_list]
def postfix(a_list, post):
return [f'{an_element}'+post for an_element in a_list]
def surround(a_list, pre, post):
return [pre+f'{an_element}'+post for an_element in a_list]
env.filters['prefix'] = prefix
env.filters['postfix'] = postfix
env.filters['surround'] = surround
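# Example (for exposition only): surround(['p', 'pv'], '_', ';') returns
# ['_p;', '_pv;'], which lets templates build decorated argument lists.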
erfa_c_in = env.get_template(ufuncfn + '.templ')
erfa_py_in = env.get_template(outfn + '.templ')
# Extract all the ERFA function names from erfa.h
if os.path.isdir(srcdir):
erfahfn = os.path.join(srcdir, 'erfa.h')
multifilesrc = True
else:
erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h')
multifilesrc = False
with open(erfahfn, "r") as f:
erfa_h = f.read()
print_("read erfa header")
if extra:
with open(os.path.join(templateloc or '.', extra), "r") as f:
erfa_h += f.read()
print_("read extra header")
funcs = OrderedDict()
section_subsection_functions = re.findall(
r'/\* (\w*)/(\w*) \*/\n(.*?)\n\n', erfa_h,
flags=re.DOTALL | re.MULTILINE)
for section, subsection, functions in section_subsection_functions:
print_(f"{section}.{subsection}")
# Right now, we compile everything, but one could be more selective.
# In particular, at the time of writing (2018-06-11), what was
# actually required for astropy was not quite everything, but:
# ((section == 'Extra')
# or (section == "Astronomy")
# or (subsection == "AngleOps")
# or (subsection == "SphericalCartesian")
# or (subsection == "MatrixVectorProducts")
# or (subsection == 'VectorOps'))
if True:
func_names = re.findall(r' (\w+)\(.*?\);', functions,
flags=re.DOTALL)
for name in func_names:
print_(f"{section}.{subsection}.{name}...")
if multifilesrc:
# easy because it just looks in the file itself
cdir = (srcdir if section != 'Extra' else
templateloc or '.')
funcs[name] = Function(name, cdir)
else:
# Have to tell it to look for a declaration matching
# the start of the header declaration, otherwise it
# might find a *call* of the function instead of the
# definition
for line in functions.split('\n'):
if name in line:
# [:-1] is to remove trailing semicolon, and
# splitting on '(' is because the header and
# C files don't necessarily have to match
# argument names and line-breaking or
# whitespace
match_line = line[:-1].split('(')[0]
funcs[name] = Function(name, srcdir, match_line)
break
else:
raise ValueError("A name for a C file wasn't "
"found in the string that "
"spawned it. This should be "
"impossible!")
funcs = funcs.values()
# Extract all the ERFA constants from erfam.h
erfamhfn = os.path.join(srcdir, 'erfam.h')
with open(erfamhfn, 'r') as f:
erfa_m_h = f.read()
constants = []
for chunk in erfa_m_h.split("\n\n"):
result = re.findall(r"#define (ERFA_\w+?) (.+?)$", chunk,
flags=re.DOTALL | re.MULTILINE)
if result:
doc = re.findall(r"/\* (.+?) \*/\n", chunk, flags=re.DOTALL)
for (name, value) in result:
constants.append(Constant(name, value, doc))
# TODO: re-enable this when const char* return values and
# non-status code integer rets are possible
# #Add in any "extra" functions from erfaextra.h
# erfaextrahfn = os.path.join(srcdir, 'erfaextra.h')
# with open(erfaextrahfn, 'r') as f:
# for l in f:
# ls = l.strip()
# match = re.match('.* (era.*)\(', ls)
# if match:
# print_("Extra: {0} ...".format(match.group(1)))
# funcs.append(ExtraFunction(match.group(1), ls, erfaextrahfn))
print_("Rendering template")
erfa_c = erfa_c_in.render(funcs=funcs, NUMPY_LT_1_16=NUMPY_LT_1_16)
erfa_py = erfa_py_in.render(funcs=funcs, constants=constants,
NUMPY_LT_1_16=NUMPY_LT_1_16)
if outfn is not None:
print_("Saving to", outfn, 'and', ufuncfn)
with open(os.path.join(templateloc, outfn), "w") as f:
f.write(erfa_py)
with open(os.path.join(templateloc, ufuncfn), "w") as f:
f.write(erfa_c)
print_("Done!")
return erfa_c, erfa_py, funcs
if __name__ == '__main__':
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?',
help='Directory where the ERFA C and header files '
'can be found, or a single erfa.c file '
'(which must be in the same directory as '
'erfa.h). Defaults to the builtin astropy '
'erfa: "{}"'.format(DEFAULT_ERFA_LOC))
ap.add_argument('-o', '--output', default='core.py',
help='The output filename for the pure-python output.')
ap.add_argument('-u', '--ufunc', default='ufunc.c',
help='The output filename for the ufunc .c output')
ap.add_argument('-t', '--template-loc',
default=DEFAULT_TEMPLATE_LOC,
help='the location where the "core.py.templ" and '
'"ufunc.c.templ templates can be found.')
ap.add_argument('-x', '--extra',
default='erfa_additions.h',
help='header file for any extra files in the template '
'location that should be included.')
ap.add_argument('-q', '--quiet', action='store_false', dest='verbose',
help='Suppress output normally printed to stdout.')
args = ap.parse_args()
main(args.srcdir, args.output, args.ufunc, args.template_loc,
args.extra)
|
bsd-3-clause
| -3,515,980,912,529,225,000
| 36.135685
| 136
| 0.517995
| false
| 3.964224
| false
| false
| false
|
ufukdogan92/is-teklif-sistemi
|
teklif/models.py
|
1
|
1928
|
from django.db import models
from ilan.models import Ilan
from kullanici.models import IsArayan
from register.models import Register
class Teklif(models.Model):
ilan = models.ForeignKey(Ilan,blank=True,null=True,related_name="odeme_ilanı")
teklif_veren = models.OneToOneField(IsArayan,related_name="is_arayan")
butce = models.IntegerField()
sure = models.IntegerField()
onay_durumu = models.BooleanField(default=False)
teklif_tarihi = models.DateTimeField(auto_now_add=True)
duzenlenme_tarihi = models.DateField(auto_now=True)
def __str__(self):
return self.ilan.ilan_basligi+ " ilanına "+ self.teklif_veren.kullanici.username + " kullanıcısının Teklifi"
class Meta:
verbose_name ="Teklifler"
verbose_name_plural="Teklif"
def save(self, *args, **kwargs):
from register.models import Register
self.ilan = Register.teklifVermeBaslat(self.ilan.pk)
self.teklif_veren = Register.getIsArayan(self.teklif_veren.pk)
super(Teklif, self).save(*args, **kwargs)
class TeklifOnay(models.Model):
teklif = models.OneToOneField(Teklif,related_name="teklif_onay")
onay_durumu = models.BooleanField(default=True)
onay_tarihi = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.teklif.ilan.ilan_basligi+ " ilanına verilen teklifin onayı"
class Meta:
verbose_name ="Teklif Onayı"
verbose_name_plural="Teklif Onayları"
def save(self, *args, **kwargs):
if not self.pk:
from odeme.models import Odeme
teklif = Teklif.objects.get(pk=self.teklif.pk)
self.onay_durumu = True
self.tarihi = self.onay_tarihi
odeme = Odeme(odeme_basligi=teklif.ilan.ilan_basligi,ucret=teklif.butce,sure=teklif.sure,teklif=teklif)
odeme.save()
super(TeklifOnay, self).save(*args, **kwargs)
|
gpl-3.0
| -4,214,891,582,358,801,000
| 36.627451
| 116
| 0.678832
| false
| 2.941718
| false
| false
| false
|
kg-bot/SupyBot
|
plugins/Misc1/__init__.py
|
1
|
2791
|
###
# Copyright (c) 2014, KG-Bot
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Add a description of the plugin (to be presented to the user inside the wizard)
here. This should describe *what* the plugin does.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.unknown
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = '' # 'http://supybot.com/Members/yourname/Misc1/download'
from . import config
from . import plugin
from imp import reload
# In case we're being reloaded.
reload(config)
reload(plugin)
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
gpl-3.0
| 5,609,393,713,654,504,000
| 38.449275
| 79
| 0.741311
| false
| 3.998567
| false
| false
| false
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/case/test_rule_003.py
|
1
|
1130
|
import os
import unittest
from vsg.rules import case
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_003_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_003_test_input.fixed.vhd'), lExpected)
class test_case_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_003(self):
oRule = case.rule_003()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'case')
self.assertEqual(oRule.identifier, '003')
lExpected = [24]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_003(self):
oRule = case.rule_003()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
gpl-3.0
| -605,416,265,127,274,600
| 24.111111
| 106
| 0.673451
| false
| 3.219373
| true
| false
| false
|
TakeshiTseng/SDN-Work
|
mininet/bgp-3as/as.py
|
1
|
2401
|
#!/usr/bin/env python
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel
'''
h1 -- r1 -- r2 -- r3 -- h3
|
h2
h1 - r1 : 10.0.1.0/24
h2 - r2 : 10.0.2.0/24
h3 - r3 : 10.0.3.0/24
r1 - r2 : 192.168.1.0/24
r2 - r3 : 192.168.2.0/24
'''
if '__main__' == __name__:
setLogLevel('debug')
net = Mininet(controller=None)
h1 = net.addHost('h1', ip="10.0.1.1/24")
h2 = net.addHost('h2', ip="10.0.2.1/24")
h3 = net.addHost('h3', ip="10.0.3.1/24")
r1 = net.addHost('r1')
r2 = net.addHost('r2')
r3 = net.addHost('r3')
net.addLink(r1, r2)
net.addLink(r2, r3)
net.addLink(h1, r1)
net.addLink(h2, r2)
net.addLink(h3, r3)
net.build()
# default route for hosts
h1.cmd('ip r add 0.0.0.0/0 via 10.0.1.254')
h2.cmd('ip r add 0.0.0.0/0 via 10.0.2.254')
h3.cmd('ip r add 0.0.0.0/0 via 10.0.3.254')
# remove default ip address
r1.cmd('ip a del 10.0.0.4/8 dev r1-eth0')
r2.cmd('ip a del 10.0.0.5/8 dev r2-eth0')
r3.cmd('ip a del 10.0.0.6/8 dev r3-eth0')
# ip for router facing hosts
r1.cmd('ip a add 10.0.1.254/24 dev r1-eth1')
r2.cmd('ip a add 10.0.2.254/24 dev r2-eth2')
r3.cmd('ip a add 10.0.3.254/24 dev r3-eth1')
# subnet between r1 and r2
r1.cmd('ip a add 192.168.1.1/24 dev r1-eth0')
r2.cmd('ip a add 192.168.1.2/24 dev r2-eth0')
# subnet between r2 and r3
r2.cmd('ip a add 192.168.2.1/24 dev r2-eth1')
r3.cmd('ip a add 192.168.2.2/24 dev r3-eth0')
# quagga
r1.cmd('/usr/lib/quagga/zebra -d -f zebra-r1.conf -z /var/run/quagga/zebra-r1.api -i /var/run/quagga/zebra-r1.pid')
r1.cmd('/usr/lib/quagga/bgpd -d -f r1.conf -z /var/run/quagga/zebra-r1.api -i /var/run/quagga/bgpd-r1.pid')
r2.cmd('/usr/lib/quagga/zebra -d -f zebra-r2.conf -z /var/run/quagga/zebra-r2.api -i /var/run/quagga/zebra-r2.pid')
r2.cmd('/usr/lib/quagga/bgpd -d -f r2.conf -z /var/run/quagga/zebra-r2.api -i /var/run/quagga/bgpd-r2.pid')
r3.cmd('/usr/lib/quagga/zebra -d -f zebra-r3.conf -z /var/run/quagga/zebra-r3.api -i /var/run/quagga/zebra-r3.pid')
r3.cmd('/usr/lib/quagga/bgpd -d -f r3.conf -z /var/run/quagga/zebra-r3.api -i /var/run/quagga/bgpd-r3.pid')
CLI(net)
# kill bgpd and zebra
r1.cmd('killall bgpd zebra')
r2.cmd('killall bgpd zebra')
r3.cmd('killall bgpd zebra')
net.stop()
|
mit
| -5,056,204,420,631,575,000
| 30.592105
| 119
| 0.598917
| false
| 2.093287
| false
| false
| false
|
jrydberg/guild
|
guild/actor.py
|
1
|
26454
|
# Copyright (c) 2012 Johan Rydberg
# Copyright (c) 2009 Donovan Preston
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import uuid
import weakref
try:
import simplejson as json
except ImportError:
import json
from gevent import Greenlet, Timeout, local, core
from gevent.event import Event
from gevent.hub import GreenletExit
import gevent
from guild import exc, shape
class ActorError(RuntimeError):
"""Base class for actor exceptions.
"""
class LinkBroken(ActorError):
""":"""
class Killed(ActorError):
"""Exception which is raised when an Actor is killed.
"""
class DeadActor(ActorError):
"""Exception which is raised when a message is sent to an Address which
refers to an Actor which is no longer running.
"""
class ReceiveTimeout(ActorError):
"""Internal exception used to signal receive timeouts.
"""
class InvalidCallMessage(ActorError):
"""Message doesn't match call message shape.
"""
class RemoteAttributeError(ActorError, AttributeError):
pass
class RemoteException(ActorError):
pass
def build_call_pattern(method, message=object):
call_pat = CALL_PATTERN.copy()
call_pat['method'] = method
call_pat['message'] = message
return call_pat
def lazy_property(property_name, property_factory, doc=None):
def get(self):
if not hasattr(self, property_name):
setattr(self, property_name, property_factory(self))
return getattr(self, property_name)
return property(get)
_curactor = local.local()
def curactor():
"""Return the current actor."""
return _curactor.current
def _setcurrent(actor):
_curactor.current = actor
def curaddr():
"""Return address of current actor."""
return curactor().address
def curmesh():
return curactor().mesh
def curnode():
return curactor().node
def register(name, address):
"""Associates the name C{name} with the address C{address}."""
curnode().register(name, address)
def whereis(name):
"""Returns the address registered under C{name}, or C{None} if the
name is not registered.
"""
return curnode().whereis(name)
def is_actor_type(obj):
"""Return True if obj is a subclass of Actor, False if not.
"""
try:
return issubclass(obj, Actor)
except TypeError:
return False
def spawn(spawnable, *args, **kw):
"""Start a new Actor. If spawnable is a subclass of Actor,
instantiate it with no arguments and call the Actor's "main"
method with *args and **kw.
If spawnable is a callable, call it inside a new Actor with the first
argument being the "receive" method to use to retrieve messages out
of the Actor's mailbox, followed by the given *args and **kw.
Return the Address of the new Actor.
"""
return curnode().spawn(spawnable, *args, **kw)
def spawn_link(spawnable, *args, **kw):
"""Just like spawn, but the currently running Actor will be linked
to the new actor. If an exception occurs or the Actor finishes
execution, a message will be sent to the Actor which called
spawn_link with details.
When an exception occurs, the message will have a pattern like:
{'address': guild.actor.Address, 'exception': dict}
The "exception" dict will have information from the stack trace extracted
into a tree of simple Python objects.
On a normal return from the Actor, the actor's return value is given
in a message like:
{'address': guild.actor.Address, 'exit': object}
"""
return curnode().spawn_link(spawnable, *args, **kw)
def handle_custom(obj):
if isinstance(obj, Address):
return obj.to_json()
if isinstance(obj, Ref):
return obj.to_json()
raise TypeError(obj)
def generate_custom(obj):
address = Address.from_json(obj)
if address:
return address
ref = Ref.from_json(obj)
if ref:
return ref
return obj
class Ref(object):
"""A reference."""
def __init__(self, node_id, ref_id):
self._node_id = node_id
self._ref_id = ref_id
ref_id = property(lambda self: self._ref_id)
node_id = property(lambda self: self._node_id)
def to_json(self):
return {'_pyact_ref_id': self._ref_id,
'_pyact_node_id': self._node_id}
@classmethod
def from_json(cls, obj):
if sorted(obj.keys()) == ['_pyact_ref_id', '_pyact_node_id']:
return Ref(obj['_pyact_node_id'], obj['_pyact_ref_id'])
return None
def __eq__(self, other):
return (isinstance(other, Ref)
and other.node_id == self.node_id
and other.ref_id == self.ref_id)
def __hash__(self):
return hash((self.node_id, self._ref_id))
class MonitorRef(object):
def __init__(self, address, ref):
self.address = address
self.ref = ref
def demonitor(self):
curmesh().demonitor(self.address, self.ref)
class Address(object):
"""An Address is a reference to another Actor.
Any Actor which has an Address can asynchronously put a message in
that Actor's mailbox. This is called a "cast". To send a message
to another Actor and wait for a response, use "call" instead.
Note that an Address instance itself is rather useless. You need
a node or a mesh to actually send a message.
"""
def __init__(self, node_id, actor_id):
self._node_id = node_id
self._actor_id = actor_id
actor_id = property(lambda self: self._actor_id)
node_id = property(lambda self: self._node_id)
def to_json(self):
return {'_pyact_actor_id': self._actor_id,
'_pyact_node_id': self._node_id}
@classmethod
def from_json(cls, obj):
if sorted(obj.keys()) == ['_pyact_actor_id', '_pyact_node_id']:
return Address(obj['_pyact_node_id'], obj['_pyact_actor_id'])
return None
def __eq__(self, other):
return (isinstance(other, Address)
and other.node_id == self.node_id
and other.actor_id == self.actor_id)
def __hash__(self):
return hash((self.node_id, self._actor_id))
def cast(self, message):
"""Send a message to the Actor this object addresses."""
curnode().send(self, message)
def __repr__(self):
return "<%s %s/%s>" % (self.__class__.__name__,
self._node_id, self._actor_id)
def __str__(self):
return "<actor %s/%s>" % (self._node_id, self._actor_id)
def __or__(self, message):
"""Use Erlang-y syntax (| instead of !) to send messages.
addr | msg
is equivalent to:
addr.cast(msg)
"""
self.cast(message)
def monitor(self):
"""Monitor the Actor this object addresses.
When the actor dies, an exit message will be sent to the
current actor.
This call returns a reference that can be used to cancel the
monitor with the C{demonitor} function.
"""
ref = curnode().make_ref()
curmesh().monitor(self, curaddr(), ref)
return MonitorRef(self, ref)
def demonitor(self, ref):
"""Cancel a monitor."""
curmesh().demonitor(self, ref)
def link(self):
"""Link the current actor to the actor this object addresses.
"""
print "addr.link curr %s to %s" % (curaddr(), self)
curactor().link(self)
#curmesh().link(self, curaddr())
def call(self, method, message=None, timeout=None):
"""Send a message to the Actor this object addresses. Wait
for a result. If a timeout in seconds is passed, raise
C{gevent.Timeout} if no result is returned in less than the
timeout.
This could have nicer syntax somehow to make it look like an
actual method call.
"""
message_id = str(uuid.uuid4())
my_address = curaddr()
self.cast(
{'call': message_id, 'method': method,
'address': my_address, 'message': message})
if timeout is None:
cancel = None
else:
# Raise any gevent.Timeout to the caller so they can handle
# it.
cancel = gevent.Timeout(timeout)
cancel.start()
RSP = {'response': message_id, 'message': object}
EXC = {'response': message_id, 'exception': object}
INV = {'response': message_id, 'invalid_method': str}
pattern, response = curactor().receive(RSP, EXC, INV)
if cancel is not None:
cancel.cancel()
if pattern is INV:
raise RemoteAttributeError(method)
elif pattern is EXC:
raise RemoteException(response)
return response['message']
def __getattr__(self, method):
"""Support address.<method>(message, timout) call pattern.
For example:
addr.call('test') could be written as addr.test()
"""
f = lambda message=None, timeout=None: self.call(
method, message, timeout)
return f
class _Greenlet(Greenlet):
"""Private version of the greenlet that doesn't dump a stacktrace
to stderr when a greenlet dies.
"""
def _report_error(self, exc_info):
self._exc_info = exc_info
exception = exc_info[1]
if isinstance(exception, GreenletExit):
self._report_result(exception)
return
self._exception = exception
if self._links and self._notifier is None:
self._notifier = core.active_event(self._notify_links)
CALL_PATTERN = {'call': str, 'method': str, 'address': Address,
'message': object}
REMOTE_CALL_PATTERN = {'remotecall':str,
'method':str,
'message':object,
'timeout':object}
RESPONSE_PATTERN = {'response': str, 'message': object}
INVALID_METHOD_PATTERN = {'response': str, 'invalid_method': str}
EXCEPTION_PATTERN = {'response': str, 'exception':object}
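# Example (exposition only): a dict like {'response': 'abc', 'message': 42}
# matches RESPONSE_PATTERN, since 'abc' is a str and anything matches
# object; see shape.is_shaped for the matching rules.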
class Monitor(object):
def __init__(self, actor, ref, to_addr):
self.actor = actor
self.ref = ref
self.to_addr = to_addr
def _send_exit(self, *args):
self.actor._send_exit(self.to_addr, self.ref)
class Actor(object):
"""An Actor is a Greenlet which has a mailbox. Any other Actor
which has the Address can asynchronously put messages in this
mailbox.
The Actor extracts messages from this mailbox using a technique
called selective receive. To receive a message, the Actor calls
self.receive, passing in any number of "shapes" to match against
messages in the mailbox.
A shape describes which messages will be extracted from the
mailbox. For example, if the message ('credit', 250.0) is in the
mailbox, it could be extracted by calling self.receive(('credit',
int)). Shapes are Python object graphs containing only simple
Python types such as tuple, list, dictionary, integer, and string,
or type object constants for these types.
Since multiple patterns may be passed to receive, the return value
is (matched_pattern, message). To receive any message which is in
the mailbox, simply call receive with no patterns.
"""
_wevent = None
_args = (), {}
actor_id = property(lambda self: self._actor_id)
dead = property(lambda self: self.greenlet.ready())
def __init__(self, run=None, node=None, mesh=None):
if run is None:
self._to_run = self.main
else:
self._to_run = lambda *args, **kw: run(self.receive, *args, **kw)
self._actor_id = str(uuid.uuid4())
print "created actor", self._actor_id
self.greenlet = _Greenlet(self._run)
self.start = self.greenlet.start
self.start_later = self.greenlet.start_later
self.node = node
self.mesh = mesh
self._mailbox = []
self.address = Address(node.id, self._actor_id)
self.trap_exit = False
self.monitors = {}
def _run(self):
"""Run the actor."""
args, kw = self._args
del self._args
to_run = self._to_run
del self._to_run
_setcurrent(self)
return to_run(*args, **kw)
def _match_patterns(self,patterns):
"""Internal method to match a list of patterns against
the mailbox. If message matches any of the patterns,
that message is removed from the mailbox and returned
along with the pattern it matched. If message doesn't
match any pattern then None,None is returned.
"""
for i, message in enumerate(self._mailbox):
for pattern in patterns:
if shape.is_shaped(message, pattern):
del self._mailbox[i]
return pattern, message
return None,None
def receive(self, *patterns, **kw):
"""Select a message out of this Actor's mailbox. If patterns
are given, only select messages which match these shapes.
Otherwise, select the next message.
"""
timeout = kw.get('timeout', None)
if timeout == 0 :
if not patterns:
if self._mailbox:
return {object: object}, self._mailbox.pop(0)
else:
return None,None
return self._match_patterns(patterns)
if timeout is not None:
timer = gevent.Timeout(timeout, ReceiveTimeout)
timer.start()
else:
timer = None
try:
while True:
if patterns:
matched_pat, matched_msg = self._match_patterns(patterns)
elif self._mailbox:
matched_pat, matched_msg = ({object:object},
self._mailbox.pop(0))
else:
matched_pat = None
if matched_pat is not None:
if timer:
timer.cancel()
return matched_pat, matched_msg
self._wevent = Event()
try:
# wait until at least one message or timeout
self._wevent.wait()
finally:
self._wevent = None
except ReceiveTimeout:
return (None,None)
def link(self, to_addr):
"""Link this actor to a remote address."""
self._link(to_addr)
self.mesh.link(to_addr, self.address)
def _send_exit(self, to_addr, ref=None):
"""Send an exit message to the remote address."""
if self.greenlet.exception:
message = {'exit': self.address, 'exception': exc.format_exc(
self.greenlet._exc_info)}
else:
message = {'exit': self.address, 'value': self.greenlet.value}
if ref:
message['ref'] = ref
message = json.dumps(message, default=handle_custom)
self.mesh.exit(self.address, to_addr, message)
def _link(self, to_addr):
"""For internal use.
Link the Actor at the given Address to this Actor.
If this Actor has an unhandled exception, cast a message
containing details about the exception to the Address.
"""
print "we link %s to %s" % (self.address, to_addr)
self.greenlet.link(lambda g: self._send_exit(to_addr))
def _monitor(self, to_addr, ref):
"""For internal use.
XXX
"""
if self.greenlet.ready():
self._send_exit(to_addr, ref)
else:
monitor = Monitor(self, ref, to_addr)
self.greenlet.link(monitor._send_exit)
self.monitors[ref] = monitor
def _demonitor(self, to_addr, ref):
if ref in self.monitors:
monitor = self.monitors.pop(ref)
self.greenlet.unlink(monitor._send_exit)
def _cast(self, message):
"""For internal use.
Nodes use this to insert a message into this Actor's mailbox.
"""
self._mailbox.append(json.loads(message, object_hook=generate_custom))
if self._wevent and not self._wevent.is_set():
self._wevent.set()
def _exit(self, from_addr, message):
"""For internal use.
Handle a received exit signal.
"""
if self.trap_exit:
self._cast(message)
else:
# The actor do not trap the exit, which means we should
# terminate it. But only if it was an abnormal
# termination.
message = json.loads(message, object_hook=generate_custom)
if not message.has_key('value'):
self.greenlet.kill(LinkBroken(from_addr, message),
block=False)
def _get(self, timeout=None):
"""For internal use.
Wait until the actor finishes.
"""
return self.greenlet.get(timeout=timeout)
def main(self, *args, **kw):
"""If subclassing Actor, override this method to implement the Actor's
main loop.
"""
raise NotImplementedError("Implement in subclass.")
def sleep(self, amount):
gevent.sleep(amount)
def cast(self, address, message):
"""Send a message to the given address."""
self.mesh.cast(address, json.dumps(message, default=handle_custom))
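# Illustrative sketch (not part of the original module): a minimal actor
# function using selective receive with a shape, as described in the
# Actor docstring. It would be started from a node, e.g.
# node.spawn(_echo_once); the {'ping': Address} protocol is made up for
# this example.
def _echo_once(receive):
    pattern, message = receive({'ping': Address})
    # Reply to the sender's address extracted from the matched message.
    message['ping'].cast({'pong': curaddr()})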
class Server(Actor):
"""An actor which responds to the call protocol by looking for the
specified method and calling it.
Also, Server provides start and stop methods which can be overridden
to customize setup.
"""
def respond(self, orig_message, response=None):
if not shape.is_shaped(orig_message, CALL_PATTERN):
raise InvalidCallMessage(str(orig_message))
orig_message['address'].cast({'response':orig_message['call'],
'message':response})
def respond_invalid_method(self, orig_message, method):
if not shape.is_shaped(orig_message, CALL_PATTERN):
raise InvalidCallMessage(str(orig_message))
orig_message['address'].cast({'response':orig_message['call'],
'invalid_method':method})
def respond_exception(self, orig_message, exception):
if not shape.is_shaped(orig_message, CALL_PATTERN):
raise InvalidCallMessage(str(orig_message))
orig_message['address'].cast({'response':orig_message['call'],
'exception':exception})
def start(self, *args, **kw):
"""Override to be notified when the server starts.
"""
pass
def stop(self, *args, **kw):
"""Override to be notified when the server stops.
"""
pass
def main(self, *args, **kw):
"""Implement the actor main loop by waiting forever for messages.
Do not override.
"""
self.start(*args, **kw)
try:
while True:
pattern, message = self.receive(CALL_PATTERN)
method = getattr(self, message['method'], None)
if method is None:
self.respond_invalid_method(message, message['method'])
continue
try:
self.respond(message, method(message['message']))
except Exception:
formatted = exc.format_exc()
self.respond_exception(message, formatted)
finally:
self.stop(*args, **kw)
class Mesh(object):
"""A mesh of nodes.
Nodes are registed using C{add} when they arrive into the mesh.
It is up to an external coordinator to detect when new nodes
arrive.
"""
def __init__(self):
self._nodes = {}
def add(self, node):
"""Add a reachable node to the mesh."""
self._nodes[node.id] = node
def remove(self, id):
"""Remove a node from the mesh."""
del self._nodes[id]
def _forward(self, address, fn, *args):
"""For internal use."""
node = self._nodes.get(address.node_id)
return getattr(node, fn)(*args)
def exit(self, from_addr, to_addr, message):
"""Send an exit signal from Actor C{form_addr}."""
print "exit", from_addr, "to", to_addr, "message", message
self._forward(to_addr, '_exit', to_addr, from_addr, message)
def cast(self, address, message):
"""Send a message to a node in the mesh designated by the given
address.
The message may be silently dropped if the remote node does not
exist or if the actor is dead.
"""
self._forward(address, '_cast', address, message)
def link(self, address, to_addr):
"""Link actor C{pid1} to actor with address C{pid2}.
"""
self._forward(address, '_link', address, to_addr)
def monitor(self, address, to_addr, ref):
"""Monitor C{address}."""
self._forward(address, '_monitor', address, to_addr, ref)
def demonitor(self, address, ref):
"""."""
self._forward(address, '_demonitor', address, ref)
class Node(object):
"""Representation of a node in a mesh of nodes."""
id = property(lambda self: self._id)
def __init__(self, mesh, id):
"""Create a new node."""
self._id = id
self._mesh = mesh
self.actors = weakref.WeakValueDictionary()
mesh.add(self)
self.registry = {}
def make_ref(self):
"""Return a new reference."""
return Ref(self._id, str(uuid.uuid4()))
def register(self, name, address):
"""Associates the name C{name} with the process C{address}."""
assert address.node_id == self.id
if address.actor_id not in self.actors:
raise DeadActor()
if name in self.registry:
raise Exception("Conflicting name")
actor = self.actors[address.actor_id]
self.registry[name] = actor
actor._link(lambda _: self.registry.pop(name))
def whereis(self, name):
"""Return address of registered name C{name} or C{None} if
there's no address with that name.
"""
if name in self.registry:
return self.registry[name].address
def wait(self, address, timeout=None):
"""Wait for actor designated by address to finish."""
assert address.node_id == self._id
if address.actor_id not in self.actors:
raise DeadActor()
return self.actors[address.actor_id]._get(timeout=timeout)
def spawn(self, spawnable, *args, **kw):
"""Start a new actor.
If spawnable is a subclass of Actor, instantiate it with no
arguments and call the Actor's "main" method with *args and
**kw.
If spawnable is a callable, call it inside a new Actor with
the first argument being the "receive" method to use to
retrieve messages out of the Actor's mailbox, followed by the
given *args and **kw.
Return the Address of the new Actor.
"""
if is_actor_type(spawnable):
spawnable = spawnable(node=self, mesh=self._mesh)
else:
spawnable = Actor(spawnable, node=self, mesh=self._mesh)
# Add the actor to the registry, and have it removed when the
# actor dies.
self.actors[spawnable.actor_id] = spawnable
# FIXME (jrydberg): We could pass this to the ctor.
spawnable._args = (args, kw)
spawnable.start()
return spawnable.address
def spawn_link(self, spawnable, *args, **kw):
"""."""
address = self.spawn(spawnable, *args, **kw)
print "spawned", address
address.link()
return address
def send(self, address, message):
"""Send a message to an actor on this node or another one.
"""
self._mesh.cast(address, json.dumps(message, default=handle_custom))
def _cast(self, address, message):
"""For internal use.
Send a message to an actor on this node.
"""
_actor = self.actors.get(address.actor_id)
if _actor is None or _actor.dead:
# Silently drop the message.
return
_actor._cast(message)
def _exit(self, address, from_addr, message):
try:
_actor = self.actors[address.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._exit(from_addr, message)
def _link(self, from_addr, to_addr):
"""For internal use."""
try:
_actor = self.actors[from_addr.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._link(to_addr)
def _monitor(self, address, to_addr, ref):
try:
_actor = self.actors[address.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._monitor(to_addr, ref)
def _demonitor(self, address, ref):
try:
_actor = self.actors[address.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._demonitor(address, ref)
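# Hedged end-to-end sketch (added for exposition): wiring a single-node
# mesh and waiting for a spawned actor's return value. The node id
# 'node-0' is arbitrary.
def _single_node_example():
    mesh = Mesh()
    node = Node(mesh, 'node-0')
    address = node.spawn(lambda receive: 'done')
    return node.wait(address)  # -> 'done'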
|
mit
| 2,611,560,143,730,872,300
| 30.605735
| 78
| 0.592878
| false
| 4.110317
| false
| false
| false
|
SKIRT/PTS
|
magic/dist_ellipse.py
|
1
|
2347
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import standard modules
import numpy as np
# -----------------------------------------------------------------
def distance_ellipse(shape, center, ratio, angle):
"""
:return:
"""
return dist_ellipse(shape, center.x, center.y, ratio, angle.to("deg").value)
# -----------------------------------------------------------------
def dist_ellipse(n, xc, yc, ratio, pa=0): # original implementation (like DIST_ELLIPSE IDL function)
"""
N = a 2 element vector specifying the size of the M x N
rectangular output array. (Unlike the IDL original, a bare
scalar N is not supported by this implementation.)
XC,YC - Scalars giving the position of the ellipse center. This does
not necessarily have to be within the image
RATIO - Scalar giving the ratio of the major to minor axis. This
should be greater than 1 for position angle to have its
standard meaning.
OPTIONAL INPUTS:
POS_ANG - Position angle of the major axis in degrees, measured counter-clockwise
from the Y axis. For an image in standard orientation
(North up, East left) this is the astronomical position angle.
Default is 0 degrees.
OUTPUT:
IM - REAL*4 elliptical mask array, of size M x N. The value of each
pixel is equal to the semi-major axis of the ellipse of center
XC,YC, axial ratio RATIO, and position angle POS_ANG, which
passes through the pixel.
"""
ang = np.radians(pa + 90.)
cosang = np.cos(ang)
sinang = np.sin(ang)
nx = n[1]
ny = n[0]
x = np.arange(-xc,nx-xc)
y = np.arange(-yc,ny-yc)
im = np.empty(n)
xcosang = x*cosang
xsinang = x*sinang
for i in range(0, ny):
xtemp = xcosang + y[i]*sinang
ytemp = -xsinang + y[i]*cosang
im[i,:] = np.sqrt((xtemp*ratio)**2 + ytemp**2)
return im
# -----------------------------------------------------------------
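# Usage sketch (for exposition; not part of the original module): build a
# 100x100 map of elliptical semi-major axis distances around pixel
# (xc, yc) = (50, 40), with axis ratio 2 and a position angle of 30 deg.
def _dist_ellipse_example():
    mask = dist_ellipse((100, 100), 50., 40., 2., pa=30.)
    return mask < 25.  # boolean mask of pixels inside semi-major axis 25
# -----------------------------------------------------------------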
|
agpl-3.0
| -1,271,247,990,375,277,300
| 34.545455
| 100
| 0.511083
| false
| 4.037866
| false
| false
| false
|
theresaswayne/imagej-plugins
|
Demos and Tests/misc scripts/Crop_Confocal_Series_corrected.py
|
1
|
1332
|
# @OpService ops
# @Dataset data
# @UIService ui
# @OUTPUT ImgPlus c0
# @OUTPUT ImgPlus z12
# @OUTPUT ImgPlus c0z12
# @OUTPUT ImgPlus roiC0z12
# to run this tutorial run 'file->Open Samples->Confocal Series' and make sure that
# confocal-series.tif is the active image
from net.imglib2.util import Intervals
from net.imagej.axis import Axes
# first take a look at the size and type of each dimension
for d in range(data.numDimensions()):
print "axis d: type: "+str(data.axis(d).type())+" length: "+str(data.dimension(d))
img=data.getImgPlus()
xLen = data.dimension(data.dimensionIndex(Axes.X))
yLen = data.dimension(data.dimensionIndex(Axes.Y))
zLen = data.dimension(data.dimensionIndex(Axes.Z))
cLen = data.dimension(data.dimensionIndex(Axes.CHANNEL))
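# Intervals.createMinMax takes the minimum coordinate of every dimension
# followed by the maximum coordinates; for this dataset the axis order
# is (X, Y, C, Z).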
# crop a channel
c0=ops.run("transform.crop",img, Intervals.createMinMax(0, 0, 0,0,xLen-1, yLen-1, 0, zLen-1))
c0.setName("c0")
# crop both channels at z=12
z12=ops.run("transform.crop",img, Intervals.createMinMax(0,0,0,12, xLen-1, yLen-1, cLen-1, 12))
z12.setName("z12")
# crop channel 0 at z=12
c0z12=ops.run("transform.crop",img, Intervals.createMinMax(0,0,0,12, xLen-1, yLen-1, 0, 12))
c0z12.setName("c0z12")
# crop an roi at channel 0, z=12
roiC0z12=ops.run("transform.crop",img, Intervals.createMinMax(150,150,0,12, 200, 200, 0, 12))
roiC0z12.setName("roiC0z12")
|
gpl-3.0
| -3,989,707,301,982,040,000
| 32.3
| 95
| 0.731231
| false
| 2.576402
| false
| false
| false
|
severin-lemaignan/dialogs
|
src/dialogs/verbalization/verbalization_test.py
|
1
|
179763
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Chouayakh Mahdi
08/07/2010
The package contains functions to perform tests.
It is mostly used for the subject.
The tests are implemented as unittest.TestCase methods
(see TestVerbalization below).
"""
import unittest
import logging
logger = logging.getLogger("dialogs")
from dialogs.dialog_core import Dialog
from dialogs.parsing.parser import Parser
from dialogs.sentence import *
from dialogs.sentence_types import *
from dialogs.verbalization import utterance_rebuilding
class TestVerbalization(unittest.TestCase):
"""
Tests that verbalising the Sentence structures reproduces the original utterances.
"""
def test_01(self):
logger.info('\n######################## test 1.1 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle is on the table. The bottle is blue. The bottle is Blue."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['Blue'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_02(self):
logger.info('\n######################## test 1.2 ##############################')
logger.info('#################################################################\n')
original_utterance = "Jido's blue bottle is on the table. I'll play a guitar, a piano and a violon."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [['blue', []]],
[NominalGroup([], ['Jido'], [], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_03(self):
logger.info('\n######################## test 1.3 ##############################')
logger.info('#################################################################\n')
original_utterance = "It's on the table. I give it to you. Give me the bottle. I don't give the bottle to you."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup([], ['it'], [], [], [])],
[IndirectComplement(['to'], [NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement(['to'], [NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_04(self):
logger.info('\n######################## test 1.4 ##############################')
logger.info('#################################################################\n')
original_utterance = "You aren't preparing the car and my father's moto at the same time. Is my brother's bottle in your right?"
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['prepare'], [], 'present progressive',
[NominalGroup(['the'], ['car'], [], [], []),
NominalGroup(['the'], ['moto'], [],
[NominalGroup(['my'], ['father'], [], [], [])], [])],
[IndirectComplement(['at'], [
NominalGroup(['the'], ['time'], [['same', []]], [], [])])],
[], [], VerbalGroup.negative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup(['the'], ['bottle'], [], [NominalGroup(['my'], ['brother'], [], [], [])],
[])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['your'], ['right'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_05(self):
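        """Conditional modals ('should') in a statement and in yes/no questions, plus 'shall'."""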
logger.info('\n######################## test 1.5 ##############################')
logger.info('#################################################################\n')
original_utterance = "You shouldn't drive his poorest uncle's wife's big new car. Should I give you the bottle? Shall I go?"
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['should+drive'], [], 'present conditional',
[NominalGroup(['the'], ['car'], [['big', []], ['new', []]],
[NominalGroup(['the'], ['wife'], [],
[NominalGroup(['his'], ['uncle'],
[['poorest', []]], [], [])],
[])], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['should+give'], [], 'present conditional',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['shall+go'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_06(self):
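        """Negative yes/no questions, a 'can' modal, and a greeting (START) sentence."""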
logger.info('\n######################## test 1.6 ##############################')
logger.info('#################################################################\n')
original_utterance = "Isn't he doing his homework and his game now? Can't he take this bottle? Hello."
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['do'], [], 'present progressive',
[NominalGroup(['his'], ['homework'], [], [], []),
NominalGroup(['his'], ['game'], [], [], [])],
[],
[], ['now'], VerbalGroup.negative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['can+take'], [], 'present simple',
[NominalGroup(['this'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(START, '', [], [])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_07(self):
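        """Negative imperative with an adverb, and secondary verb groups ('want to play', 'would like to go')."""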
logger.info('\n######################## test 1.7 ##############################')
logger.info('#################################################################\n')
original_utterance = "Don't quickly give me the blue bottle. I want to play with my guitar. I'd like to go to the cinema."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [['blue', []]], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
['quickly'], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
                              [VerbalGroup(['want'],
                                           [VerbalGroup(['play'], [], '',
                                                        [],
                                                        [IndirectComplement(['with'], [NominalGroup(['my'], ['guitar'], [], [], [])])],
                                                        [], [], VerbalGroup.affirmative, [])],
                                           'present simple',
                                           [],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
                              [VerbalGroup(['like'],
                                           [VerbalGroup(['go'], [], '',
                                                        [],
                                                        [IndirectComplement(['to'], [NominalGroup(['the'], ['cinema'], [], [], [])])],
                                                        [], [], VerbalGroup.affirmative, [])],
                                           'present conditional',
                                           [],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_08(self):
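        """Relative clauses introduced by 'who' and 'that'."""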
logger.info('\n######################## test 1.8 ##############################')
logger.info('#################################################################\n')
original_utterance = "The man who talks, has a new car. I play the guitar that I bought yesterday."
        sentences = [Sentence(STATEMENT, '',
                              [NominalGroup(['the'], ['man'], [], [],
                                            [Sentence(RELATIVE, 'who',
                                                      [],
                                                      [VerbalGroup(['talk'], [], 'present simple',
                                                                   [],
                                                                   [],
                                                                   [], [], VerbalGroup.affirmative, [])])])],
                              [VerbalGroup(['have'], [], 'present simple',
                                           [NominalGroup(['a'], ['car'], [['new', []]], [], [])],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])]),
                     Sentence(STATEMENT, '',
                              [NominalGroup([], ['I'], [], [], [])],
                              [VerbalGroup(['play'], [], 'present simple',
                                           [NominalGroup(['the'], ['guitar'], [], [],
                                                         [Sentence(RELATIVE, 'that',
                                                                   [NominalGroup([], ['I'], [], [], [])],
                                                                   [VerbalGroup(['buy'], [], 'past simple',
                                                                                [],
                                                                                [],
                                                                                [], ['yesterday'], VerbalGroup.affirmative, [])])])],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_09(self):
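        """Negative imperative with two coordinated objects, each carrying a relative clause, plus indirect complements."""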
logger.info('\n######################## test 1.9 ##############################')
logger.info('#################################################################\n')
original_utterance = "Don't quickly give me the bottle which is on the table, and the glass which I cleaned yesterday, at my left."
        sentences = [Sentence(IMPERATIVE, '',
                              [],
                              [VerbalGroup(['give'], [], 'present simple',
                                           [NominalGroup(['the'], ['bottle'], [], [],
                                                         [Sentence(RELATIVE, 'which',
                                                                   [],
                                                                   [VerbalGroup(['be'], [], 'present simple',
                                                                                [],
                                                                                [IndirectComplement(['on'],
                                                                                                    [NominalGroup(['the'], ['table'], [], [], [])])],
                                                                                [], [], VerbalGroup.affirmative, [])])]),
                                            NominalGroup(['the'], ['glass'], [], [],
                                                         [Sentence(RELATIVE, 'which',
                                                                   [NominalGroup([], ['I'], [], [], [])],
                                                                   [VerbalGroup(['clean'], [], 'past simple',
                                                                                [],
                                                                                [],
                                                                                [], ['yesterday'], VerbalGroup.affirmative, [])])])],
                                           [IndirectComplement([], [NominalGroup([], ['me'], [], [], [])]),
                                            IndirectComplement(['at'], [NominalGroup(['my'], ['left'], [], [], [])])],
                                           ['quickly'], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_10(self):
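        """Relative clause nested inside another relative clause."""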
logger.info('\n######################## test 1.10 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle that I bought from the store which is in the shopping center, is yours."
        sentences = [Sentence(STATEMENT, '',
                              [NominalGroup(['the'], ['bottle'], [], [],
                                            [Sentence(RELATIVE, 'that',
                                                      [NominalGroup([], ['I'], [], [], [])],
                                                      [VerbalGroup(['buy'], [], 'past simple',
                                                                   [],
                                                                   [IndirectComplement(['from'],
                                                                                       [NominalGroup(['the'], ['store'], [], [],
                                                                                                     [Sentence(RELATIVE, 'which',
                                                                                                               [],
                                                                                                               [VerbalGroup(['be'], [], 'present simple',
                                                                                                                            [],
                                                                                                                            [IndirectComplement(['in'],
                                                                                                                                                [NominalGroup(['the'], ['center'], [['shopping', []]], [], [])])],
                                                                                                                            [], [], VerbalGroup.affirmative, [])])])])],
                                                                   [], [], VerbalGroup.affirmative, [])])])],
                              [VerbalGroup(['be'], [], 'present simple',
                                           [NominalGroup([], ['yours'], [], [], [])],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_11(self):
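        """'When' questions (date aim) with a negated future and a 'must' modal."""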
logger.info('\n######################## test 1.11 ##############################')
logger.info('#################################################################\n')
original_utterance = "When won't the planning session take place? When must you take the bus?"
sentences = [Sentence(W_QUESTION, 'date',
[NominalGroup(['the'], ['session'], [['planning', []]], [], [])],
[VerbalGroup(['take+place'], [], 'future simple',
[],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(W_QUESTION, 'date',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['must+take'], [], 'present simple',
[NominalGroup(['the'], ['bus'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_12(self):
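        """'Where' questions for place and origin, with coordinated subjects."""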
logger.info('\n######################## test 1.12 ##############################')
logger.info('#################################################################\n')
original_utterance = "Where is Broyen? Where are you going? Where must Jido and you be from?"
sentences = [Sentence(W_QUESTION, 'place',
[NominalGroup([], ['Broyen'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'place',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['go'], [], 'present progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'origin',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['must+be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_13(self):
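        """'What time' / 'what size' questions, a passive statement, and a progressive yes/no question."""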
logger.info('\n######################## test 1.13 ##############################')
logger.info('#################################################################\n')
original_utterance = "What time is the news on TV? What size do you wear? The code is written by me. Is Mahdi going to the Laas?"
sentences = [Sentence(W_QUESTION, 'time',
[NominalGroup(['the'], ['news'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'], [NominalGroup([], ['TV'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'size',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['wear'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['code'], [], [], [])],
[VerbalGroup(['write'], [], 'present passive',
[],
[IndirectComplement(['by'], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['Mahdi'], [], [], [])],
[VerbalGroup(['go'], [], 'present progressive',
[],
[IndirectComplement(['to'],
[NominalGroup(['the'], ['Laas'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_14(self):
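        """'What ... like' and 'what' questions, past progressive, and 'going to' with a secondary verb group."""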
logger.info('\n######################## test 1.14 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's the weather like in the winter here? What were you doing? What isn't Jido going to do tomorrow?"
sentences = [Sentence(W_QUESTION, 'description',
[NominalGroup(['the'], ['weather'], [], [], [])],
[VerbalGroup(['like'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['winter'], [], [], [])])],
[], ['here'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['do'], [], 'past progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup([], ['Jido'], [], [], [])],
                              [VerbalGroup(['go'],
                                           [VerbalGroup(['do'], [], '',
                                                        [],
                                                        [],
                                                        [], ['tomorrow'], VerbalGroup.affirmative, [])],
'present progressive',
[],
[],
[], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_15(self):
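        """Subjectless 'what happens' questions and a 'disagree' sentence."""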
logger.info('\n######################## test 1.15 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's happening? What must happen in the company today? What didn't happen here? No, sorry."
sentences = [Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['happen'], [], 'present progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['must+happen'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['company'], [], [], [])])],
[], ['today'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['happen'], [], 'past simple',
[],
[],
[], ['here'], VerbalGroup.negative, [])]),
Sentence('disagree', '', [], [])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_16(self):
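        """Question on a superlative possessive and a 'what ... for a living' question."""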
logger.info('\n######################## test 1.16 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's the biggest bottle's color on your left? What does your brother do for a living?"
sentences = [Sentence(W_QUESTION, 'thing',
[NominalGroup(['the'], ['color'], [],
[NominalGroup(['the'], ['bottle'], [['biggest', []]], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['your'], ['left'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'explication',
[NominalGroup(['your'], ['brother'], [], [], [])],
[VerbalGroup(['do'], [], 'present simple',
[],
[IndirectComplement(['for'],
[NominalGroup(['a'], [], [['living', []]], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_17(self):
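        """'What kind of' questions with negation and a compound modal verb ('must+listen+to')."""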
logger.info('\n######################## test 1.17 ##############################')
logger.info('#################################################################\n')
original_utterance = "What kind of people don't read this magazine? What kind of music must he listen to everyday?"
sentences = [Sentence(W_QUESTION, 'classification+people',
[],
[VerbalGroup(['read'], [], 'present simple',
[NominalGroup(['this'], ['magazine'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(W_QUESTION, 'classification+music',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['must+listen+to'], [], 'present simple',
[],
[],
[], ['everyday'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_18(self):
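        """'What kind of' question and 'what's the problem/matter with' questions."""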
logger.info('\n######################## test 1.18 ##############################')
logger.info('#################################################################\n')
original_utterance = "What kind of sport is your favorite? What's the problem with him? What's the matter with this person?"
sentences = [Sentence(W_QUESTION, 'classification+sport',
[NominalGroup(['your'], [], [['favorite', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup(['the'], ['problem'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['with'], [NominalGroup([], ['him'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup(['the'], ['matter'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['this'], ['person'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_19(self):
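        """'How old' / 'how long' questions, contrasting passive 'opened' with adjectival 'open'."""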
logger.info('\n######################## test 1.19 ##############################')
logger.info('#################################################################\n')
original_utterance = "How old are you? How long is your uncle's store opened tonight? How long is your uncle's store open tonight?"
sentences = [Sentence(W_QUESTION, 'old',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'long',
[NominalGroup(['the'], ['store'], [], [NominalGroup(['your'], ['uncle'], [], [], [])],
[])],
[VerbalGroup(['open'], [], 'present passive',
[],
[],
[], ['tonight'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'long',
[NominalGroup(['the'], ['store'], [], [NominalGroup(['your'], ['uncle'], [], [], [])],
[])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['open', []]], [], [])],
[],
[], ['tonight'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_20(self):
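        """'How far', 'how soon' and 'how often' questions."""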
logger.info('\n######################## test 1.20 ##############################')
logger.info('#################################################################\n')
original_utterance = "How far is it from the hotel to the restaurant? How soon can you be here? How often does Jido go skiing?"
sentences = [Sentence(W_QUESTION, 'far',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['from'],
[NominalGroup(['the'], ['hotel'], [], [], [])]),
IndirectComplement(['to'],
[NominalGroup(['the'], ['restaurant'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'soon',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['can+be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'often',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['go+skiing'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_21(self):
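        """'How much' quantity questions over subjects and objects."""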
logger.info('\n######################## test 1.21 ##############################')
logger.info('#################################################################\n')
original_utterance = "How much water should they transport? How much guests weren't at the party? How much does the motocycle cost?"
sentences = [Sentence(W_QUESTION, 'quantity',
[NominalGroup([], ['they'], [], [], [])],
[VerbalGroup(['should+transport'], [], 'present conditional',
[NominalGroup(['a'], ['water'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'quantity',
[NominalGroup(['a'], ['guests'], [], [], [])],
[VerbalGroup(['be'], [], 'past simple',
[],
[IndirectComplement(['at'],
[NominalGroup(['the'], ['party'], [], [], [])])],
[], [], VerbalGroup.negative, [])]),
Sentence(W_QUESTION, 'quantity',
[NominalGroup(['the'], ['motocycle'], [], [], [])],
[VerbalGroup(['cost'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_22(self):
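        """'How about' invitation, a negated present-perfect 'how' question, and an agreement sentence."""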
logger.info('\n######################## test 1.22 ##############################')
logger.info('#################################################################\n')
original_utterance = "How about going to the cinema? How haven't they gotten a loan for their business? OK."
sentences = [Sentence(W_QUESTION, 'invitation',
[],
[VerbalGroup(['go'], [], 'present progressive',
[],
[IndirectComplement(['to'],
[NominalGroup(['the'], ['cinema'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'manner',
[NominalGroup([], ['they'], [], [], [])],
[VerbalGroup(['get'], [], 'present perfect',
[NominalGroup(['a'], ['loan'], [], [], [])],
[IndirectComplement(['for'],
[NominalGroup(['their'], ['business'], [], [], [])])],
[], [], VerbalGroup.negative, [])]),
Sentence(AGREEMENT, '', [], [])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_23(self):
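        """Opinion question ('what did you think of') and a 'how could I get to' question."""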
logger.info('\n######################## test 1.23 ##############################')
logger.info('#################################################################\n')
original_utterance = "What did you think of Steven Spilburg's new movie? How could I get to the restaurant from here?"
sentences = [Sentence(W_QUESTION, 'opinion',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['like'], [], 'past simple',
[NominalGroup(['the'], ['movie'], [['new', []]],
[NominalGroup([], ['Steven', 'Spilburg'], [], [], [])],
[])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'manner',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['could+get+to'], [], 'present conditional',
[NominalGroup(['the'], ['restaurant'], [], [], [])],
[IndirectComplement(['from'], [NominalGroup([], ['here'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_24(self):
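        """'Why', 'who' and 'whose' questions, the last with coordinated subjects."""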
logger.info('\n######################## test 1.24 ##############################')
logger.info('#################################################################\n')
original_utterance = "Why should she go to Toulouse? Who could you talk to on the phone? Whose blue bottle and red glass are these?"
sentences = [Sentence(W_QUESTION, 'reason',
[NominalGroup([], ['she'], [], [], [])],
[VerbalGroup(['should+go'], [], 'present conditional',
[],
[IndirectComplement(['to'],
[NominalGroup([], ['Toulouse'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'people',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['could+talk+to'], [], 'present conditional',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['phone'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'owner',
[NominalGroup([], ['bottle'], [['blue', []]], [], []),
NominalGroup([], ['glass'], [['red', []]], [], [])],
[VerbalGroup(['be'], [], '',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_25(self):
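        """Opinion and color questions whose nominal groups carry relative clauses."""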
logger.info('\n######################## test 1.25 ##############################')
logger.info('#################################################################\n')
original_utterance = "What are you thinking about the idea that I present you? What color is the bottle which you bought?"
        sentences = [Sentence(W_QUESTION, 'opinion',
                              [NominalGroup([], ['you'], [], [], [])],
                              [VerbalGroup(['think+about'], [], 'present progressive',
                                           [NominalGroup(['the'], ['idea'], [], [],
                                                         [Sentence(RELATIVE, 'that',
                                                                   [NominalGroup([], ['I'], [], [], [])],
                                                                   [VerbalGroup(['present'], [], 'present simple',
                                                                                [],
                                                                                [IndirectComplement([], [NominalGroup([], ['you'], [], [], [])])],
                                                                                [], [], VerbalGroup.affirmative, [])])])],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])]),
                     Sentence(W_QUESTION, 'color',
                              [NominalGroup(['the'], ['bottle'], [], [],
                                            [Sentence(RELATIVE, 'which',
                                                      [NominalGroup([], ['you'], [], [], [])],
                                                      [VerbalGroup(['buy'], [], 'past simple',
                                                                   [],
                                                                   [],
                                                                   [], [], VerbalGroup.affirmative, [])])])],
                              [VerbalGroup(['be'], [], 'present simple',
                                           [],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_26(self):
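        """'Which' choice question with a nested relative clause and an ALL quantifier."""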
logger.info('\n######################## test 1.26 ##############################')
logger.info('#################################################################\n')
original_utterance = "Which salesperson's competition won the award which we won in the last years?"
        sentences = [Sentence(W_QUESTION, 'choice',
                              [NominalGroup(['the'], ['competition'], [],
                                            [NominalGroup(['the'], ['salesperson'], [], [], [])], [])],
                              [VerbalGroup(['win'], [], 'past simple',
                                           [NominalGroup(['the'], ['award'], [], [],
                                                         [Sentence(RELATIVE, 'which',
                                                                   [NominalGroup([], ['we'], [], [], [])],
                                                                   [VerbalGroup(['win'], [], 'past simple',
                                                                                [],
                                                                                [IndirectComplement(['in'],
                                                                                                    [NominalGroup(['the'], ['year'], [['last', []]], [], [])])],
                                                                                [], [], VerbalGroup.affirmative, [])])])],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[0].relative[0].sv[0].i_cmpl[0].gn[0]._quantifier = "ALL"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_27(self):
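        """'What ... look like' question and an opinion question with a relative clause."""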
logger.info('\n######################## test 1.27 ##############################')
logger.info('#################################################################\n')
original_utterance = "What will your house look like? What do you think of the latest novel which Jido wrote?"
sentences = [Sentence(W_QUESTION, 'description',
[NominalGroup(['your'], ['house'], [], [], [])],
[VerbalGroup(['look+like'], [], 'future simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'opinion',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['think+of'], [], 'present simple',
[NominalGroup(['the'], ['novel'], [['latest', []]], [],
[Sentence(RELATIVE, 'which',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['write'], [], 'past simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_28(self):
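        """'That' subsentence embedding a secondary verb group, and an 'if' subsentence."""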
logger.info('\n######################## test 1.28 ##############################')
logger.info('#################################################################\n')
original_utterance = "Learn that I want you to give me the blue bottle. You'll be happy, if you do your job."
        sentences = [Sentence(IMPERATIVE, '',
                              [],
                              [VerbalGroup(['learn'], [], 'present simple',
                                           [],
                                           [],
                                           [], [], VerbalGroup.affirmative,
                                           [Sentence('subsentence', 'that',
                                                     [NominalGroup([], ['I'], [], [], [])],
                                                     [VerbalGroup(['want'],
                                                                  [VerbalGroup(['give'], [], '',
                                                                               [NominalGroup(['the'], ['bottle'], [['blue', []]], [], [])],
                                                                               [IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
                                                                               [], [], VerbalGroup.affirmative, [])],
                                                                  'present simple',
                                                                  [NominalGroup([], ['you'], [], [], [])],
                                                                  [],
                                                                  [], [], VerbalGroup.affirmative, [])])])]),
                     Sentence(STATEMENT, '',
                              [NominalGroup([], ['you'], [], [], [])],
                              [VerbalGroup(['be'], [], 'future simple',
                                           [NominalGroup([], [], [['happy', []]], [], [])],
                                           [],
                                           [], [], VerbalGroup.affirmative,
                                           [Sentence('subsentence', 'if',
                                                     [NominalGroup([], ['you'], [], [], [])],
                                                     [VerbalGroup(['do'], [], 'present simple',
                                                                  [NominalGroup(['your'], ['job'], [], [], [])],
                                                                  [],
                                                                  [], [], VerbalGroup.affirmative, [])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_29(self):
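        """'If' subsentence plus OR-coordinated direct objects sharing one noun."""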
logger.info('\n######################## test 1.29 ##############################')
logger.info('#################################################################\n')
original_utterance = "You'll be happy, if you do your job. Do you want the blue or green bottle?"
        sentences = [Sentence(STATEMENT, '',
                              [NominalGroup([], ['you'], [], [], [])],
                              [VerbalGroup(['be'], [], 'future simple',
                                           [NominalGroup([], [], [['happy', []]], [], [])],
                                           [],
                                           [], [], VerbalGroup.affirmative,
                                           [Sentence('subsentence', 'if',
                                                     [NominalGroup([], ['you'], [], [], [])],
                                                     [VerbalGroup(['do'], [], 'present simple',
                                                                  [NominalGroup(['your'], ['job'], [], [], [])],
                                                                  [],
                                                                  [], [], VerbalGroup.affirmative, [])])])]),
                     Sentence(YES_NO_QUESTION, '',
                              [NominalGroup([], ['you'], [], [], [])],
                              [VerbalGroup(['want'], [], 'present simple',
                                           [NominalGroup(['the'], [], [['blue', []]], [], []),
                                            NominalGroup([], ['bottle'], [['green', []]], [], [])],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].d_obj[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_30(self):
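        """Mixed OR/AND coordination of objects and an 'ago' indirect complement."""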
logger.info('\n######################## test 1.30 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's wrong with him? I'll play a guitar or a piano and a violon. I played a guitar a year ago."
sentences = [Sentence(W_QUESTION, 'thing',
[NominalGroup([], [], [['wrong', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['with'], [NominalGroup([], ['him'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'past simple',
[NominalGroup(['a'], ['guitar'], [], [], [])],
[IndirectComplement(['ago'],
[NominalGroup(['a'], ['year'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].d_obj[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_31(self):
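        """'Who ... talk to' question plus present and past conditional forms."""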
logger.info('\n######################## test 1.31 ##############################')
logger.info('#################################################################\n')
original_utterance = "Who are you talking to? You should have the bottle. Would you've played a guitar? You'd have played a guitar."
sentences = [Sentence(W_QUESTION, 'people',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['talk+to'], [], 'present progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['should+have'], [], 'present conditional',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['play'], [], 'past conditional',
[NominalGroup(['a'], ['guitar'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['play'], [], 'past conditional',
[NominalGroup(['a'], ['guitar'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_32(self):
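        """'What ... for a living' questions with extra place complements and an adverb."""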
logger.info('\n######################## test 1.32 ##############################')
logger.info('#################################################################\n')
original_utterance = "What do you do for a living in this building? What does your brother do for a living here?"
sentences = [Sentence(W_QUESTION, 'explication',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['do'], [], 'present simple',
[],
[IndirectComplement(['for'],
[NominalGroup(['a'], [], [['living', []]], [], [])]),
IndirectComplement(['in'],
[NominalGroup(['this'], ['building'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'explication',
[NominalGroup(['your'], ['brother'], [], [], [])],
[VerbalGroup(['do'], [], 'present simple',
[],
[IndirectComplement(['for'],
[NominalGroup(['a'], [], [['living', []]], [], [])])],
[], ['here'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_33(self):
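        """'This is' and 'there is' existential statements."""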
logger.info('\n######################## test 1.33 ##############################')
logger.info('#################################################################\n')
original_utterance = "This is a bottle. There is a bottle on the table."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['this'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['a'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['there'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['a'], ['bottle'], [], [], [])],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_34(self):
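        """Yes/no question with OR between two indirect complements."""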
logger.info('\n######################## test 1.34 ##############################')
logger.info('#################################################################\n')
original_utterance = "Is it on the table or the shelf?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])]),
IndirectComplement([], [NominalGroup(['the'], ['shelf'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_35(self):
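        """'Where' question followed by an elliptical 'on ... or on ...' question."""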
logger.info('\n######################## test 1.35 ##############################')
logger.info('#################################################################\n')
original_utterance = "Where is it? On the table or on the shelf?"
sentences = [Sentence(W_QUESTION, 'place',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[],
[VerbalGroup([], [], '',
[],
                                           [IndirectComplement(['on'], [NominalGroup(['the'], ['table'], [], [], [])]),
                                            IndirectComplement(['on'], [NominalGroup(['the'], ['shelf'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_36(self):
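        """Yes/no question with compound prepositions ('on', 'in front of') joined by OR."""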
logger.info('\n######################## test 1.36 ##############################')
logger.info('#################################################################\n')
original_utterance = "Is it on your left or in front of you?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['your'], ['left'], [], [], [])]),
IndirectComplement(['in+front+of'],
[NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_37(self):
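        """'Where' question followed by an elliptical question with compound prepositions."""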
logger.info('\n######################## test 1.37 ##############################')
logger.info('#################################################################\n')
original_utterance = "Where is it? On your left or in front of you?"
sentences = [Sentence(W_QUESTION, 'place',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], [], [], [], [])],
[VerbalGroup([], [], '',
[],
[IndirectComplement(['on'], [
NominalGroup(['your'], ['left'], [], [], [])]),
IndirectComplement(['in+front+of'],
[NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_38(self):
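        """Verbless nominal question ('The blue bottle?') followed by a 'what do you mean' question."""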
logger.info('\n######################## test 1.38 ##############################')
logger.info('#################################################################\n')
original_utterance = "The blue bottle? What do you mean?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup(['the'], ['bottle'], [['blue', []]], [], [])],
[]),
Sentence(W_QUESTION, 'thing',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['mean'], [], 'present simple', [], [], [], [], VerbalGroup.affirmative,
[])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_39(self):
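        """OR coordination inside subjects and objects where two groups share one noun."""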
logger.info('\n######################## test 1.39 ##############################')
logger.info('#################################################################\n')
original_utterance = "Would you like the blue bottle or the glass? The green or blue bottle is on the table. Is the green or blue glass mine?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['like'], [], 'present conditional',
[NominalGroup(['the'], ['bottle'], [['blue', []]], [], []),
NominalGroup(['the'], ['glass'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], [], [['green', []]], [], []),
NominalGroup([], ['bottle'], [['blue', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup(['the'], [], [['green', []]], [], []),
NominalGroup([], ['glass'], [['blue', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['mine'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[1]._conjunction = "OR"
sentences[1].sn[1]._conjunction = "OR"
sentences[2].sn[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_40(self):
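        """'That' subsentence whose embedded object itself carries a relative clause."""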
logger.info('\n######################## test 1.40 ##############################')
logger.info('#################################################################\n')
original_utterance = "Learn that I want you to give me the blue bottle that's blue."
        sentences = [Sentence(IMPERATIVE, '',
                              [],
                              [VerbalGroup(['learn'], [], 'present simple',
                                           [],
                                           [],
                                           [], [], VerbalGroup.affirmative,
                                           [Sentence('subsentence', 'that',
                                                     [NominalGroup([], ['I'], [], [], [])],
                                                     [VerbalGroup(['want'],
                                                                  [VerbalGroup(['give'], [], '',
                                                                               [NominalGroup(['the'], ['bottle'], [['blue', []]], [],
                                                                                             [Sentence(RELATIVE, 'that',
                                                                                                       [],
                                                                                                       [VerbalGroup(['be'], [], 'present simple',
                                                                                                                    [NominalGroup([], [], [['blue', []]], [], [])],
                                                                                                                    [],
                                                                                                                    [], [], VerbalGroup.affirmative, [])])])],
                                                                               [IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
                                                                               [], [], VerbalGroup.affirmative, [])],
                                                                  'present simple',
                                                                  [NominalGroup([], ['you'], [], [], [])],
                                                                  [],
                                                                  [], [], VerbalGroup.affirmative, [])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_41(self):
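        """Compound prepositions 'behind to', 'next to' and 'in front of'."""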
logger.info('\n######################## test 1.41 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle is behind to me. The bottle is next to the table in front of the kitchen."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['behind+to'],
[NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['next+to'],
[NominalGroup(['the'], ['table'], [], [], [])]),
IndirectComplement(['in+front+of'],
[NominalGroup(['the'], ['kitchen'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_42(self):
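        """Adverb-initial imperative, a relative with a dangling preposition ('drink in'), and a DIGIT quantifier."""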
logger.info('\n######################## test 1.42 ##############################')
logger.info('#################################################################\n')
original_utterance = "Carefully take the bottle. I take that bottle that I drink in. I take 22 bottles."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['take'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[],
['carefully'], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['take'], [], 'present simple',
                                           [NominalGroup(['that'], ['bottle'], [], [],
                                                         [Sentence(RELATIVE, 'that',
                                                                   [NominalGroup([], ['I'], [], [], [])],
                                                                   [VerbalGroup(['drink'], [], 'present simple',
                                                                                [],
                                                                                [IndirectComplement(['in'], [])],
                                                                                [], [], VerbalGroup.affirmative, [])])])],
                                           [],
                                           [], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['take'], [], 'present simple',
[NominalGroup(['22'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[2].sv[0].d_obj[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_43(self):
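        """Enumeration of objects with chained possessive noun complements."""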
logger.info('\n######################## test 1.43 ##############################')
logger.info('#################################################################\n')
original_utterance = "I'll play Jido's guitar, a saxophone, my oncle's wife's piano and Patrick's violon."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['the'], ['guitar'], [],
[NominalGroup([], ['Jido'], [], [], [])], []),
NominalGroup(['a'], ['saxophone'], [], [], []),
NominalGroup(['a'], ['piano'], [], [NominalGroup(['the'], ['wife'], [], [
NominalGroup(['my'], ['oncle'], [], [], [])], [])], []),
NominalGroup(['the'], ['violon'], [],
[NominalGroup([], ['Patrick'], [], [], [])], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_44(self):
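        """OR between digit-quantified objects, stacked adjectives, and an imperative with a relative clause."""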
logger.info('\n######################## test 1.44 ##############################')
logger.info('#################################################################\n')
original_utterance = "Give me 2 or 3 bottles. The bottle is blue big funny. Give me the bottle which is on the table."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['2'], [], [], [], []),
NominalGroup(['3'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []], ['big', []], ['funny', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
                                           [NominalGroup(['the'], ['bottle'], [], [],
                                                         [Sentence(RELATIVE, 'which',
                                                                   [],
                                                                   [VerbalGroup(['be'], [], 'present simple',
                                                                                [],
                                                                                [IndirectComplement(['on'],
                                                                                                    [NominalGroup(['the'], ['table'], [], [], [])])],
                                                                                [], [], VerbalGroup.affirmative, [])])])],
                                           [IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
                                           [], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[1]._conjunction = "OR"
sentences[0].sv[0].d_obj[0]._quantifier = "DIGIT"
sentences[0].sv[0].d_obj[1]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_45(self):
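        """Plural possessive ("the boys'"), an 'ask ... to do' secondary group, and an 'any' determiner."""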
logger.info('\n######################## test 1.45 ##############################')
logger.info('#################################################################\n')
original_utterance = "The boys' ball is blue. He asks me to do something. Is any person courageous on the laboratory?"
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['ball'], [], [NominalGroup(['the'], ['boy'], [], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['ask'], [VerbalGroup(['do'], [], '',
[NominalGroup([], ['something'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])], 'present simple',
[NominalGroup([], ['me'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup(['any'], ['person'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['courageous', []]], [], [])],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['laboratory'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sn[0].noun_cmpl[0]._quantifier = "ALL"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_46(self):
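        """Passive modal forms ('must be happened', a passive conditional) and 'can be'."""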
logger.info('\n######################## test 1.46 ##############################')
logger.info('#################################################################\n')
original_utterance = "What must be happened in the company today? The building shouldn't fastly be built. You can be here."
sentences = [Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['must+happen'], [], 'present passive',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['company'], [], [], [])])],
[], ['today'], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['building'], [], [], [])],
[VerbalGroup(['should+build'], [], 'passive conditional',
[],
[],
['fastly'], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['can+be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_47(self):
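        """'What size', 'what object' and 'how good' questions."""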
logger.info('\n######################## test 1.47 ##############################')
logger.info('#################################################################\n')
original_utterance = "What size is the best one? What object is blue? How good is this?"
sentences = [Sentence(W_QUESTION, 'size',
[NominalGroup(['the'], ['one'], [['best', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'object',
[],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'good',
[NominalGroup(['this'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_48(self):
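        """Vocative interjection ('Patrick, ...') followed by a statement and an imperative."""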
logger.info('\n######################## test 1.48 ##############################')
logger.info('#################################################################\n')
original_utterance = "Patrick, the bottle is on the table. Give it to me."
sentences = [Sentence('interjection', '',
[NominalGroup([], ['Patrick'], [], [], [])],
[]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Patrick'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup([], ['it'], [], [], [])],
[IndirectComplement(['to'], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_49(self):
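        """Vocatives with one or several coordinated addressees, in statements and imperatives."""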
logger.info('\n######################## test 1.49 ##############################')
logger.info('#################################################################\n')
original_utterance = "Jido, give me the bottle. Jido, Patrick and you will go to the cinema. Jido, Patrick and you, give me the bottle."
sentences = [Sentence('interjection', '',
[NominalGroup([], ['Jido'], [], [], [])],
[]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['Patrick'], [], [], []),
NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['go'], [], 'future simple',
[],
[IndirectComplement(['to'],
[NominalGroup(['the'], ['cinema'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence('interjection', '',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['Patrick'], [], [], []),
NominalGroup([], ['you'], [], [], [])],
[]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['Patrick'], [], [], []),
NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_50(self):
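        """'But' subsentence and BUT/OR conjunctions between coordinated objects."""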
logger.info('\n######################## test 1.50 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle isn't blue but it's red. It isn't the glass but the bottle. It's blue or red."
        sentences = [Sentence(STATEMENT, '',
                              [NominalGroup(['the'], ['bottle'], [], [], [])],
                              [VerbalGroup(['be'], [], 'present simple',
                                           [NominalGroup([], [], [['blue', []]], [], [])],
                                           [],
                                           [], [], VerbalGroup.negative,
                                           [Sentence('subsentence', 'but',
                                                     [NominalGroup([], ['it'], [], [], [])],
                                                     [VerbalGroup(['be'], [], 'present simple',
                                                                  [NominalGroup([], [], [['red', []]], [], [])],
                                                                  [],
                                                                  [], [], VerbalGroup.affirmative, [])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'], ['glass'], [], [], []),
NominalGroup(['the'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], []),
NominalGroup([], [], [['red', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].d_obj[1]._conjunction = "BUT"
sentences[2].sv[0].d_obj[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_51(self):
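        """'But' between attributes and generic plurals verbalised with the ALL quantifier."""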
logger.info('\n######################## test 1.51 ##############################')
logger.info('#################################################################\n')
original_utterance = "It isn't red but blue. This is my banana. Bananas are fruits."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['red', []]], [], []),
NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['this'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['my'], ['banana'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['banana'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['fruit'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[1]._conjunction = "BUT"
sentences[2].sn[0]._quantifier = "ALL"
sentences[2].sv[0].d_obj[0]._quantifier = "ALL"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_52(self):
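        """Quantifiers 'no', 'all' and 'more' (ANY/ALL/SOME) plus a relative clause on 'information'."""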
logger.info('\n######################## test 1.52 ##############################')
logger.info('#################################################################\n')
original_utterance = "There are no bananas. All bananas are here. Give me more information which are about the bottle."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['there'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['no'], ['banana'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['all'], ['banana'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['more'], ['information'], [], [],
[Sentence(RELATIVE, 'which',
[],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['about'], [
NominalGroup(['the'],
['bottle'], [], [],
[])])],
[], [], VerbalGroup.affirmative, [])])])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sn[0]._quantifier = "SOME"
sentences[0].sv[0].d_obj[0]._quantifier = "ANY"
sentences[1].sn[0]._quantifier = "ALL"
sentences[2].sv[0].d_obj[0]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_53(self):
logger.info('\n######################## test 1.53 ##############################')
logger.info('#################################################################\n')
original_utterance = "Jido, tell me where you go. Goodbye. There is nothing. It's another one."
sentences = [Sentence('interjection', '',
[NominalGroup([], ['Jido'], [], [], [])],
[]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['tell'], [], 'present simple',
[],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'where',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['go'], [],
'present simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(END, '', [], []),
Sentence(STATEMENT, '',
[NominalGroup(['there'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['nothing'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['another'], ['one'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_54(self):
logger.info('\n######################## test 1.54 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle becomes blue. 1 piece could become 2, if you smoldered it."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['become'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['1'], ['piece'], [], [], [])],
[VerbalGroup(['could+become'], [], 'present conditional',
[NominalGroup(['2'], [], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'if',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['smolder'], [],
'past simple',
[NominalGroup([], ['it'],
[], [], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_55(self):
logger.info('\n######################## test 1.55 ##############################')
logger.info('#################################################################\n')
original_utterance = "This one isn't my uncle's bottle but it's my brother's bottle. It isn't on the table but on the shelf."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['this'], ['one'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [],
[NominalGroup(['my'], ['uncle'], [], [], [])], [])],
[],
[], [], VerbalGroup.negative, [Sentence('subsentence', 'but',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'],
['bottle'], [],
[NominalGroup(
['my'],
['brother'],
[], [],
[])], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'], [NominalGroup(['the'], ['table'], [], [], []),
NominalGroup(['the'], ['shelf'], [], [],
[])])],
[], [], VerbalGroup.negative, [])])]
sentences[1].sv[0].i_cmpl[0].gn[1]._conjunction = "BUT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_56(self):
logger.info('\n######################## test 1.56 ##############################')
logger.info('#################################################################\n')
original_utterance = "Give me the fourth and seventh bottle. Give me the one thousand ninth and the thirty thousand twenty eighth bottle."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], [], [['fourth', []]], [], []),
NominalGroup([], ['bottle'], [['seventh', []]], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], [], [['one+thousand+ninth', []]], [], []),
NominalGroup(['the'], ['bottle'], [['thirty+thousand+twenty+eighth', []]],
[], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_57(self):
logger.info('\n######################## test 1.57 ##############################')
logger.info('#################################################################\n')
original_utterance = "The evil tyran is in the laboratory. I don't know what you're talking about."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['tyran'], [['evil', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['laboratory'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['know'], [], 'present simple',
[],
[],
[], [], VerbalGroup.negative, [Sentence('subsentence', 'what',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['talk'], [],
'present progressive',
[],
[IndirectComplement(['about'],
[])],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_58(self):
logger.info('\n######################## test 1.58 ##############################')
logger.info('#################################################################\n')
original_utterance = "I go to the place where I was born. I study where you studied. I study where you build your house where you put the bottle."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['go'], [], 'present simple',
[],
[IndirectComplement(['to'], [
NominalGroup(['the'], ['place'], [], [], [Sentence(RELATIVE, 'where',
[NominalGroup([],
['I'], [], [],
[])],
[VerbalGroup(
['be'], [],
'past simple',
[NominalGroup(
[], [], [[
'born',
[]]],
[], [])],
[],
[], [],
VerbalGroup.affirmative,
[])])])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['study'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'where',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['study'], [],
'past simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['study'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'where',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['build'], [],
'present simple',
[NominalGroup(['your'],
['house'],
[], [], [Sentence(
RELATIVE, 'where',
[NominalGroup([],
['you'], [], [],
[])],
[VerbalGroup(
['put'], [],
'present simple',
[NominalGroup(
['the'],
['bottle'],
[], [],
[])],
[],
[], [],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_59(self):
logger.info('\n######################## test 1.59 ##############################')
logger.info('#################################################################\n')
original_utterance = "Apples grow on trees and plants. Give me 3 apples."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['apple'], [], [], [])],
[VerbalGroup(['grow'], [], 'present simple',
[],
[IndirectComplement(['on'], [NominalGroup([], ['tree'], [], [], []),
NominalGroup([], ['plant'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['3'], ['apple'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[1]._quantifier = "ALL"
sentences[1].sv[0].d_obj[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_60(self):
        logger.info('\n######################## test 1.60 ##############################')
logger.info('#################################################################\n')
original_utterance = "We were preparing the dinner when your father came. He made a sandwich which is with bacon, while I phoned."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['we'], [], [], [])],
[VerbalGroup(['prepare'], [], 'past progressive',
[NominalGroup(['the'], ['dinner'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'when',
[NominalGroup(['your'], ['father'], [],
[], [])],
[VerbalGroup(['come'], [], 'past simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['make'], [], 'past simple',
[NominalGroup(['a'], ['sandwich'], [], [], [Sentence(RELATIVE, 'which',
[],
[VerbalGroup(['be'],
[],
'present simple',
[],
[
IndirectComplement(
[
'with'],
[
NominalGroup(
[],
[
'bacon'],
[],
[],
[])])],
[], [],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'while',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['phone'], [],
'past simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_61(self):
        logger.info('\n######################## test 1.61 ##############################')
logger.info('#################################################################\n')
original_utterance = "The big very strong man is on the corner. The too big very strong man is on the corner."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['man'], [['big', []], ['strong', ['very']]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['corner'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['man'], [['big', ['too']], ['strong', ['very']]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['corner'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_62(self):
        logger.info('\n######################## test 1.62 ##############################')
logger.info('#################################################################\n')
original_utterance = "Red apples grow on green trees and plants. A kind of thing. It can be played by 30028 players."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['apple'], [['red', []]], [], [])],
[VerbalGroup(['grow'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup([], ['tree'], [['green', []]], [], []),
NominalGroup([], ['plant'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['a'], ['kind'], [], [NominalGroup(['a'], ['thing'], [], [], [])], [])],
[]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['can+play'], [], 'present passive',
[],
[IndirectComplement(['by'],
[NominalGroup(['30028'], ['player'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[1]._quantifier = "ALL"
sentences[1].sn[0]._quantifier = "SOME"
sentences[1].sn[0].noun_cmpl[0]._quantifier = "SOME"
sentences[2].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_63(self):
        logger.info('\n######################## test 1.63 ##############################')
logger.info('#################################################################\n')
original_utterance = "Let the man go to the cinema. Is it the time to let you go? Where is the other tape?"
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['let'], [VerbalGroup(['go'],
[], '',
[],
[IndirectComplement(['to'], [
NominalGroup(['the'], ['cinema'], [], [],
[])])],
[], [], VerbalGroup.affirmative, [])], 'present simple',
[NominalGroup(['the'], ['man'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [VerbalGroup(['let'],
[VerbalGroup(['go'],
[], '',
[],
[],
[], [], VerbalGroup.affirmative, [])], '',
[NominalGroup([], ['you'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])], 'present simple',
[NominalGroup(['the'], ['time'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'place',
[NominalGroup(['the'], ['tape'], [['other', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_64(self):
        logger.info('\n######################## test 1.64 ##############################')
        logger.info('#################################################################\n')
original_utterance = "And now, can you reach the tape. it could have been them. It is just me at the door. A strong clause can stand on its own."
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['can+reach'], [], 'present simple',
[NominalGroup(['the'], ['tape'], [], [], [])],
[],
[], ['now'], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['could+be'], [], 'passive conditional',
[NominalGroup([], ['them'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['me'], [], [], [])],
[IndirectComplement(['at'],
[NominalGroup(['the'], ['door'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['a'], ['clause'], [['strong', []]], [], [])],
[VerbalGroup(['can+stand'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['its'], ['own'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_65(self):
        logger.info('\n######################## test 1.65 ##############################')
        logger.info('#################################################################\n')
original_utterance = "Tell me what to do. No, I can not reach it."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['tell'], [], 'present simple',
[],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])]),
IndirectComplement([],
[NominalGroup(['the'], ['thing'], [], [], [Sentence(RELATIVE, 'that',
[],
[VerbalGroup(
['be'], [
VerbalGroup(
[
'do'],
[],
'',
[],
[],
[],
[],
VerbalGroup.affirmative,
[])],
'present simple',
[],
[],
[], [],
VerbalGroup.affirmative,
[])])])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(DISAGREEMENT, '', [], []),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['can+reach'], [], 'present simple',
[NominalGroup([], ['it'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_66(self):
        logger.info('\n######################## test 1.66 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll come back on Monday. I'll play with a guitar. I'll play football."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come+back'], [], 'future simple',
[],
[IndirectComplement(['on'], [NominalGroup([], ['Monday'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['a'], ['guitar'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup([], ['football'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_67(self):
        logger.info('\n######################## test 1.67 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll play a guitar, a piano and a violon. I'll play with a guitar, a piano and a violon. Give me everything."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup([], ['everything'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[0]._quantifier = "SOME"
sentences[0].sv[0].d_obj[1]._quantifier = "SOME"
sentences[0].sv[0].d_obj[2]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[1]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[2]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_68(self):
        logger.info('\n######################## test 1.68 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll come back at 7 o'clock tomorrow. He finishes the project 10 minutes before."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come+back'], [], 'future simple',
[],
[IndirectComplement(['at'],
[NominalGroup(['7'], ["o'clock"], [], [], [])])],
[], ['tomorrow'], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['finish'], [], 'present simple',
[NominalGroup(['the'], ['project'], [], [], [])],
[IndirectComplement(['before'],
[NominalGroup(['10'], ['minute'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_69(self):
        logger.info('\n######################## test 1.69 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll play a guitar, a piano and a violon. I'll play with a guitar, a piano and a violon. The boss, you and me are here."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['boss'], [], [], []), NominalGroup([], ['you'], [], [], []),
NominalGroup([], ['me'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[0]._quantifier = "SOME"
sentences[0].sv[0].d_obj[1]._quantifier = "SOME"
sentences[0].sv[0].d_obj[2]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[1]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[2]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_70(self):
        logger.info('\n######################## test 1.70 ##############################')
        logger.info('#################################################################\n')
original_utterance = "A speaking sentence's time is the best. I come at 10 pm. I'll come an evening tomorrow."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['time'], [],
[NominalGroup(['a'], ['sentence'], [['speaking', []]], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'], [], [['best', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come'], [], 'present simple',
[],
[IndirectComplement(['at'], [NominalGroup(['10'], ['pm'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come'], [], 'future simple',
[],
[IndirectComplement([], [NominalGroup(['an'], ['evening'], [], [], [])])],
[], ['tomorrow'], VerbalGroup.affirmative, [])])]
sentences[0].sn[0].noun_cmpl[0]._quantifier = 'SOME'
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
sentences[2].sv[0].i_cmpl[0].gn[0]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
class TestVerbalizationCompleteLoop(unittest.TestCase):
def setUp(self):
self.dialog = Dialog()
self.dialog.start()
def tearDown(self):
self.dialog.stop()
self.dialog.join()
def test_verbalize1(self):
logger.info("\n##################### test_verbalize1: simple statements ########################\n")
myP = Parser()
stmt = "The cup is on the desk."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "The green bottle is next to Joe."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize2(self):
logger.info("\n##################### test_verbalize2: yes/no questions ########################\n")
myP = Parser()
stmt = "Are you a robot?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize3(self):
logger.info("\n##################### test_verbalize3: orders ########################\n")
myP = Parser()
stmt = "Put the yellow banana on the shelf."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Give me the green banana."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Give the green banana to me."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Get the box which is on the table."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Get the box which is in the trashbin."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize4(self):
logger.info("\n##################### test_verbalize4: W questions ########################\n")
myP = Parser()
stmt = "Where is the box?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "What are you doing now?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
        logger.info('>> input: ' + stmt)
        logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize5(self):
logger.info("\n##################### test_verbalize5 ########################\n")
myP = Parser()
stmt = "Jido, tell me where you go."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize6(self):
logger.info("\n##################### test_verbalize 6 ########################\n")
myP = Parser()
stmt = "What blue object do you know?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestVerbalization)
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestVerbalizationCompleteLoop))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(test_suite())
|
bsd-3-clause
| -5,733,380,990,628,415,000
| 62.141201
| 161
| 0.281649
| false
| 6.24676
| true
| false
| false
|
ryanpdwyer/sigutils
|
sigutils/fdls.py
|
1
|
1223
|
"""
Frequency Domain Least Squares
==============================
This algorithm tries to approximate an analytic transfer function :math:`H(s)`.
See digital signal processing book.
- Pick an analytic transfer function H(s)
- Select the numerator order N and denominator order D
- Define M separate input u_m coside sequences, each of length N + 1
- Compute M output y_m cosine sequences, each of length D
- X = ( y(-1)...y(-D) u(0)...u(-N) )
- Y = A_m cos(phi_m)
- Compute the psuedo-inverse
"""
import numpy as np
def butter_lp(f, f0):
    """First-order (Butterworth) low-pass prototype with cutoff f0."""
    return 1/(1+f*1j/f0)
# Let's approximate this with a first-order numerator and denominator
# (a sketch implementation of fdls() is given at the bottom of this file).
# A few lines on the frequency domain least squares algorithm
# See http://dx.doi.org/10.1109/MSP.2007.273077
# import numpy.linalg
# fs = 1000
# f0 = 10
# m = 8192
# n = 513
# d = 0
# f = np.linspace(-0.5, 0.5, m)  # all probe frequencies
# tm = np.arange(-n, 0.5, 1)     # all sample times, -n..0
# zf = butter_lp(f, f0/fs)
# af = np.abs(zf)
# pf = -1 * np.angle(zf)
# f2d, t2d = np.meshgrid(f, tm)
# u = np.cos(2*np.pi*f2d*t2d)
# X = u
# Y = af*np.cos(pf)
# X1 = np.linalg.pinv(X)
# out = np.dot(Y, X1)
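
# --- Sketch implementation --------------------------------------------------
# A minimal, runnable sketch of the FDLS fit outlined in the module docstring,
# solving for the coefficients via the pseudo-inverse. The name `fdls` and the
# signature below are one reasonable choice, not an established API; `freqs`
# is assumed to be in normalized cycles/sample and `H` is the complex target
# response sampled at those frequencies.
def fdls(freqs, H, N, D):
    """Least-squares fit of an order-(N, D) digital filter to the complex
    response ``H`` sampled at normalized frequencies ``freqs``.

    Returns ``(b, a)`` with ``a[0] == 1`` such that B(z)/A(z) approximates H.
    """
    freqs = np.asarray(freqs, dtype=float)
    H = np.asarray(H, dtype=complex)
    amp, phi = np.abs(H), np.angle(H)
    t_y = np.arange(1, D + 1)   # output lags 1..D
    t_u = np.arange(0, N + 1)   # input lags 0..N
    X = np.empty((freqs.size, D + N + 1))
    for i in range(freqs.size):
        w = 2 * np.pi * freqs[i]
        # Negated steady-state outputs y(-1)..y(-D) multiply the a-coefficients
        X[i, :D] = -amp[i] * np.cos(-w * t_y + phi[i])
        # Input cosine samples u(0)..u(-N) multiply the b-coefficients
        X[i, D:] = np.cos(-w * t_u)
    Y = amp * np.cos(phi)       # y(0) at each probe frequency
    coeffs = np.dot(np.linalg.pinv(X), Y)
    return coeffs[D:], np.concatenate(([1.0], coeffs[:D]))


if __name__ == '__main__':
    # Example: fit the first-order low-pass above (f0 = 10 Hz at fs = 1000 Hz,
    # following the exploratory values in the comments) with N = D = 1.
    fs, f0 = 1000.0, 10.0
    f = np.linspace(0.0, 0.5, 512)
    b, a = fdls(f, butter_lp(f, f0 / fs), N=1, D=1)
    print("b = %s, a = %s" % (b, a))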
|
mit
| 7,364,699,537,436,800,000
| 24.5
| 79
| 0.621423
| false
| 2.547917
| false
| false
| false
|
redhat-openstack/glance
|
glance/tests/unit/v2/test_registry_client.py
|
1
|
24550
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Glance Registry's client.
These tests are temporary and will be removed once
the registry's driver tests are added.
"""
import copy
import datetime
import os
import uuid
from mock import patch
from glance.common import config
from glance.common import exception
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance.openstack.common import timeutils
from glance.registry.api import v2 as rserver
import glance.registry.client.v2.api as rapi
from glance.registry.client.v2.api import client as rclient
from glance.tests.unit import base
from glance.tests import utils as test_utils
_gen_uuid = lambda: str(uuid.uuid4())
UUID1 = str(uuid.uuid4())
UUID2 = str(uuid.uuid4())
#NOTE(bcwaldon): needed to init config_dir cli opt
config.parse_args(args=[])
class TestRegistryV2Client(base.IsolatedUnitTest,
test_utils.RegistryAPIMixIn):
"""
    Test that proper actions are taken for both valid and invalid
    requests against a Registry service.
"""
    # Registry server to use
    # in the stub.
registry = rserver
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2Client, self).setUp()
db_api.get_engine()
self.context = context.RequestContext(is_admin=True)
uuid1_time = timeutils.utcnow()
uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
self.FIXTURES = [
self.get_extra_fixture(
id=UUID1, name='fake image #1', is_public=False,
disk_format='ami', container_format='ami', size=13,
virtual_size=26, properties={'type': 'kernel'},
location="swift://user:passwd@acct/container/obj.tar.0",
created_at=uuid1_time),
self.get_extra_fixture(id=UUID2, name='fake image #2',
properties={}, size=19, virtual_size=38,
location="file:///tmp/glance-tests/2",
created_at=uuid2_time)]
self.destroy_fixtures()
self.create_fixtures()
self.client = rclient.RegistryClient("0.0.0.0")
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2Client, self).tearDown()
self.destroy_fixtures()
def test_image_get_index(self):
"""Test correct set of public image returned"""
images = self.client.image_get_all()
self.assertEqual(len(images), 2)
def test_create_image_with_null_min_disk_min_ram(self):
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None,
min_ram=None)
db_api.image_create(self.context, extra_fixture)
image = self.client.image_get(image_id=UUID3)
self.assertEqual(0, image["min_ram"])
self.assertEqual(0, image["min_disk"])
def test_get_index_sort_name_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='name', sort_dir='asc')
self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
def test_get_index_sort_status_desc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by status in
descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
status='queued')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='status', sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_index_sort_disk_format_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by disk_format in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='vdi')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='disk_format',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2),
unjsonify=False)
def test_get_index_sort_container_format_desc(self):
"""
        Tests that the registry API returns a list of
public images sorted alphabetically by container_format in
descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='iso',
container_format='bare')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='container_format',
sort_dir='desc')
self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1),
unjsonify=False)
def test_get_index_sort_size_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami',
size=100, virtual_size=200)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='asdf',
disk_format='iso',
container_format='bare',
size=2, virtual_size=4)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='size', sort_dir='asc')
self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3),
unjsonify=False)
def test_get_index_sort_created_at_asc(self):
"""
        Tests that the registry API returns a list of
public images sorted by created_at in ascending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='created_at',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3),
unjsonify=False)
def test_get_index_sort_updated_at_desc(self):
"""
        Tests that the registry API returns a list of
public images sorted by updated_at in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=None,
updated_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=None,
updated_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='updated_at',
sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_image_get_index_marker(self):
"""Test correct set of images returned with marker param."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID3)
self.assertEqualImages(images, (UUID4, UUID2, UUID1), unjsonify=False)
def test_image_get_index_limit(self):
"""Test correct number of images returned with limit param."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=2)
self.assertEqual(len(images), 2)
def test_image_get_index_marker_limit(self):
"""Test correct set of images returned with marker/limit params."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID4, limit=1)
self.assertEqualImages(images, (UUID2,), unjsonify=False)
def test_image_get_index_limit_None(self):
"""Test correct set of images returned with limit param == None."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=None)
self.assertEqual(len(images), 4)
def test_image_get_index_by_name(self):
"""
        Test that the correct set of public, name-filtered images is
        returned. This is just a sanity check; we test the details call
        more in depth.
"""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(filters={'name': 'new name! #123'})
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_image_get_is_public_v2(self):
"""Tests that a detailed call can be filtered by a property"""
extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving',
properties={'is_public': 'avalue'})
context = copy.copy(self.context)
db_api.image_create(context, extra_fixture)
filters = {'is_public': 'avalue'}
images = self.client.image_get_all(filters=filters)
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('avalue', image['properties'][0]['value'])
def test_image_get(self):
"""Tests that the detailed info about an image returned"""
fixture = self.get_fixture(id=UUID1, name='fake image #1',
is_public=False, size=13, virtual_size=26,
disk_format='ami', container_format='ami')
data = self.client.image_get(image_id=UUID1)
for k, v in fixture.items():
el = data[k]
self.assertEqual(v, data[k],
"Failed v != data[k] where v = %(v)s and "
"k = %(k)s and data[k] = %(el)s" %
dict(v=v, k=k, el=el))
def test_image_get_non_existing(self):
"""Tests that NotFound is raised when getting a non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_get,
image_id=_gen_uuid())
def test_image_create_basic(self):
"""Tests that we can add image metadata and returns the new id"""
fixture = self.get_fixture()
new_image = self.client.image_create(values=fixture)
# Test all other attributes set
data = self.client.image_get(image_id=new_image['id'])
for k, v in fixture.items():
self.assertEqual(v, data[k])
# Test status was updated properly
self.assertIn('status', data)
self.assertEqual('active', data['status'])
def test_image_create_with_properties(self):
"""Tests that we can add image metadata with properties"""
fixture = self.get_fixture(location="file:///tmp/glance-tests/2",
properties={'distro': 'Ubuntu 10.04 LTS'})
new_image = self.client.image_create(values=fixture)
self.assertIn('properties', new_image)
self.assertEqual(new_image['properties'][0]['value'],
fixture['properties']['distro'])
del fixture['location']
del fixture['properties']
for k, v in fixture.items():
self.assertEqual(v, new_image[k])
# Test status was updated properly
self.assertIn('status', new_image.keys())
self.assertEqual('active', new_image['status'])
def test_image_create_already_exists(self):
"""Tests proper exception is raised if image with ID already exists"""
fixture = self.get_fixture(id=UUID2,
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Duplicate,
self.client.image_create,
values=fixture)
def test_image_create_with_bad_status(self):
"""Tests proper exception is raised if a bad status is set"""
fixture = self.get_fixture(status='bad status',
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Invalid,
self.client.image_create,
values=fixture)
def test_image_update(self):
"""Tests that the registry API updates the image"""
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': 'saving'}
self.assertTrue(self.client.image_update(image_id=UUID2,
values=fixture))
# Test all other attributes set
data = self.client.image_get(image_id=UUID2)
for k, v in fixture.items():
self.assertEqual(v, data[k])
def test_image_update_conflict(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual(current, 'active')
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Conflict, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
try:
self.client.image_update(image_id=UUID2, values=fixture,
from_state=from_state)
except exception.Conflict as exc:
msg = (_('cannot transition from %(current)s to '
'%(next)s in update (wanted '
'from_state=%(from)s)') %
{'current': current, 'next': next_state,
'from': from_state})
self.assertEqual(str(exc), msg)
def _test_image_update_not_existing(self):
"""Tests non existing image update doesn't work"""
fixture = self.get_fixture(status='bad status')
self.assertRaises(exception.NotFound,
self.client.image_update,
image_id=_gen_uuid(),
values=fixture)
def test_image_destroy(self):
"""Tests that image metadata is deleted properly"""
# Grab the original number of images
orig_num_images = len(self.client.image_get_all())
# Delete image #2
image = self.FIXTURES[1]
deleted_image = self.client.image_destroy(image_id=image['id'])
self.assertTrue(deleted_image)
self.assertEqual(image['id'], deleted_image['id'])
self.assertTrue(deleted_image['deleted'])
self.assertTrue(deleted_image['deleted_at'])
# Verify one less image
filters = {'deleted': False}
new_num_images = len(self.client.image_get_all(filters=filters))
self.assertEqual(new_num_images, orig_num_images - 1)
def test_image_destroy_not_existing(self):
"""Tests cannot delete non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_destroy,
image_id=_gen_uuid())
def test_image_get_members(self):
"""Tests getting image members"""
memb_list = self.client.image_member_find(image_id=UUID2)
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_image_get_members_not_existing(self):
"""Tests getting non-existent image members"""
self.assertRaises(exception.NotFound,
self.client.image_get_members,
image_id=_gen_uuid())
def test_image_member_find(self):
"""Tests getting member images"""
memb_list = self.client.image_member_find(member='pattieblack')
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_add_update_members(self):
"""Tests updating image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.assertTrue(member)
values['member'] = 'pattieblack2'
self.assertTrue(self.client.image_member_update(memb_id=member['id'],
values=values))
def test_add_delete_member(self):
"""Tests deleting image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.client.image_member_delete(memb_id=member['id'])
memb_list = self.client.image_member_find(member='pattieblack')
self.assertEqual(len(memb_list), 0)
class TestRegistryV2ClientApi(base.IsolatedUnitTest):
"""
    Test that proper actions are taken for both valid and invalid
    requests against a Registry service.
"""
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2ClientApi, self).setUp()
reload(rapi)
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2ClientApi, self).tearDown()
def test_configure_registry_client_not_using_use_user_token(self):
self.config(use_user_token=False)
with patch.object(rapi,
'configure_registry_admin_creds') as mock_rapi:
rapi.configure_registry_client()
mock_rapi.assert_called_once_with()
def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'):
return {
'user': 'user',
'password': 'password',
'username': 'user',
'tenant': 'tenant',
'auth_url': auth_url,
'strategy': strategy,
'region': 'region'
}
def test_configure_registry_admin_creds(self):
expected = self._get_fake_config_creds(auth_url=None,
strategy='configured_strategy')
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_strategy=expected['strategy'])
self.config(auth_region=expected['region'])
self.stubs.Set(os, 'getenv', lambda x: None)
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(rapi._CLIENT_CREDS, expected)
def test_configure_registry_admin_creds_with_auth_url(self):
expected = self._get_fake_config_creds()
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_url=expected['auth_url'])
self.config(auth_strategy='test_strategy')
self.config(auth_region=expected['region'])
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(rapi._CLIENT_CREDS, expected)
|
apache-2.0
| 5,718,802,242,312,375,000
| 37.479624
| 79
| 0.565743
| false
| 4.133693
| true
| false
| false
|
Freso/listenbrainz-server
|
listenbrainz/labs_api/labs/api/recording_from_recording_mbid.py
|
1
|
5867
|
import psycopg2
import psycopg2.extras
from flask import current_app
from datasethoster import Query
psycopg2.extras.register_uuid()
class RecordingFromRecordingMBIDQuery(Query):
'''
Look up a musicbrainz data for a list of recordings, based on MBID.
'''
def names(self):
return ("recording-mbid-lookup", "MusicBrainz Recording by MBID Lookup")
def inputs(self):
return ['[recording_mbid]']
def introduction(self):
return """Look up recording and artist information given a recording MBID"""
def outputs(self):
return ['recording_mbid', 'recording_name', 'length', 'comment', 'artist_credit_id',
'artist_credit_name', '[artist_credit_mbids]', 'original_recording_mbid']
def fetch(self, params, offset=-1, count=-1):
mbids = [p['[recording_mbid]'] for p in params]
with psycopg2.connect(current_app.config['MB_DATABASE_URI']) as conn:
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
                # First, look up any MBIDs that may have been redirected
query = '''SELECT rgr.gid::TEXT AS recording_mbid_old,
r.gid::TEXT AS recording_mbid_new
FROM recording_gid_redirect rgr
JOIN recording r
ON r.id = rgr.new_id
                            WHERE rgr.gid IN %s'''
args = [tuple([psycopg2.extensions.adapt(p) for p in mbids])]
curs.execute(query, tuple(args))
# Build an index with all redirected recordings
redirect_index = {}
inverse_redirect_index = {}
while True:
row = curs.fetchone()
if not row:
break
r = dict(row)
redirect_index[r['recording_mbid_old']] = r['recording_mbid_new']
inverse_redirect_index[r['recording_mbid_new']] = r['recording_mbid_old']
# Now start looking up actual recordings
for i, mbid in enumerate(mbids):
if mbid in redirect_index:
mbids[i] = redirect_index[mbid]
query = '''SELECT r.gid::TEXT AS recording_mbid,
r.name AS recording_name,
r.length,
r.comment,
ac.id AS artist_credit_id,
ac.name AS artist_credit_name,
array_agg(a.gid)::TEXT[] AS artist_credit_mbids
FROM recording r
JOIN artist_credit ac
ON r.artist_credit = ac.id
JOIN artist_credit_name acn
ON ac.id = acn.artist_credit
JOIN artist a
ON acn.artist = a.id
WHERE r.gid
IN %s
GROUP BY r.gid, r.id, r.name, r.length, r.comment, ac.id, ac.name
ORDER BY r.gid'''
args = [tuple([psycopg2.extensions.adapt(p) for p in mbids])]
curs.execute(query, tuple(args))
# Build an index of all the fetched recordings
recording_index = {}
while True:
row = curs.fetchone()
if not row:
break
recording_index[row['recording_mbid']] = dict(row)
# Finally collate all the results, ensuring that we have one entry with original_recording_mbid for each
# input argument
output = []
for p in params:
mbid = p['[recording_mbid]']
try:
r = dict(recording_index[mbid])
except KeyError:
try:
r = dict(recording_index[redirect_index[mbid]])
except KeyError:
output.append({'recording_mbid': None,
'recording_name': None,
'length': None,
'comment': None,
'artist_credit_id': None,
'artist_credit_name': None,
'[artist_credit_mbids]': None,
'original_recording_mbid': mbid})
continue
r['[artist_credit_mbids]'] = [ac_mbid for ac_mbid in r['artist_credit_mbids']]
del r['artist_credit_mbids']
r['original_recording_mbid'] = inverse_redirect_index.get(mbid, mbid)
output.append(r)
# Ideally offset and count should be handled by the postgres query itself, but the 1:1 relationship
# of what the user requests and what we need to fetch is no longer true, so we can't easily use LIMIT/OFFSET.
        # We might be able to use a RIGHT JOIN to fix this, but for now I'm happy to leave this as is. We need to
# revisit this when we get closer to pushing recommendation tools to production.
if offset > 0 and count > 0:
return output[offset:offset+count]
if offset > 0 and count < 0:
return output[offset:]
if offset < 0 and count > 0:
return output[:count]
return output
|
gpl-2.0
| 760,555,066,453,154,600
| 43.112782
| 125
| 0.46736
| false
| 4.90142
| false
| false
| false
|
sagarjauhari/BCIpy
|
eegml.py
|
1
|
10074
|
#!/usr/bin/env python
# Copyright 2013, 2014 Justis Grant Peters and Sagar Jauhari
# This file is part of BCIpy.
#
# BCIpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BCIpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BCIpy. If not, see <http://www.gnu.org/licenses/>.
import csv
import time
import re
from datetime import datetime
from decimal import Decimal
from matplotlib import *
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 15, 6
from os import listdir
from os.path import join, isfile
import numpy as np
import pandas as pd
import pickle
from scipy.stats.stats import pearsonr
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
try: # Import config params
import dev_settings as config
except ImportError:
print "Please create a dev_settings.py using dev_settings.py.example as an example"
def print_config():
print config.DATA_URL
print config.SAVE_URL
def format_time(ti):
"""
    Converts a timestamp like '2010-12-14 16:56:36.996' to a string of
    Unix-epoch seconds (strftime('%s') interprets the time in the local zone)
"""
to = datetime.strptime(ti, '%Y-%m-%d %H:%M:%S.%f')
    # Decimal preserves the sub-second precision
to = Decimal(to.strftime('%s.%f'))
return str(to)
def format_task_xls(indir, outdir):
path_task_xls = join(indir, "task.xls")
path_task_xls_labels = join(outdir, "task_xls_labels.csv")
with open(path_task_xls, 'rb') as fi,\
open(path_task_xls_labels, 'w') as fo:
fr = csv.reader(fi, delimiter='\t')
fw = csv.writer(fo, delimiter='\t')
h = fr.next()
fw.writerow(['taskid',h[0], h[1], h[2], h[3], h[-1]]) #header
for idx, row in enumerate(fr):
row[2] = format_time(row[2])
row[3] = format_time(row[3])
fw.writerow([idx, row[0], row[1], row[2], row[3], row[-1]])
def label_data(in_file, out_file, compressed_label_file, subj_t, time_t, dbg=False):
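    """Merge the 1 Hz samples in ``in_file`` with the task labels read from
    ``out_file`` (the task_xls_labels.csv written by format_task_xls), writing
    each sample plus its difficulty label and task id to
    ``compressed_label_file``. Samples outside any task window get label -1.
    """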
if dbg: print "#"+subj_t + "--------"
with open(in_file, 'rb') as fi,\
open(out_file, 'rb') as fi2,\
open(compressed_label_file, 'w') as fo:
day = time_t[0:4]+"-"+time_t[4:6]+"-"+time_t[6:8]
fr1 = csv.reader(fi, delimiter=',') # combined.csv
fr2 = csv.reader(fi2, delimiter='\t')# xls_labels.csv
fw = csv.writer(fo, delimiter='\t')# combined_label_uncompress.csv
if dbg: print "day: " + day
#headers
fw.writerow(next(fr1, None) + ['Difficulty', 'taskid'] )
next(fr2, None)
#forward till subject data starts
lab_row = fr2.next()
while subj_t != lab_row[2]:
lab_row = fr2.next()
if dbg: print "start: " + str(lab_row[0])
for idx, row in enumerate(fr1):
row[0] = datetime.strptime(day+' '+row[0]+'.0',\
'%Y-%m-%d %H:%M:%S.%f').strftime('%s.%f')
if Decimal(row[0]) < Decimal(lab_row[3]): # t < start_time
if dbg: print str(idx)+": t<start_time"
label = -1
fw.writerow(row + [label, lab_row[0]])
continue
if Decimal(row[0]) <= Decimal(lab_row[4]): # t <= end_time
if dbg: print str(idx)+": t <= end_time"
label = lab_row[5]
fw.writerow(row + [label, lab_row[0]])
continue
            while Decimal(row[0]) > Decimal(lab_row[4]): # t > end_time
try:
lab_row = next(fr2)
label = lab_row[5]
if lab_row[2] != subj_t:
raise Exception("Reached end of data for subject" + subj_t)
except Exception as e: # reached end of file, or next subject
label = -1
if dbg: print e
break
fw.writerow(row + [label,lab_row[0]])
if dbg: print "end: "+str(lab_row[0])
return
def plot_signal(x_ax, y_ax, label, ax=None):
    if ax is None:
        fig, ax = plt.subplots()
    ax.plot(x_ax, y_ax, label=label)
    ax.grid(True)
    ax.get_figure().tight_layout()
    plt.legend(loc='upper left')
    plt.show()
    return ax
def create_sub_dict(indir):
""" Create dict of subject data [1Hz conmbined files]"""
onlyfiles = [ f for f in listdir(indir) if isfile(join(indir,f)) ]
pat = re.compile("[0-9]*\.[0-9]*\.combined\.csv")
temp_dat = [f.split('.')[0:2] for f in onlyfiles if pat.match(f)]
sub_dict = {i[1]: i[0] for i in temp_dat}
return sub_dict
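# Illustrative (assumed file naming): a file '20101214.07.combined.csv' would
# yield sub_dict == {'07': '20101214'}, i.e. subject id -> recording timestamp.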
def label_sub_files(indir, outdir):
""" Label each subject file [1Hz conmbined files]"""
sub_dict = create_sub_dict(indir)
for i in sub_dict:
label_data(indir + "/"+sub_dict[i] + "." +i+".combined.csv",
outdir + "/task_xls_labels.csv",
outdir + "/"+sub_dict[i] + "." +i+".labelled.csv",
i, sub_dict[i])
def get_subject_list(dir_url):
onlyfiles = [ f for f in listdir(dir_url) if isfile(join(dir_url,f)) ]
pat = re.compile("[0-9]*\.[0-9]*\.labelled\.csv")
temp_dat = [f.split('.')[0:2] for f in onlyfiles if pat.match(f)]
sub_dict = {i[1]: i[0] for i in temp_dat}
return sub_dict
def get_data(subj_list, dir_url):
subj_data = {}
for s_id in subj_list.keys():
s_time = subj_list[s_id]
s_file = s_time + "." + s_id + ".labelled.csv"
with open(join(dir_url,s_file), 'rb') as fi:
fr = csv.reader(fi,delimiter="\t")
next(fr) #header
s_data = list(fr)
subj_data[int(s_id)] = s_data
return subj_data
def plot_subject(s_comb, pdfpages, title=None):
"""
Plot each subject's data (1Hz)
"""
fig, ax = plt.subplots()
x_ax = [int(i[0].split('.')[0]) for i in s_comb]
sig_q = [int(i[1]) for i in s_comb]
atten = [int(i[2]) for i in s_comb]
medit = [int(i[3]) for i in s_comb]
diffi = [int(i[4])*50 for i in s_comb]
taskid= [int(i[5]) for i in s_comb]
taskid_set = list(set(taskid))
taskid_norm = [taskid_set.index(i) for i in taskid]
ax.plot(x_ax, sig_q, label='Quality')
ax.plot(x_ax, atten, label='Attention')
ax.plot(x_ax, medit, label='Meditation')
ax.plot(x_ax, diffi, label='Difficulty')
#ax.plot(x_ax, taskid_norm, label='taskid')
ax.grid(True)
fig.tight_layout()
plt.legend(loc='upper left')
plt.title(title)
pdfpages.savefig(fig)
return
def plot_subjects(subj_list, data, pdfpages, count=None):
for i in range(count if count else len(subj_list.keys())):
s1 = subj_list.keys()[i]
plot_subject(data[int(s1)], pdfpages, "Subject: "+s1)
return
def plot_avg_rows(targets, features, pdfpages, n, title):
"""
    Given targets (difficulty) and features, plot the average of each feature
grouped by the difficulty.
"""
print "Plotting Avg of dataframe"
avg_all = features.mean()
features['difficulty']=targets
grouped = features.groupby(by='difficulty')
fig, ax = plt.subplots()
ax.plot(avg_all, label='all')
for d in range(1, 5):
ax.plot(grouped.get_group(d).mean()[0:n-1],
label="difficulty: %d (%d tasks)" % (d,len(grouped.get_group(d))))
plt.legend(loc='upper right')
plt.title(title)
ax.grid(True)
pdfpages.savefig(fig)
def get_num_words(DATA_URL):
path_task_xls = DATA_URL + "/task.xls"
with open(path_task_xls, 'rb') as fi:
fr = csv.reader(fi, delimiter='\t')
next(fr)#header
data = list(fr)
data_cols = zip(*data)
l=len(data_cols[0])
num_words_stim = [float(len(i.split())) for i in data_cols[4]]
num_chars_stim = [float(len(i)) for i in data_cols[4]]
difficulty = [float(i) for i in data_cols[-1]]
time_diff = [float(Decimal(format_time(data_cols[3][i]))-\
Decimal(format_time(data_cols[2][i])))\
for i in xrange(l)]
time_per_word = [time_diff[i]/num_words_stim[i] for i in range(l)]
time_per_char = [time_diff[i]/num_chars_stim[i] for i in range(l)]
sentence_idx=[i for i in xrange(l) if num_words_stim[i] > 1]
print pearsonr(time_per_word, difficulty)
print pearsonr(time_per_char, difficulty)
print pearsonr([time_per_word[i] for i in sentence_idx],
[difficulty[i] for i in sentence_idx])
print pearsonr([time_per_char[i] for i in sentence_idx],
[difficulty[i] for i in sentence_idx])
tpa = [difficulty[i] for i in sentence_idx]
plt.hist(tpa)
def get_performance(x, y):
    """ Measures the performance metrics for x (actual)
    and y (experimental) by comparing all pairs of labels.
    """
if len(x) != len(y):
print "Error: Lengths not same"
return
TP = FN = FP = TN = 0.0
for i in range(0,len(x)):
for j in range(0, len(x)):
if i == j:
continue
if x[i]==x[j] and y[i]==y[j]:
TP = TP + 1
elif x[i]!=x[j] and y[i]!=y[j]:
TN = TN + 1
elif x[i]==x[j] and y[i]!=y[j]:
FN = FN + 1
elif x[i]!=x[j] and y[i]==y[j]:
FP = FP + 1
TP = TP/2
TN = TN/2
FN = FN/2
FP = FP/2
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP/(TP + FP)
recall = TP/(TP + FN)
fscore = 2*precision*recall/(precision + recall)
print " Accuracy: \t" + str(round(accuracy, 3))
print " Precision: \t" + str(round(precision, 3))
print " Recall: \t" + str(round(recall, 3))
print " F-Score: \t" + str(round(fscore, 3))
|
gpl-3.0
| 8,123,222,228,558,660,000
| 32.247525
| 87
| 0.562835
| false
| 3.11118
| false
| false
| false
|
ospalh/kajongg-fork
|
src/hand.py
|
1
|
30524
|
# -*- coding: utf-8 -*-
"""Copyright (C) 2009-2012 Wolfgang Rohdewald <wolfgang@rohdewald.de>
kajongg is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Read the user manual for a description of the interface to this scoring engine
"""
from util import logDebug
from meld import Meld, meldKey, meldsContent, Pairs, CONCEALED
from rule import Score, Ruleset
from common import elements, Debug
class UsedRule(object):
"""use this in scoring, never change class Rule.
If the rule has been used for a meld, pass it"""
def __init__(self, rule, meld=None):
self.rule = rule
self.meld = meld
def __str__(self):
result = self.rule.name
if self.meld:
result += ' ' + str(self.meld)
return result
def __repr__(self):
return 'UsedRule(%s)' % str(self)
class Hand(object):
"""represent the hand to be evaluated"""
# pylint: disable=R0902
# pylint we need more than 10 instance attributes
cache = dict()
misses = 0
hits = 0
@staticmethod
def clearCache(game):
"""clears the cache with Hands"""
if Debug.handCache and Hand.cache:
game.debug('cache hits:%d misses:%d' % (Hand.hits, Hand.misses))
Hand.cache.clear()
Hand.hits = 0
Hand.misses = 0
@staticmethod
def cached(ruleset, string, computedRules=None, robbedTile=None):
"""since a Hand instance is never changed, we can use a cache"""
if computedRules is not None and not isinstance(computedRules, list):
computedRules = list([computedRules])
cRuleHash = '&&'.join([rule.name for rule in computedRules]) if computedRules else 'None'
if isinstance(ruleset, Hand):
cacheId = id(ruleset.player or ruleset.ruleset)
else:
cacheId = id(ruleset)
cacheKey = hash((cacheId, string, robbedTile, cRuleHash))
cache = Hand.cache
if cacheKey in cache:
if cache[cacheKey] is None:
raise Exception('recursion: Hand calls itself for same content')
Hand.hits += 1
return cache[cacheKey]
Hand.misses += 1
cache[cacheKey] = None
result = Hand(ruleset, string,
computedRules=computedRules, robbedTile=robbedTile)
cache[cacheKey] = result
return result
def __init__(self, ruleset, string, computedRules=None, robbedTile=None):
"""evaluate string using ruleset. rules are to be applied in any case.
ruleset can be Hand, Game or Ruleset."""
# silence pylint. This method is time critical, so do not split it into smaller methods
# pylint: disable=R0902,R0914,R0912,R0915
if isinstance(ruleset, Hand):
self.ruleset = ruleset.ruleset
self.player = ruleset.player
self.computedRules = ruleset.computedRules
elif isinstance(ruleset, Ruleset):
self.ruleset = ruleset
self.player = None
else:
self.player = ruleset
self.ruleset = self.player.game.ruleset
self.string = string
self.robbedTile = robbedTile
if computedRules is not None and not isinstance(computedRules, list):
computedRules = list([computedRules])
self.computedRules = computedRules or []
self.__won = False
self.mjStr = ''
self.mjRule = None
self.ownWind = None
self.roundWind = None
tileStrings = []
mjStrings = []
haveM = False
splits = self.string.split()
for part in splits:
partId = part[0]
if partId in 'Mmx':
haveM = True
self.ownWind = part[1]
self.roundWind = part[2]
mjStrings.append(part)
self.__won = partId == 'M'
elif partId == 'L':
if len(part[1:]) > 8:
                    raise Exception('last tile cannot complete a kong: ' + self.string)
mjStrings.append(part)
else:
tileStrings.append(part)
if not haveM:
            raise Exception('Hand got string without mMx: %s' % self.string)
self.mjStr = ' '.join(mjStrings)
self.__lastTile = self.__lastSource = self.__announcements = ''
self.__lastMeld = 0
self.__lastMelds = []
self.hiddenMelds = []
self.declaredMelds = []
self.melds = []
tileString = ' '.join(tileStrings)
self.bonusMelds, tileString = self.__separateBonusMelds(tileString)
self.tileNames = Pairs(tileString.replace(' ','').replace('R', ''))
self.tileNames.sort()
self.values = ''.join(x[1] for x in self.tileNames)
self.suits = set(x[0].lower() for x in self.tileNames)
self.lenOffset = self.__computeLenOffset(tileString)
self.dragonMelds, self.windMelds = self.__computeDragonWindMelds(tileString)
self.__separateMelds(tileString)
self.hiddenMelds = sorted(self.hiddenMelds, key=meldKey)
self.tileNamesInHand = sum((x.pairs for x in self.hiddenMelds), [])
self.sortedMeldsContent = meldsContent(self.melds)
if self.bonusMelds:
self.sortedMeldsContent += ' ' + meldsContent(self.bonusMelds)
self.usedRules = []
self.score = None
oldWon = self.won
self.__applyRules()
if len(self.lastMelds) > 1:
self.__applyBestLastMeld()
if self.won != oldWon:
# if not won after all, this might be a long hand.
# So we might even have to unapply meld rules and
# bonus points. Instead just recompute all again.
# This should only happen with scoring manual games
# and with scoringtest - normally kajongg would not
# let you declare an invalid mah jongg
self.__applyRules()
@property
def lastTile(self):
"""compute and cache, readonly"""
if self.__lastTile == '':
self.__setLastTile()
return self.__lastTile
@property
def lastSource(self):
"""compute and cache, readonly"""
if self.__lastTile == '':
self.__setLastTile()
return self.__lastSource
@property
def announcements(self):
"""compute and cache, readonly"""
if self.__lastTile == '':
self.__setLastTile()
return self.__announcements
@property
def lastMeld(self):
"""compute and cache, readonly"""
if self.__lastMeld == 0:
self.__setLastMeld()
return self.__lastMeld
@property
def lastMelds(self):
"""compute and cache, readonly"""
if self.__lastMeld == 0:
self.__setLastMeld()
return self.__lastMelds
@property
def won(self):
"""have we been modified since load or last save?
The "won" value is set to True when instantiating the hand,
according to the mMx in the init string. Later on, it may
only be cleared."""
return self.__won
@won.setter
def won(self, value):
"""must never change to True"""
value = bool(value)
assert not value
self.__won = value
self.string = self.string.replace(' M', ' m')
self.mjStr = self.mjStr.replace(' M', ' m')
def debug(self, msg, btIndent=None):
"""try to use Game.debug so we get a nice prefix"""
if self.player:
self.player.game.debug(msg, btIndent=btIndent)
else:
logDebug(msg, btIndent=btIndent)
def __applyRules(self):
"""find out which rules apply, collect in self.usedRules.
This may change self.won"""
self.usedRules = list([UsedRule(rule) for rule in self.computedRules])
if self.__hasExclusiveRules():
return
self.__applyMeldRules()
self.__applyHandRules()
if self.__hasExclusiveRules():
return
self.score = self.__totalScore()
# do the rest only if we know all tiles of the hand
if 'Xy' in self.string:
self.won = False # we do not know better
return
if self.won:
matchingMJRules = self.__maybeMahjongg()
if not matchingMJRules:
self.won = False
self.score = self.__totalScore()
return
self.mjRule = matchingMJRules[0]
self.usedRules.append(UsedRule(self.mjRule))
if self.__hasExclusiveRules():
return
self.usedRules.extend(self.matchingWinnerRules())
self.score = self.__totalScore()
else: # not self.won
assert self.mjRule is None
loserRules = self.__matchingRules(self.ruleset.loserRules)
if loserRules:
self.usedRules.extend(list(UsedRule(x) for x in loserRules))
self.score = self.__totalScore()
def matchingWinnerRules(self):
"""returns a list of matching winner rules"""
matching = self.__matchingRules(self.ruleset.winnerRules)
for rule in matching:
if (self.ruleset.limit and rule.score.limits >= 1) or 'absolute' in rule.options:
return [UsedRule(rule)]
return list(UsedRule(x) for x in matching)
def __hasExclusiveRules(self):
"""if we have one, remove all others"""
exclusive = list(x for x in self.usedRules if 'absolute' in x.rule.options)
if exclusive:
self.usedRules = exclusive
self.score = self.__totalScore()
self.won = self.__maybeMahjongg()
return bool(exclusive)
def __setLastTile(self):
"""sets lastTile, lastSource, announcements"""
self.__announcements = ''
self.__lastTile = None
self.__lastSource = None
parts = self.mjStr.split()
for part in parts:
if part[0] == 'L':
part = part[1:]
if len(part) > 2:
self.__lastMeld = Meld(part[2:])
self.__lastTile = part[:2]
elif part[0] == 'M':
if len(part) > 3:
self.__lastSource = part[3]
if len(part) > 4:
self.__announcements = part[4:]
if self.__lastTile:
assert self.__lastTile in self.tileNames, 'lastTile %s is not in tiles %s, mjStr=%s' % (
self.__lastTile, ' '.join(self.tileNames), self.mjStr)
if self.__lastSource == 'k':
assert self.tileNames.count(self.__lastTile.lower()) + \
self.tileNames.count(self.__lastTile.capitalize()) == 1, \
'Robbing kong: I cannot have lastTile %s more than once in %s' % (
self.__lastTile, ' '.join(self.tileNames))
def __setLastMeld(self):
"""sets the shortest possible last meld. This is
not yet the final choice, see __applyBestLastMeld"""
self.__lastMeld = None
if self.lastTile and self.won:
if hasattr(self.mjRule.function, 'computeLastMelds'):
self.__lastMelds = self.mjRule.function.computeLastMelds(self)
if self.__lastMelds:
# syncHandBoard may return nothing
if len(self.__lastMelds) == 1:
self.__lastMeld = self.__lastMelds[0]
else:
totals = sorted((len(x), idx) for idx, x in enumerate(self.__lastMelds))
self.__lastMeld = self.__lastMelds[totals[0][1]]
if not self.__lastMeld:
self.__lastMeld = Meld([self.lastTile])
self.__lastMelds = [self.__lastMeld]
def __applyBestLastMeld(self):
"""select the last meld giving the highest score (only winning variants)"""
assert len(self.lastMelds) > 1
totals = []
prev = self.lastMeld
for lastMeld in self.lastMelds:
self.__lastMeld = lastMeld
self.__applyRules()
totals.append((self.won, self.__totalScore().total(), lastMeld))
if any(x[0] for x in totals): # if any won
totals = list(x[1:] for x in totals if x[0]) # remove lost variants
totals = sorted(totals) # sort by totalScore
maxScore = totals[-1][0]
totals = list(x[1] for x in totals if x[0] == maxScore)
# now we have a list of only lastMelds reaching maximum score
if prev not in totals or self.__lastMeld not in totals:
if Debug.explain and prev not in totals:
if not self.player or not self.player.game.belongsToRobotPlayer():
self.debug('replaced last meld %s with %s' % (prev, totals[0]))
self.__lastMeld = totals[0]
self.__applyRules()
def __sub__(self, tiles):
"""returns a copy of self minus tiles. Case of tiles (hidden
or exposed) is ignored. If the tile is not hidden
but found in an exposed meld, this meld will be hidden with
the tile removed from it. Exposed melds of length<3 will also
be hidden."""
# pylint: disable=R0912
# pylint says too many branches
if not isinstance(tiles, list):
tiles = list([tiles])
hidden = 'R' + ''.join(self.tileNamesInHand)
# exposed is a deep copy of declaredMelds. If lastMeld is given, it
# must be first in the list.
exposed = (Meld(x) for x in self.declaredMelds)
if self.lastMeld:
exposed = sorted(exposed, key=lambda x: (x.pairs != self.lastMeld.pairs, meldKey(x)))
else:
exposed = sorted(exposed, key=meldKey)
bonus = sorted(Meld(x) for x in self.bonusMelds)
for tile in tiles:
assert isinstance(tile, str) and len(tile) == 2, 'Hand.__sub__:%s' % tiles
if tile.capitalize() in hidden:
hidden = hidden.replace(tile.capitalize(), '', 1)
elif tile[0] in 'fy': # bonus tile
for idx, meld in enumerate(bonus):
if tile == meld.pairs[0]:
del bonus[idx]
break
else:
for idx, meld in enumerate(exposed):
if tile.lower() in meld.pairs:
del meld.pairs[meld.pairs.index(tile.lower())]
del exposed[idx]
meld.conceal()
hidden += meld.joined
break
for idx, meld in enumerate(exposed):
if len(meld.pairs) < 3:
del exposed[idx]
meld.conceal()
hidden += meld.joined
mjStr = self.mjStr
if self.lastTile in tiles:
parts = mjStr.split()
newParts = []
for idx, part in enumerate(parts):
if part[0] == 'M':
part = 'm' + part[1:]
if len(part) > 3 and part[3] == 'k':
part = part[:3]
elif part[0] == 'L':
continue
newParts.append(part)
mjStr = ' '.join(newParts)
newString = ' '.join([hidden, meldsContent(exposed), meldsContent(bonus), mjStr])
return Hand.cached(self, newString, self.computedRules)
def manualRuleMayApply(self, rule):
"""returns True if rule has selectable() and applies to this hand"""
if self.won and rule in self.ruleset.loserRules:
return False
if not self.won and rule in self.ruleset.winnerRules:
return False
return rule.selectable(self) or rule.appliesToHand(self) # needed for activated rules
def callingHands(self, wanted=1, excludeTile=None, mustBeAvailable=False):
"""the hand is calling if it only needs one tile for mah jongg.
Returns up to 'wanted' hands which would only need one tile.
If mustBeAvailable is True, make sure the missing tile might still
be available.
"""
result = []
string = self.string
if ' x' in string or self.lenOffset:
return result
for rule in self.ruleset.mjRules:
# sort only for reproducibility
if not hasattr(rule, 'winningTileCandidates'):
raise Exception('rule %s, code=%s has no winningTileCandidates' % (
rule.name, rule.function))
candidates = sorted(x.capitalize() for x in rule.winningTileCandidates(self))
for tileName in candidates:
if excludeTile and tileName == excludeTile.capitalize():
continue
if mustBeAvailable and not self.player.tileAvailable(tileName, self):
continue
hand = self.picking(tileName)
if hand.won:
result.append(hand)
if len(result) == wanted:
break
if len(result) == wanted:
break
return result
def __maybeMahjongg(self):
"""check if this is a mah jongg hand.
Return a sorted list of matching MJ rules, highest
total first"""
if not self.won:
return []
if self.lenOffset != 1:
return []
matchingMJRules = [x for x in self.ruleset.mjRules if x.appliesToHand(self)]
if self.robbedTile and self.robbedTile.istitle():
# Millington 58: robbing hidden kong is only allowed for 13 orphans
matchingMJRules = [x for x in matchingMJRules if 'mayrobhiddenkong' in x.options]
return sorted(matchingMJRules, key=lambda x: -x.score.total())
def splitRegex(self, rest):
"""split rest into melds as good as possible"""
rest = ''.join(rest)
melds = []
for rule in self.ruleset.splitRules:
splits = rule.apply(rest)
            while len(splits) > 1:
for split in splits[:-1]:
melds.append(Meld(split))
rest = splits[-1]
splits = rule.apply(rest)
if len(splits) == 0:
break
        if len(splits) == 1:
assert Meld(splits[0]).isValid() # or the splitRules are wrong
return melds
def __recurse(self, cVariants, foundMelds, rest, maxPairs, color):
"""build the variants recursively"""
melds = []
for value in set(rest):
intValue = int(value)
if rest.count(value) == 3:
melds.append([value] * 3)
elif rest.count(value) == 2:
melds.append([value] * 2)
if rest.count(str(intValue + 1)) and rest.count(str(intValue + 2)):
melds.append([value, str(intValue+1), str(intValue+2)])
pairsFound = sum(len(x) == 2 for x in foundMelds)
        for meld in (m for m in melds if len(m) != 2 or pairsFound < maxPairs):
restCopy = rest[:]
for value in meld:
restCopy.remove(value)
newMelds = foundMelds[:]
newMelds.append(meld)
if restCopy:
self.__recurse(cVariants, newMelds, restCopy, maxPairs, color)
else:
for idx, newMeld in enumerate(newMelds):
newMelds[idx] = ''.join(color+x for x in newMeld)
                cVariants.append(' '.join(sorted(newMelds)))
def genVariants(self, original0, maxPairs=1):
"""generates all possible meld variants out of original
where original is a list of tile values like ['1','1','2']"""
color = original0[0][0]
original = [x[1] for x in original0]
cVariants = []
self.__recurse(cVariants, [], original, maxPairs, color)
gVariants = []
for cVariant in set(cVariants):
melds = [Meld(x) for x in cVariant.split()]
gVariants.append(melds)
if not gVariants:
gVariants.append(self.splitRegex(original0)) # fallback: nothing useful found
return gVariants
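    # Illustrative (assumed values): genVariants(['s1', 's1', 's2', 's2', 's3', 's3'])
    # yields [[Meld('s1s2s3'), Meld('s1s2s3')]], the only arrangement of these
    # values into pungs/chows with at most one pair.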
# TODO: get rid of __split, the mjRules should do that if they need it at all
# only __split at end of Hand.__init__, now we do it twice for winning hands
def __split(self, rest):
"""work hard to always return the variant with the highest Mah Jongg value.
Adds melds to self.melds.
only one special mjRule may try to rearrange melds.
A rest will be rearranged by standard rules."""
if 'Xy' in rest:
# hidden tiles of other players:
self.melds.extend(self.splitRegex(rest))
return
arrangements = []
for mjRule in self.ruleset.mjRules:
func = mjRule.function
if func.__class__.__name__ == 'StandardMahJongg':
stdMJ = func
if self.mjRule:
rules = [self.mjRule]
else:
rules = self.ruleset.mjRules
for mjRule in rules:
func = mjRule.function
if func != stdMJ and hasattr(func, 'rearrange'):
if ((self.lenOffset == 1 and func.appliesToHand(self))
or (self.lenOffset < 1 and func.shouldTry(self))):
melds, pairs = func.rearrange(self, rest[:])
if melds:
arrangements.append((mjRule, melds, pairs))
if arrangements:
# TODO: we should know for each arrangement how many tiles for MJ are still needed.
# If len(pairs) == 4, one or up to three might be needed. That would allow for better AI.
# TODO: if hand just completed and we did not win, only try stdmj
arrangement = sorted(arrangements, key=lambda x: len(x[2]))[0]
self.melds.extend(arrangement[1])
self.melds.extend([Meld(x) for x in arrangement[2]])
assert len(''.join(x.joined for x in self.melds)) == len(self.tileNames) * 2, '%s != %s' % (
meldsContent(self.melds), self.tileNames)
else:
# stdMJ is special because it might build more than one pair
# the other special hands would put that into the rest
# if the above TODO is done, stdMJ does not have to be special anymore
melds, _ = stdMJ.rearrange(self, rest[:])
self.melds.extend(melds)
assert len(''.join(x.joined for x in self.melds)) == len(self.tileNames) * 2, '%s != %s' % (
meldsContent(self.melds), self.tileNames)
def countMelds(self, key):
"""count melds having key"""
result = 0
if isinstance(key, str):
for meld in self.melds:
if meld.tileType() in key:
result += 1
else:
for meld in self.melds:
if key(meld):
result += 1
return result
def __matchingRules(self, rules):
"""return all matching rules for this hand"""
return list(rule for rule in rules if rule.appliesToHand(self))
def __applyMeldRules(self):
"""apply all rules for single melds"""
for rule in self.ruleset.meldRules:
for meld in self.melds + self.bonusMelds:
if rule.appliesToMeld(self, meld):
self.usedRules.append(UsedRule(rule, meld))
def __applyHandRules(self):
"""apply all hand rules for both winners and losers"""
for rule in self.ruleset.handRules:
if rule.appliesToHand(self):
self.usedRules.append(UsedRule(rule))
def __totalScore(self):
"""use all used rules to compute the score"""
pointsTotal = Score(ruleset=self.ruleset)
maxLimit = 0.0
maxRule = None
for usedRule in self.usedRules:
score = usedRule.rule.score
if score.limits:
# we assume that a hand never gets different limits combined
maxLimit = max(maxLimit, score.limits)
maxRule = usedRule
else:
pointsTotal += score
if maxLimit:
if maxLimit >= 1.0 or maxLimit * self.ruleset.limit > pointsTotal.total():
self.usedRules = [maxRule]
return Score(ruleset=self.ruleset, limits=maxLimit)
return pointsTotal
def total(self):
"""total points of hand"""
return self.score.total()
def __computeLenOffset(self, tileString):
"""lenOffset is <0 for short hand, 0 for correct calling hand, >0 for long hand.
Of course ignoring bonus tiles.
if there are no kongs, 13 tiles will return 0"""
result = len(self.tileNames) - 13
for split in tileString.split():
if split[0] != 'R':
if Meld(split).isKong():
result -= 1
return result
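    # Example (assumed): 14 tiles including one declared kong give
    # (14 - 13) - 1 = 0, i.e. exactly a calling hand.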
@staticmethod
def __computeDragonWindMelds(tileString):
"""returns lists with melds containing all (even single)
dragons respective winds"""
dragonMelds = []
windMelds = []
for split in tileString.split():
if split[0] == 'R':
pairs = Pairs(split[1:])
for lst, tiles in ((windMelds, elements.wINDS), (dragonMelds, elements.dRAGONS)):
for tile in tiles:
count = pairs.count(tile)
if count:
lst.append(Meld([tile] * count))
elif split[0] in 'dD':
dragonMelds.append(Meld(split))
elif split[0] in 'wW':
windMelds.append(Meld(split))
return dragonMelds, windMelds
@staticmethod
def __separateBonusMelds(tileString):
"""keep them separate. One meld per bonus tile. Others depend on that."""
result = []
if 'f' in tileString or 'y' in tileString:
for pair in Pairs(tileString.replace(' ','').replace('R', '')):
if pair[0] in 'fy':
result.append(Meld(pair))
tileString = tileString.replace(pair, '', 1)
return result, tileString
def __separateMelds(self, tileString):
"""build a meld list from the hand string"""
# no matter how the tiles are grouped make a single
# meld for every bonus tile
# we need to remove spaces from the hand string first
# for building only pairs with length 2
splits = tileString.split()
rest = ''
for split in splits:
if split[0] == 'R':
rest = split[1:]
else:
meld = Meld(split)
self.melds.append(meld)
self.declaredMelds.append(meld)
if rest:
rest = sorted([rest[x:x+2] for x in range(0, len(rest), 2)])
self.__split(rest)
self.melds = sorted(self.melds, key=meldKey)
for meld in self.melds:
if not meld.isValid():
raise Exception('%s has an invalid meld: %s' % (self.string, meld.joined))
self.__categorizeMelds()
def picking(self, tileName):
"""returns a new Hand built from this one plus tileName"""
assert tileName.istitle(), 'tileName %s should be title:' % tileName
parts = self.string.split()
mPart = ''
rPart = 'R%s' % tileName
unchanged = []
for part in parts:
if part[0] in 'SBCDW':
rPart += part
elif part[0] == 'R':
rPart += part[1:]
elif part[0].lower() == 'm':
mPart = part
elif part[0] == 'L':
pass
else:
unchanged.append(part)
# combine all parts about hidden tiles plus the new one to one part
# because something like DrDrS8S9 plus S7 will have to be reordered
# anyway
# set the "won" flag M
parts = unchanged
parts.extend([rPart, mPart.capitalize(), 'L%s' % tileName])
return Hand.cached(self, ' '.join(parts))
def __categorizeMelds(self):
"""categorize: hidden, declared"""
self.hiddenMelds = []
self.declaredMelds = []
for meld in self.melds:
if meld.state == CONCEALED and not meld.isKong():
self.hiddenMelds.append(meld)
else:
self.declaredMelds.append(meld)
def explain(self):
"""explain what rules were used for this hand"""
result = [x.rule.explain() for x in self.usedRules
if x.rule.score.points]
result.extend([x.rule.explain() for x in self.usedRules
if x.rule.score.doubles])
result.extend([x.rule.explain() for x in self.usedRules
if not x.rule.score.points and not x.rule.score.doubles])
if any(x.rule.debug for x in self.usedRules):
result.append(str(self))
return result
def doublesEstimate(self):
"""this is only an estimate because it only uses meldRules and handRules,
but not things like mjRules, winnerRules, loserRules"""
result = 0
for meld in self.dragonMelds + self.windMelds:
for rule in self.ruleset.doublingMeldRules:
if rule.appliesToMeld(self, meld):
result += rule.score.doubles
for rule in self.ruleset.doublingHandRules:
if rule.appliesToHand(self):
result += rule.score.doubles
return result
def __str__(self):
"""hand as a string"""
return u' '.join([self.sortedMeldsContent, self.mjStr])
def __repr__(self):
"""the default representation"""
return 'Hand(%s)' % str(self)
|
gpl-2.0
| -5,353,195,466,614,206,000
| 39.862115
| 104
| 0.56146
| false
| 3.94265
| false
| false
| false
|
kcsry/wurst
|
wurst/core/migrations/0001_initial.py
|
1
|
4493
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-30 19:14
from __future__ import unicode_literals
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import enumfields.fields
import wurst.core.consts
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(db_index=True, max_length=32, unique=True)),
('title', models.CharField(max_length=140)),
('description', models.TextField(blank=True)),
('start_date', models.DateField(blank=True, null=True)),
('due_date', models.DateField(blank=True, null=True)),
('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now, editable=False)),
('assignee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='issues_assigned', to=settings.AUTH_USER_MODEL)),
('creator', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='issues_created', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='IssueType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)),
('nouns', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='Priority',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)),
('nouns', models.TextField(blank=True)),
('value', models.IntegerField(db_index=True, default=0)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)),
('prefix', models.CharField(max_length=10, unique=True)),
],
),
migrations.CreateModel(
name='Status',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)),
('category', enumfields.fields.EnumIntegerField(db_index=True, default=0, enum=wurst.core.consts.StatusCategory)),
('value', models.IntegerField(db_index=True, default=0)),
],
),
migrations.AddField(
model_name='issue',
name='priority',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issues', to='wurst.Priority'),
),
migrations.AddField(
model_name='issue',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issues', to='wurst.Project'),
),
migrations.AddField(
model_name='issue',
name='status',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issues', to='wurst.Status'),
),
migrations.AddField(
model_name='issue',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issues', to='wurst.IssueType'),
),
]
|
mit
| -5,385,830,359,492,303,000
| 46.294737
| 191
| 0.594258
| false
| 4.199065
| false
| false
| false
|
kukushdi3981/sel-1_test-project
|
task14_check_handling_new_windows.py
|
1
|
3198
|
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
@pytest.fixture
def driver(request):
wd = webdriver.Chrome() # Optional argument, if not specified will search path.
# wd = webdriver.Ie()
print(wd.capabilities)
# wd.implicitly_wait(10)
request.addfinalizer(wd.quit)
return wd
def login(driver, username, password):
driver.find_element_by_name("username").send_keys(username)
driver.find_element_by_name("password").send_keys(password)
driver.find_element_by_name("login").click()
def logout(driver):
WebDriverWait(driver, 5).until(lambda driver : driver.find_element_by_css_selector("div.header"))
driver.find_element_by_css_selector("a[title='Logout']").click()
def open_add_new_country_page(driver):
WebDriverWait(driver, 5).until(lambda driver : driver.find_element_by_css_selector("div#box-apps-menu-wrapper"))
driver.find_element_by_css_selector("div#box-apps-menu-wrapper a[href$=countries]").click()
    # wait for the page heading to appear after the click
WebDriverWait(driver, 5).until(lambda driver : driver.find_element_by_css_selector("h1"))
driver.find_element_by_css_selector("#content a.button").click()
def open_and_close_new_windows(webdriver, element):
wait = WebDriverWait(webdriver, 10)
    # remember the handle of the current window
main_window = webdriver.current_window_handle
    # remember the handles of the already opened windows
exist_windows = webdriver.window_handles
    # open the new window
element.click()
    # wait for a new window to appear, i.e. one whose
    # handle is not in the exist_windows list
wait.until(lambda webdriver: len(exist_windows) != len(webdriver.window_handles))
handles = webdriver.window_handles
handles.remove(main_window)
    # switch to the new window
webdriver.switch_to_window(handles[0])
# webdriver.switch_to_window(webdriver.window_handles[-1])
    # wait for the page to load in the new window
wait.until(lambda webdriver : webdriver.find_element_by_css_selector("h1"))
webdriver.close()
    # switch back to the original window
webdriver.switch_to_window(main_window)
def click_links_to_open_windows(driver):
WebDriverWait(driver, 5).until(lambda driver : driver.find_element_by_css_selector("td#content"))
links = driver.find_elements_by_css_selector("form a[target='_blank']")
for link in links:
open_and_close_new_windows(driver, link)
driver.find_element_by_css_selector("span.button-set button[name='cancel']").click()
def test_check_handle_new_windows(driver):
driver.get('http://localhost/litecart/admin/')
login(driver, "admin", "admin")
open_add_new_country_page(driver)
click_links_to_open_windows(driver)
logout(driver)
|
apache-2.0
| 3,625,667,480,234,511,400
| 37.853333
| 116
| 0.731984
| false
| 2.859666
| false
| false
| false
|
mathandy/Classifiers2LearnWith
|
classifiers/tensorflow/vgg16_pre-trained.py
|
1
|
9534
|
"""A pre-trained implimentation of VGG16 with weights trained on ImageNet."""
##########################################################################
# Special thanks to
# http://www.cs.toronto.edu/~frossard/post/vgg16/
# for converting the caffe VGG16 pre-trained weights to TensorFlow
# this file is essentially just a restylized version of his vgg16.py
##########################################################################
from __future__ import print_function, absolute_import, division
import os
import numpy as np
from scipy.misc import imread, imresize
import tensorflow as tf
_debug = True
def conv_layer(input_tensor, diameter, in_dim, out_dim, name=None):
"""Creates a convolutional layer with
Args:
input_tensor: A `Tensor`.
diameter: An `int`, the width and also height of the filter.
in_dim: An `int`, the number of input channels.
out_dim: An `int`, the number of output channels.
name: A `str`, the name for the operation defined by this function.
"""
with tf.name_scope(name):
filter_shape = (diameter, diameter, in_dim, out_dim)
initial_weights = tf.truncated_normal(filter_shape, stddev=0.1)
weights = tf.Variable(initial_weights, name='weights')
conv = tf.nn.conv2d(input=input_tensor,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME',
name='convolution')
initial_biases = tf.constant(1.0, shape=[out_dim], dtype=tf.float32)
biases = tf.Variable(initial_biases, name='biases')
preactivations = tf.nn.bias_add(conv, biases, name='bias_addition')
activations = tf.nn.relu(preactivations, name='activation')
return activations, weights, biases
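# Illustrative (assumed shapes): applied to a (batch, 224, 224, 3) input,
# conv_layer(x, 3, 3, 64, 'conv1_1') yields a (batch, 224, 224, 64)
# activation tensor plus its weight and bias variables.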
def fc_layer(in_tensor, in_dim, out_dim, sigmoid=tf.nn.relu, name=None):
"""Creates a fully-connected (ReLU by default) layer with
Args:
in_tensor: A `Tensor`.
in_dim: An `int`, the number of input channels.
out_dim: An `int`, the number of output channels.
sigmoid: A `function`, the activation operation, defaults to tf.nn.relu.
name: A `str`, the name for the operation defined by this function.
"""
with tf.name_scope(name):
initial_weights = tf.truncated_normal((in_dim, out_dim), stddev=0.1)
weights = tf.Variable(initial_weights, name='weights')
initial_biases = tf.constant(0.0, shape=[out_dim], dtype=tf.float32)
biases = tf.Variable(initial_biases, name='biases')
preactivations = tf.nn.bias_add(tf.matmul(in_tensor, weights), biases)
activations = sigmoid(preactivations, name='activation')
return activations, weights, biases
class PreTrainedVGG16:
    def __init__(self, weights=None, session=None):
        self.input_images = tf.placeholder(tf.float32, (None, 224, 224, 3))
        self.activations, self.parameters = self._build_graph()
        self.output = self.activations['fc3']
        self.session = session
        if weights is not None and session is not None:
            self.load_weights(weights, session)
def load_weights(self, weight_file, session):
weights = np.load(weight_file)
keys = sorted(weights.keys())
for i, k in enumerate(keys):
session.run(self.parameters[i].assign(weights[k]))
@staticmethod
def get_class_names():
with open('ImageNet_Classes.txt') as names_file:
return [l.replace('\n', '') for l in names_file]
def get_output(self, images, auto_resize=True):
""""Takes in a list of images and returns softmax probabilities."""
if auto_resize:
images_ = [imresize(im, (224, 224)) for im in images]
else:
images_ = images
        feed_dict = {self.input_images: images_}
        return self.session.run(self.output, feed_dict)[0]
def get_activations(self, images, auto_resize=True):
""""Takes in a list of images and returns the activation dictionary."""
if auto_resize:
images_ = np.array([imresize(im, (224, 224)) for im in images])
else:
images_ = np.array(images)
        feed_dict = {self.input_images: images_}
        return self.session.run(self.activations, feed_dict)
def _build_graph(self):
parameters = [] # storage for trainable parameters
# pooling arguments
_ksize = [1, 2, 2, 1]
_strides = [1, 2, 2, 1]
# center the input images
with tf.name_scope('preprocess_centering'):
mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
shape=[1, 1, 1, 3], name='img_mean')
c_images = self.input_images - mean
# images --> conv1_1 --> conv1_2 --> pool1
print("hi", tf.shape(c_images))
conv1_1, weights1, biases1 = conv_layer(c_images, 3, 3, 64, 'conv1_1')
conv1_2, weights2, biases2 = conv_layer(conv1_1, 3, 64, 64, 'conv1_2')
pool1 = tf.nn.max_pool(conv1_2, _ksize, _strides, 'SAME', name='pool1')
parameters += [weights1, biases1, weights2, biases2]
# pool1 --> conv2_1 --> conv2_2 --> pool2
conv2_1, weights1, biases1 = conv_layer(pool1, 3, 64, 128, 'conv2_1')
conv2_2, weights2, biases2 = conv_layer(conv2_1, 3, 128, 128, 'conv2_2')
pool2 = tf.nn.max_pool(conv2_2, _ksize, _strides, 'SAME', name='pool2')
parameters += [weights1, biases1, weights2, biases2]
# pool2 --> conv3_1 --> conv3_2 --> conv3_3 --> pool3
conv3_1, weights1, biases1 = conv_layer(pool2, 3, 128, 256, 'conv3_1')
conv3_2, weights2, biases2 = conv_layer(conv3_1, 3, 256, 256, 'conv3_2')
conv3_3, weights3, biases3 = conv_layer(conv3_2, 3, 256, 256, 'conv3_3')
pool3 = tf.nn.max_pool(conv3_3, _ksize, _strides, 'SAME', name='pool3')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
# pool3 --> conv4_1 --> conv4_2 --> conv4_3 --> pool4
conv4_1, weights1, biases1 = conv_layer(pool3, 3, 256, 512, 'conv4_1')
conv4_2, weights2, biases2 = conv_layer(conv4_1, 3, 512, 512, 'conv4_2')
conv4_3, weights3, biases3 = conv_layer(conv4_2, 3, 512, 512, 'conv4_3')
pool4 = tf.nn.max_pool(conv4_3, _ksize, _strides, 'SAME', name='pool4')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
# pool4 --> conv5_1 --> conv5_2 --> conv5_3 --> pool5
conv5_1, weights1, biases1 = conv_layer(pool4, 3, 512, 512, 'conv5_1')
conv5_2, weights2, biases2 = conv_layer(conv5_1, 3, 512, 512, 'conv5_2')
conv5_3, weights3, biases3 = conv_layer(conv5_2, 3, 512, 512, 'conv5_3')
pool5 = tf.nn.max_pool(conv5_3, _ksize, _strides, 'SAME', name='pool5')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
# pool5 --> flatten --> fc1 --> fc2 --> fc3
shape = int(np.prod(pool5.get_shape()[1:]))
pool5_flat = tf.reshape(pool5, [-1, shape])
fc1, weights1, biases1 = fc_layer(pool5_flat, shape, 4096, name='fc1')
fc2, weights2, biases2 = fc_layer(fc1, 4096, 4096, name='fc2')
fc3, weights3, biases3 = fc_layer(fc2, 4096, 1000, tf.nn.softmax, 'fc3')
parameters += [weights1, biases1, weights2, biases2, weights3, biases3]
activations = {
'conv1_1': conv1_1, 'conv1_2': conv1_2, 'pool1': pool1,
'conv2_1': conv2_1, 'conv2_2': conv2_2, 'pool2': pool2,
'conv3_1': conv3_1, 'conv3_2': conv3_2, 'conv3_3': conv3_3, 'pool3': pool3,
'conv4_1': conv4_1, 'conv4_2': conv4_2, 'conv4_3': conv4_3, 'pool4': pool4,
'conv5_1': conv5_1, 'conv5_2': conv5_2, 'conv5_3': conv5_3, 'pool5': pool5,
'fc1': fc1, 'fc2': fc2, 'fc3': fc3
}
return activations, parameters
if __name__ == '__main__':
# Get input
os.chdir("../../experiments/vgg16_pre-trained/")
imlist = ['testflash.jpg', 'testme.jpg']
im_names = [os.path.splitext(os.path.basename(imf))[0] for imf in imlist]
input_images = [imread(f, mode='RGB') for f in imlist]
# Check 'vgg16_weights.npz exists
if not os.path.isfile('vgg16_weights.npz'):
raise Exception(
"The weights I use here were converted from the Caffe Model Zoo "
"weights by Davi Frossard. He didn't include a license so I'm "
"hesistant to re-post them. Please download them from his "
"website:\nhttp://www.cs.toronto.edu/~frossard/post/vgg16/")
# Build VGG16
if _debug:
sess = tf.InteractiveSession()
tf.summary.FileWriter('TensorBoard', sess.graph)
else:
sess = tf.Session()
vgg = PreTrainedVGG16('vgg16_weights.npz', sess)
# Run images through network, return softmax probabilities
class_probabilities = vgg.get_output(input_images)
print(class_probabilities.shape)
# Get Class Names
class_names = vgg.get_class_names()
#NOTE: only one file at a time is working... must fix
# Report results
# for imf, cps in zip(imlist, class_probabilities_list):
imf = im_names[0]
print("Top Five Results for", imf + ':')
top5 = (np.argsort(class_probabilities)[::-1])[0:5]
with open(imf + '_results.txt', 'w') as fout:
for p in np.argsort(class_probabilities)[::-1]:
fout.write(str(class_probabilities[p]) + ' : ' + class_names[p] + '\n')
for p in top5:
print(class_probabilities[p], ' : ', class_names[p])
|
mit
| 1,991,808,772,521,995,800
| 43.344186
| 87
| 0.595133
| false
| 3.297821
| false
| false
| false
|
zarr-developers/numcodecs
|
numcodecs/tests/test_shuffle.py
|
1
|
4387
|
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
import numpy as np
import pytest
try:
from numcodecs.shuffle import Shuffle
except ImportError: # pragma: no cover
pytest.skip(
"numcodecs.shuffle not available", allow_module_level=True
)
from numcodecs.tests.common import (check_encode_decode,
check_config,
check_backwards_compatibility)
codecs = [
Shuffle(),
Shuffle(elementsize=0),
Shuffle(elementsize=4),
Shuffle(elementsize=8)
]
# mix of dtypes: integer, float, bool, string
# mix of shapes: 1D, 2D, 3D
# mix of orders: C, F
arrays = [
np.arange(1000, dtype='i4'),
np.linspace(1000, 1001, 1000, dtype='f8'),
np.random.normal(loc=1000, scale=1, size=(100, 10)),
np.random.randint(0, 2, size=1000, dtype=bool).reshape(100, 10, order='F'),
np.random.choice([b'a', b'bb', b'ccc'], size=1000).reshape(10, 10, 10),
np.random.randint(0, 2**60, size=1000, dtype='u8').view('M8[ns]'),
np.random.randint(0, 2**60, size=1000, dtype='u8').view('m8[ns]'),
np.random.randint(0, 2**25, size=1000, dtype='u8').view('M8[m]'),
np.random.randint(0, 2**25, size=1000, dtype='u8').view('m8[m]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('M8[ns]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('m8[ns]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('M8[m]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('m8[m]'),
]
@pytest.mark.parametrize('array', arrays)
@pytest.mark.parametrize('codec', codecs)
def test_encode_decode(array, codec):
check_encode_decode(array, codec)
def test_config():
codec = Shuffle()
check_config(codec)
codec = Shuffle(elementsize=8)
check_config(codec)
def test_repr():
expect = "Shuffle(elementsize=0)"
actual = repr(Shuffle(elementsize=0))
assert expect == actual
expect = "Shuffle(elementsize=4)"
actual = repr(Shuffle(elementsize=4))
assert expect == actual
expect = "Shuffle(elementsize=8)"
actual = repr(Shuffle(elementsize=8))
assert expect == actual
expect = "Shuffle(elementsize=16)"
actual = repr(Shuffle(elementsize=16))
assert expect == actual
def test_eq():
assert Shuffle() == Shuffle()
assert Shuffle(elementsize=16) != Shuffle()
def _encode_worker(data):
compressor = Shuffle()
enc = compressor.encode(data)
return enc
def _decode_worker(enc):
compressor = Shuffle()
data = compressor.decode(enc)
return data
@pytest.mark.parametrize('pool', (Pool, ThreadPool))
def test_multiprocessing(pool):
data = np.arange(1000000)
enc = _encode_worker(data)
pool = pool(5)
# test with process pool and thread pool
# test encoding
enc_results = pool.map(_encode_worker, [data] * 5)
assert all([len(enc) == len(e) for e in enc_results])
# test decoding
dec_results = pool.map(_decode_worker, [enc] * 5)
assert all([data.nbytes == len(d) for d in dec_results])
# tidy up
pool.close()
pool.join()
def test_backwards_compatibility():
check_backwards_compatibility(Shuffle.codec_id, arrays, codecs)
# def test_err_decode_object_buffer():
# check_err_decode_object_buffer(Shuffle())
# def test_err_encode_object_buffer():
# check_err_encode_object_buffer(Shuffle())
# def test_decompression_error_handling():
# for codec in codecs:
# with pytest.raises(RuntimeError):
# codec.decode(bytearray())
# with pytest.raises(RuntimeError):
# codec.decode(bytearray(0))
def test_expected_result():
    # The shuffle groups byte k of every element together, so byte b of
    # element e moves to output position b*len(arr) + e. For this 4-element
    # uint64 array, the lowest byte of the last element lands at byte
    # offset len(arr)-1 = 3 of the first output word, therefore
    # [0, 0, 0, 1] encodes to [2**((len-1)*8), 0, 0, 0]
    # (where 8 = bits in a byte)
arr = np.array([0, 0, 0, 1], dtype='uint64')
codec = Shuffle(elementsize=arr.data.itemsize)
enc = codec.encode(arr)
assert np.frombuffer(enc.data, arr.dtype)[0] == 2**((len(arr)-1)*8)
def test_incompatible_elementsize():
with pytest.raises(ValueError):
arr = np.arange(1001, dtype='u1')
codec = Shuffle(elementsize=4)
codec.encode(arr)
|
mit
| 2,365,699,436,646,874,600
| 28.05298
| 81
| 0.635742
| false
| 3.235251
| true
| false
| false
|
SorenSeeberg/MrDatabase
|
mr_database/column.py
|
1
|
1505
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
class DataTypes:
@staticmethod
def char(num_chars) -> str:
return f'CHAR({num_chars})'
@staticmethod
def varchar(num_chars=None) -> str:
if num_chars:
return f'VARCHAR({num_chars})'
else:
return 'VARCHAR'
smallint = 'SMALLINT'
integer = 'INTEGER'
datetime = 'DATETIME'
blob = 'BLOB'
class Column:
data_types: DataTypes = DataTypes
def __init__(self,
data_type: str,
data_type_var=None,
default=None,
pk: bool=False,
fk: 'Table.__subclasses__'=None,
unique: bool=False,
not_null: bool=False,
display_name: str=None):
self.data_type = data_type
self.data_type_var = data_type_var
self.default = default
self.pk = pk
if not fk:
self.fk = False
elif type(fk[0]) == str:
self.fk_table_name = fk[0]
self.fk_property = fk[1]
self.fk = True
else:
self.fk_table = fk[0]
self.fk_property = fk[1]
self.fk = True
self.unique = unique
self.not_null = not_null
self.display_name = display_name
def __len__(self):
pass
def __repr__(self) -> str:
return f'Column({self.data_type})'
def __eq__(self, other: 'Column') -> bool:
pass
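# Illustrative usage, a minimal sketch (the column names are assumptions):
#   id_col = Column(DataTypes.integer, pk=True, not_null=True)
#   name_col = Column(DataTypes.varchar(64), unique=True)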
|
mit
| -5,761,736,747,812,785,000
| 19.337838
| 49
| 0.483721
| false
| 3.839286
| false
| false
| false
|
rickshinners/blinkenlights
|
app/plugins/plugin_loader.py
|
1
|
2974
|
from TestPlugin import TestPlugin
from JenkinsPlugin import JenkinsPlugin, JenkinsHistoryPlugin
import logging
def load_plugins(config, scheduler, set_pixel):
logger = logging.getLogger(__name__)
logger.info("Stopping any existing jobs")
scheduler.remove_all_jobs()
if config is None or len(config) == 0:
logger.info("No plugins configured")
return
for plugin_name in config:
logger.info("Loading plugin: %s" % plugin_name)
try:
plugin_config = config[plugin_name]
plugin = _load_plugin_type(plugin_config, set_pixel)
schedule_config = plugin_config['schedule']
schedule_type = schedule_config['type']
if schedule_type == 'cron':
scheduler.add_job(plugin.run, 'cron', id=plugin_name,
second=schedule_config.get('second', '*'),
minute=schedule_config.get('minute', '*'),
hour=schedule_config.get('hour', '*'),
day_of_week=schedule_config.get('day_of_week', '*'),
week=schedule_config.get('week', '*'),
day=schedule_config.get('day', '*'),
month=schedule_config.get('month', '*'),
year=schedule_config.get('year', '*'),
start_date=schedule_config.get('start_date', None),
end_date=schedule_config.get('end_date', None))
elif schedule_type == 'interval':
scheduler.add_job(plugin.run, 'interval', id=plugin_name,
seconds=schedule_config.get('seconds', 0),
minutes=schedule_config.get('minutes', 0),
hours=schedule_config.get('hours', 0),
days=schedule_config.get('days', 0),
weeks=schedule_config.get('weeks', 0),
start_date=schedule_config.get('start_date', None),
end_date=schedule_config.get('end_date', None))
elif schedule_type == 'immediate':
scheduler.add_job(plugin.run, 'date', id=plugin_name)
else:
raise Exception("Unknown schedule type: %s" % schedule_type)
except Exception, e:
logger.exception("Could not load plugin: %s" % plugin_name)
def _load_plugin_type(config, set_pixel):
type_name = config['plugin_type']
if type_name == 'TestPlugin':
return TestPlugin(config, set_pixel)
elif type_name == 'jenkins':
return JenkinsPlugin(config, set_pixel)
elif type_name == 'jenkins_history':
return JenkinsHistoryPlugin(config, set_pixel)
else:
raise Exception("Unknown plugin type: %s" % type_name)
|
mit
| -5,793,354,938,301,005,000
| 47.754098
| 86
| 0.520511
| false
| 4.56135
| true
| false
| false
|
anthonynguyen/UrTSB
|
urtsb_src/ui/adv_filter_window.py
|
1
|
27886
|
#
# Copyright (C) 2010 Sorcerer
#
# This file is part of UrTSB.
#
# UrTSB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UrTSB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UrTSB. If not, see <http://www.gnu.org/licenses/>.
#
from threading import Thread
from urtsb_src.filemanager import FileManager, filterkey, cfgvalues
from urtsb_src.globals import Globals
from urtsb_src.ui.gametypes_filter import GametypesFilter
import gtk
class AdvancedFilterWindow(gtk.Dialog):
"""
"""
def __init__(self, filter):
"""
Constructor
"""
gtk.Dialog.__init__(self, 'Advanced Filter Settings', None,\
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)
self.set_icon_from_file(Globals.icon_dir +'/logo.png')
self.set_default_size(700, 500)
self.filter = filter
#buttons
applybutton = gtk.Button('Apply')
cancelbutton = gtk.Button('Cancel')
defaultbutton = gtk.Button('Defaults')
resetbutton = gtk.Button('Reset')
applybutton.connect("clicked", self.on_apply_clicked)
cancelbutton.connect("clicked", self.on_cancel_clicked)
defaultbutton.connect("clicked", self.on_default_clicked)
resetbutton.connect("clicked", self.on_reset_clicked)
self.action_area.pack_start(defaultbutton, False, False)
self.action_area.pack_start(resetbutton, False, False)
self.action_area.pack_start(cancelbutton, False, False)
self.action_area.pack_start(applybutton, False, False)
self.setup_filter_elements()
self.set_default_values(False)
self.show_all()
def setup_filter_elements(self):
"""
setup the filter elements
"""
basic_filter_box = gtk.HBox()
self.vbox.pack_start(basic_filter_box, False, False)
queryframe = gtk.Frame('Query Parameters')
queryframe.set_border_width(2)
filterframe = gtk.Frame('Basic Filter')
filterframe.set_border_width(2)
basic_filter_box.pack_start(queryframe, False, False)
basic_filter_box.pack_start(filterframe, True, True)
#query parameters, empty and full
querybox = gtk.VBox()
querybox.set_border_width(5)
self.checkbox_showfull = gtk.CheckButton('show full')
self.checkbox_showfull.show()
self.checkbox_showempty = gtk.CheckButton('show empty')
self.checkbox_showempty.show()
#filterframe content
filtertable = gtk.Table(2,5)
filtertable.set_border_width(5)
filterframe.add(filtertable)
self.checkbox_hide_non_responsive = gtk.CheckButton('hide non responsive')
self.checkbox_hide_passworded = gtk.CheckButton('hide passworded')
minplayerlabel = gtk.Label('min. players:')
maxplayerlabel = gtk.Label('max. players:')
self.minplayerentry = gtk.SpinButton()
self.maxplayerentry = gtk.SpinButton()
self.minplayerentry.set_increments(1, 10)
self.maxplayerentry.set_increments(1, 10)
self.minplayerentry.set_range(0,99)
self.maxplayerentry.set_range(0,99)
map_label = gtk.Label('Mapname contains:')
server_label = gtk.Label('Servername contains:')
self.mapnameentry = gtk.Entry()
self.servernameentry = gtk.Entry()
filtertable.attach(self.checkbox_hide_non_responsive, 0,1,0,1 )
filtertable.attach(self.checkbox_hide_passworded, 0,1,1,2 )
filtertable.attach(minplayerlabel, 1,2,0,1 )
filtertable.attach(maxplayerlabel, 1,2,1,2 )
filtertable.attach(self.minplayerentry, 2,3,0,1 )
filtertable.attach(self.maxplayerentry, 2,3,1,2 )
filtertable.attach(map_label, 3,4,0,1)
filtertable.attach(self.mapnameentry, 4,5,0,1)
filtertable.attach(server_label, 3,4,1,2)
filtertable.attach(self.servernameentry, 4,5,1,2)
querybox.pack_start(self.checkbox_showfull)
querybox.pack_start(self.checkbox_showempty)
queryframe.add(querybox)
self.gametypesfilter = GametypesFilter()
self.vbox.pack_start(self.gametypesfilter, False, False)
self.create_gear_chooser()
self.create_cvar_filter()
def create_gear_chooser(self):
"""
Creates the ui elements to choose a g_gear configuration
"""
gear_frame = gtk.Frame('Gear Settings Filter')
gear_type_box = gtk.HBox()
#the include exclude chooser
self.radio_gear_disable = gtk.RadioButton(None, 'Disabled')
self.radio_gear_include = gtk.RadioButton(self.radio_gear_disable, \
'Include (equals)')
self.radio_gear_exclude = gtk.RadioButton(self.radio_gear_disable, \
'Exclude (not equals)')
gear_type_box.pack_start(self.radio_gear_disable)
gear_type_box.pack_start(self.radio_gear_include)
gear_type_box.pack_start(self.radio_gear_exclude)
gear_type_box.set_border_width(5)
gearhbox = gtk.HBox()
gear_frame.add(gearhbox)
gear_choose_area_vbox = gtk.VBox()
gear_table = gtk.Table(4,2)
gear_table.set_border_width(15)
gearhbox.pack_start(gear_choose_area_vbox)
gear_choose_area_vbox.pack_start(gear_type_box)
gear_choose_area_vbox.pack_start(gear_table)
#the checkboxes
self.checkbox_grenades = gtk.CheckButton('Grenades')
self.checkbox_snipers = gtk.CheckButton('Snipers')
self.checkbox_spas = gtk.CheckButton('Spas')
self.checkbox_pistols = gtk.CheckButton('Pistols')
self.checkbox_automatics = gtk.CheckButton('Automatic Guns')
self.checkbox_negev = gtk.CheckButton('Negev')
#connect to the toggled signal
self.checkbox_grenades.connect('toggled', self.on_gear_checkbox_changed)
self.checkbox_snipers.connect('toggled', self.on_gear_checkbox_changed)
self.checkbox_spas.connect('toggled', self.on_gear_checkbox_changed)
self.checkbox_pistols.connect('toggled', self.on_gear_checkbox_changed)
self.checkbox_automatics.connect('toggled', \
self.on_gear_checkbox_changed)
self.checkbox_negev.connect('toggled', self.on_gear_checkbox_changed)
#the value textfield
self.gearvalue = gtk.Entry()
self.gearvalue.set_width_chars(4)
self.gearvalue.set_editable(False)
#the add button
add_button = gtk.Button('Add')
add_button.set_border_width(5)
add_button.connect('clicked', self.on_add_gear_value_clicked)
#now put all into the table
gear_table.attach(self.checkbox_grenades, 0,1,0,1 )
gear_table.attach(self.checkbox_snipers, 0,1,1,2 )
gear_table.attach(self.checkbox_spas, 0,1,2,3 )
gear_table.attach(self.gearvalue, 0,1,3,4 )
gear_table.attach(self.checkbox_pistols, 1,2,0,1 )
gear_table.attach(self.checkbox_automatics, 1,2,1,2 )
gear_table.attach(self.checkbox_negev, 1,2,2,3 )
gear_table.attach(add_button, 1,2,3,4 )
#gear settings treeview area
gear_values_vbox = gtk.VBox()
gearhbox.pack_start(gear_values_vbox)
gear_scrolled_window = gtk.ScrolledWindow()
gear_scrolled_window.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
gear_values_vbox.pack_start(gear_scrolled_window)
self.gearliststore = gtk.ListStore(str)
gear_set_treeview = gtk.TreeView(model=self.gearliststore)
self.gearlistview = gear_set_treeview
gear_scrolled_window.add(gear_set_treeview)
self.column_gear_value = gtk.TreeViewColumn("Gear Value")
gear_set_treeview.append_column(self.column_gear_value)
var_cell0=gtk.CellRendererText()
self.column_gear_value.pack_start(var_cell0, expand=True)
self.column_gear_value.add_attribute(var_cell0, 'text', 0)
self.column_gear_value.set_reorderable(True)
btn_hbox = gtk.HBox()
gear_values_vbox.pack_start(btn_hbox, False, False)
clear_button = gtk.Button('Clear')
clear_button.set_border_width(5)
btn_hbox.pack_start(clear_button, True, True)
clear_button.connect('clicked', self.on_clear_gear_list_clicked)
remove_button = gtk.Button('Remove Selected')
remove_button.set_border_width(5)
btn_hbox.pack_start(remove_button, True, True)
remove_button.connect('clicked', self.on_remove_selected_gear_value)
self.vbox.pack_start(gear_frame, False, False)
def create_cvar_filter(self):
"""
Creates the ui-elements for the custom server cvars filtering
"""
cvar_frame = gtk.Frame('Custom Server CVARS Filtering')
cvar_main_hbox = gtk.HBox()
cvar_frame.add(cvar_main_hbox)
#settings editing area
cvar_set_vbox = gtk.VBox()
cvar_main_hbox.pack_start(cvar_set_vbox)
variable_label = gtk.Label('Variable:')
value_label = gtk.Label('Value:')
self.variable_entry = gtk.Entry()
self.value_entry = gtk.Entry()
editing_table = gtk.Table(5,2)
editing_table.attach(variable_label, 0,1,0,1)
editing_table.attach(self.variable_entry,1,2,0,1)
editing_table.attach(value_label, 0,1,1,2)
editing_table.attach(self.value_entry, 1,2,1,2)
editing_table.set_border_width(10)
cvar_set_vbox.pack_start(editing_table)
self.radio_cvar_include = gtk.RadioButton(None, 'Include (equals)')
self.radio_cvar_include.set_border_width(5)
self.radio_cvar_exclude = gtk.RadioButton(self.radio_cvar_include, \
'Exclude (not equals)')
self.radio_cvar_exclude.set_border_width(5)
editing_table.attach(self.radio_cvar_include, 1,2,2,3)
editing_table.attach(self.radio_cvar_exclude, 1,2,3,4)
add_button = gtk.Button('Add')
editing_table.attach(add_button, 1,2,4,5)
add_button.connect('clicked', self.on_add_var_filter_clicked)
#the treeview displaying current CVAR filter settings
cvar_values_vbox = gtk.VBox()
cvar_main_hbox.pack_start(cvar_values_vbox)
cvar_scrolled_window = gtk.ScrolledWindow()
cvar_scrolled_window.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
cvar_values_vbox.pack_start(cvar_scrolled_window)
self.cvarliststore = gtk.ListStore(str, str, str, object)
cvar_set_treeview = gtk.TreeView(model=self.cvarliststore)
self.varfilterview = cvar_set_treeview
cvar_scrolled_window.add(cvar_set_treeview)
self.column_cvar_variable = gtk.TreeViewColumn('Variable')
self.column_cvar_value = gtk.TreeViewColumn('Value')
self.column_cvar_type = gtk.TreeViewColumn('Type')
cvar_set_treeview.append_column(self.column_cvar_variable)
cvar_set_treeview.append_column(self.column_cvar_value)
cvar_set_treeview.append_column(self.column_cvar_type)
var_cell0=gtk.CellRendererText()
var_cell1=gtk.CellRendererText()
var_cell2=gtk.CellRendererText()
self.column_cvar_variable.pack_start(var_cell0, expand=True)
self.column_cvar_value.pack_start(var_cell1, expand=False)
self.column_cvar_type.pack_start(var_cell2, expand=False)
self.column_cvar_variable.add_attribute(var_cell0, 'text', 0)
self.column_cvar_value.add_attribute(var_cell1, 'text', 1)
self.column_cvar_type.add_attribute(var_cell2, 'text', 2)
btn_hbox = gtk.HBox()
cvar_values_vbox.pack_start(btn_hbox, False, False)
clear_button = gtk.Button('Clear')
clear_button.set_border_width(5)
btn_hbox.pack_start(clear_button, True, True)
clear_button.connect('clicked', self.on_clear_var_list_clicked)
remove_button = gtk.Button('Remove Selected')
remove_button.set_border_width(5)
btn_hbox.pack_start(remove_button, True, True)
remove_button.connect('clicked', self.on_remove_selected_var)
self.vbox.pack_start(cvar_frame, False, False)
def calculate_gear_value(self):
"""
Calculates the g_gear value
"""
retval = 63
if self.checkbox_grenades.get_active():
retval -= 1
if self.checkbox_snipers.get_active():
retval -= 2
if self.checkbox_spas.get_active():
retval -= 4
if self.checkbox_pistols.get_active():
retval -= 8
if self.checkbox_automatics.get_active():
retval -= 16
if self.checkbox_negev.get_active():
retval -= 32
return retval
def set_default_values(self, reset):
"""
Set default values to all input elements of the filter.
Differs between application defaults and the values that are stored
in a file to remember user choices.
@param reset - boolean: if True use application defaults, otherwise load
values from file.
"""
self.gearliststore.clear()
self.cvarliststore.clear()
fm = FileManager()
stored_filter = fm.get_remembered_filter_parameters()
#gearcheckbox is not stored, only the listview
#initialize with all checked
self.checkbox_grenades.set_active(True)
self.checkbox_snipers.set_active(True)
self.checkbox_spas.set_active(True)
self.checkbox_pistols.set_active(True)
self.checkbox_automatics.set_active(True)
self.checkbox_negev.set_active(True)
if reset or None == stored_filter: #reset to application defaults
self.gametypesfilter.checkbox_show_gametype_all.set_active(True)
# emits the 'toggled' signal
self.gametypesfilter.checkbox_show_gametype_all.toggled()
self.checkbox_hide_non_responsive.set_active(True)
self.checkbox_hide_passworded.set_active(True)
#defaults for min and maxplayer spinbuttons
self.minplayerentry.set_value(0)
self.maxplayerentry.set_value(99)
self.checkbox_showfull.set_active(False)
self.checkbox_showempty.set_active(False)
self.mapnameentry.set_text('')
self.servernameentry.set_text('')
else: #reset to stored values
#gametypes
value = fm.value_as_boolean(stored_filter[filterkey.GT_ALL])
self.gametypesfilter.checkbox_show_gametype_all.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.GT_BOMB])
self.gametypesfilter.checkbox_show_gametype_bomb.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.GT_TS])
self.gametypesfilter.checkbox_show_gametype_survivor.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.GT_CTF])
self.gametypesfilter.checkbox_show_gametype_ctf.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.GT_TDM])
self.gametypesfilter.checkbox_show_gametype_tdm.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.GT_CAH])
self.gametypesfilter.checkbox_show_gametype_cah.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.GT_FTL])
self.gametypesfilter.checkbox_show_gametype_ftl.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.GT_FFA])
self.gametypesfilter.checkbox_show_gametype_ffa.set_active(value)
#other filters:
#defaults for min and maxplayer spinbuttons
value = int(stored_filter[filterkey.FLT_MIN_PLAYERS])
self.minplayerentry.set_value(value)
value = int(stored_filter[filterkey.FLT_MAX_PLAYERS])
self.maxplayerentry.set_value(value)
if filterkey.FLT_MAP_NAME in stored_filter:
self.mapnameentry.set_text(stored_filter[filterkey.\
FLT_MAP_NAME])
if filterkey.FLT_SERVER_NAME in stored_filter:
self.servernameentry.set_text(stored_filter[filterkey.\
FLT_SERVER_NAME])
value = fm.value_as_boolean(stored_filter[filterkey.\
FLT_HIDE_NON_RESP])
self.checkbox_hide_non_responsive.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.\
FLT_HIDE_PASSWORDED])
self.checkbox_hide_passworded.set_active(value)
#query params
value = fm.value_as_boolean(stored_filter[filterkey.QRY_SHOW_FULL])
self.checkbox_showfull.set_active(value)
value = fm.value_as_boolean(stored_filter[filterkey.QRY_SHOW_EMPTY])
self.checkbox_showempty.set_active(value)
#the gearvalue list
if filterkey.FLT_GEAR in stored_filter:
value = stored_filter[filterkey.FLT_GEAR]
if cfgvalues.DISABLED == value:
self.radio_gear_disable.set_active(True)
self.radio_gear_exclude.set_active(False)
self.radio_gear_include.set_active(False)
elif cfgvalues.INCLUDE == value:
self.radio_gear_disable.set_active(False)
self.radio_gear_exclude.set_active(False)
self.radio_gear_include.set_active(True)
elif cfgvalues.EXCLUDE == value:
self.radio_gear_disable.set_active(False)
self.radio_gear_exclude.set_active(True)
self.radio_gear_include.set_active(False)
if filterkey.FLT_GEAR_LIST in stored_filter:
for value in stored_filter[filterkey.FLT_GEAR_LIST]:
self.gearliststore.append([value])
if filterkey.FLT_VAR_LIST in stored_filter:
for value in stored_filter[filterkey.FLT_VAR_LIST]:
self.cvarliststore.append([value[0], value[1], \
value[2], value])
def save_filter(self):
"""
writes the current filter/query params to the filter dict
"""
fm = FileManager()
filter = fm.get_remembered_filter_parameters()
if not filter:
# TODO: clean up this dirty hack ;)
fm.filter = {}
filter = fm.filter
#process gametypes
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_all.get_active())
filter[filterkey.GT_ALL] = value
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_bomb.get_active())
filter[filterkey.GT_BOMB] = value
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_survivor.get_active())
filter[filterkey.GT_TS] = value
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_ctf.get_active())
filter[filterkey.GT_CTF] = value
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_tdm.get_active())
filter[filterkey.GT_TDM] = value
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_cah.get_active())
filter[filterkey.GT_CAH] = value
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_ftl.get_active())
filter[filterkey.GT_FTL] = value
value = fm.value_from_boolean(self.gametypesfilter.\
checkbox_show_gametype_ffa.get_active())
filter[filterkey.GT_FFA] = value
#other filters
filter[filterkey.FLT_MIN_PLAYERS] = self.\
minplayerentry.get_value_as_int()
filter[filterkey.FLT_MAX_PLAYERS] = self.\
maxplayerentry.get_value_as_int()
value = fm.value_from_boolean(self.\
checkbox_hide_non_responsive.get_active())
filter[filterkey.FLT_HIDE_NON_RESP] = value
value = fm.value_from_boolean(self.\
checkbox_hide_passworded.get_active())
filter[filterkey.FLT_HIDE_PASSWORDED] = value
#mapname and servername filter
filter[filterkey.FLT_MAP_NAME] = self.mapnameentry.get_text()
filter[filterkey.FLT_SERVER_NAME] = self.servernameentry.get_text()
#query params
value = fm.value_from_boolean(self.checkbox_showfull.get_active())
filter[filterkey.QRY_SHOW_FULL] = value
value = fm.value_from_boolean(self.checkbox_showempty.get_active())
filter[filterkey.QRY_SHOW_EMPTY] = value
if self.radio_gear_disable.get_active():
filter[filterkey.FLT_GEAR] = cfgvalues.DISABLED
elif self.radio_gear_include.get_active():
filter[filterkey.FLT_GEAR] = cfgvalues.INCLUDE
elif self.radio_gear_exclude.get_active():
filter[filterkey.FLT_GEAR] = cfgvalues.EXCLUDE
#iterate over gearliststore to create a list of geavalues
iter = self.gearliststore.iter_children(None)
gearvalues = [] #empty list
while iter:
value = self.gearliststore.get_value(iter, 0)
gearvalues.append(value)
iter = self.gearliststore.iter_next(iter)
filter[filterkey.FLT_GEAR_LIST] = gearvalues
#iterate over varliststore to create the list of filter vars
iter = self.cvarliststore.iter_children(None)
varlist = []
while iter:
varfilter = self.cvarliststore.get_value(iter, 3)
varlist.append(varfilter)
iter = self.cvarliststore.iter_next(iter)
filter[filterkey.FLT_VAR_LIST] = varlist
#write to file
t = Thread(target=fm.save_filter_to_remember)
t.setDaemon(True)
t.start()
def on_apply_clicked(self, widget):
"""
Callback of the Apply button
"""
self.save_filter()
self.destroy()
def on_cancel_clicked(self, widget):
"""
Callback of the Cancel button
"""
#do nothing just close the dialog
self.destroy()
def on_reset_clicked(self, widget):
"""
Callback of the reset button
Reset the filter to the last applied values
"""
self.set_default_values(False)
def on_add_gear_value_clicked(self, widget):
"""
Callback of the add button in the gear selection filter area
Adds the current gear value to the gear value list
"""
gearvalue = self.gearvalue.get_text()
self.gearliststore.append([gearvalue])
def on_clear_gear_list_clicked(self, widget):
"""
Callback of the clear gear list button
clears the treeview
"""
self.gearliststore.clear()
def on_clear_var_list_clicked(self, button):
"""
Callback of the clear varlist button
clears the treeview/liststore
"""
self.cvarliststore.clear()
def on_default_clicked(self, widget):
"""
Callback of the defaults button
Reset the filter to the default values (not the stored/last applied
values)
"""
self.set_default_values(True)
def on_gear_checkbox_changed(self, checkbox):
"""
Callback for the toggled signal of the gear (weapons) checkboxes
triggers the calculation of the g_gear value and sets it to the
text entry
"""
g_gear_value = self.calculate_gear_value()
self.gearvalue.set_text(str(g_gear_value))
def on_remove_selected_gear_value(self, button):
"""
Callback of the remove selected button of the gear value treeview list
"""
selection = self.gearlistview.get_selection()
result = selection.get_selected()
if result:
iter = result[1]
self.gearliststore.remove(iter)
def on_remove_selected_var(self, button):
"""
Callback of the remove selected button of the custom filtering area
"""
selection = self.varfilterview.get_selection()
result = selection.get_selected()
if result:
iter = result[1]
self.cvarliststore.remove(iter)
def on_add_var_filter_clicked(self, button):
"""
Callback of the add button in the custom variable filtering area
"""
varname = self.variable_entry.get_text()
varvalue = self.value_entry.get_text()
#both values must be present and non-empty
if varname and varvalue:
var = [None]*3
var[0] = varname
var[1] = varvalue
if self.radio_cvar_include.get_active():
var[2] = cfgvalues.INCLUDE
elif self.radio_cvar_exclude.get_active():
var[2] = cfgvalues.EXCLUDE
self.cvarliststore.append([var[0], var[1], var[2], var])
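# --- Hedged illustration (not part of the original dialog code) ---
# calculate_gear_value() above encodes the server's g_gear bitmask: starting
# from 63, each *enabled* weapon checkbox subtracts its bit weight
# (1, 2, 4, 8, 16, 32). The same arithmetic as a standalone sketch, using a
# hypothetical helper name:
def _example_gear_value(enabled_bits):
    # enabled_bits maps a bit weight to whether that weapon class is enabled
    return 63 - sum(weight for weight, on in enabled_bits.items() if on)
# everything enabled -> 0; everything disabled -> 63, matching the defaults
# applied in set_default_values()
assert _example_gear_value(dict((w, True) for w in (1, 2, 4, 8, 16, 32))) == 0
assert _example_gear_value(dict((w, False) for w in (1, 2, 4, 8, 16, 32))) == 63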
|
gpl-3.0
| -1,009,639,285,081,446,900
| 38.952722
| 85
| 0.578426
| false
| 3.909435
| false
| false
| false
|
Saturn/soccer-cli
|
soccer/writers.py
|
1
|
14627
|
import click
import csv
import datetime
import json
import io
from abc import ABCMeta, abstractmethod
from itertools import groupby
from collections import namedtuple
from soccer import leagueids, leagueproperties
LEAGUE_PROPERTIES = leagueproperties.LEAGUE_PROPERTIES
LEAGUE_IDS = leagueids.LEAGUE_IDS
def get_writer(output_format='stdout', output_file=None):
return globals()[output_format.capitalize()](output_file)
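# --- Hedged illustration (not part of the original module) ---
# get_writer() resolves the writer class purely by name through globals():
# 'stdout' -> Stdout, 'csv' -> Csv, 'json' -> Json. The same lookup pattern
# with an explicit throwaway registry (hypothetical helper, for illustration):
def _example_get_writer(output_format, registry):
    # mirror get_writer(): capitalise the format name, look up the class,
    # and instantiate it with no output file
    return registry[output_format.capitalize()](None)
# e.g. _example_get_writer('json', {'Json': Json}) builds a Json writer once
# the classes below are defined.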
class BaseWriter(object):
__metaclass__ = ABCMeta
def __init__(self, output_file):
self.output_filename = output_file
@abstractmethod
def live_scores(self, live_scores):
pass
@abstractmethod
def team_scores(self, team_scores, time):
pass
@abstractmethod
def team_players(self, team):
pass
@abstractmethod
def standings(self, league_table, league):
pass
@abstractmethod
def league_scores(self, total_data, time):
pass
class Stdout(BaseWriter):
def __init__(self, output_file):
self.Result = namedtuple("Result", "homeTeam, goalsHomeTeam, awayTeam, goalsAwayTeam")
enums = dict(
WIN="red",
LOSE="blue",
TIE="yellow",
MISC="green",
TIME="yellow",
CL_POSITION="green",
EL_POSITION="yellow",
RL_POSITION="red",
POSITION="blue"
)
self.colors = type('Enum', (), enums)
def live_scores(self, live_scores):
"""Prints the live scores in a pretty format"""
scores = sorted(live_scores, key=lambda x: x["league"])
for league, games in groupby(scores, key=lambda x: x["league"]):
self.league_header(league)
for game in games:
self.scores(self.parse_result(game), add_new_line=False)
click.secho(' %s' % Stdout.utc_to_local(game["time"],
use_12_hour_format=False),
fg=self.colors.TIME)
click.echo()
def team_scores(self, team_scores, time, show_datetime, use_12_hour_format):
"""Prints the teams scores in a pretty format"""
for score in team_scores["matches"]:
if score["status"] == "FINISHED":
click.secho("%s\t" % score["utcDate"].split('T')[0],
fg=self.colors.TIME, nl=False)
self.scores(self.parse_result(score))
elif show_datetime:
self.scores(self.parse_result(score), add_new_line=False)
click.secho(' %s' % Stdout.utc_to_local(score["utcDate"],
use_12_hour_format,
show_datetime),
fg=self.colors.TIME)
def team_players(self, team):
"""Prints the team players in a pretty format"""
players = sorted(team, key=lambda d: d['shirtNumber'])
click.secho("%-4s %-25s %-20s %-20s %-15s" %
("N.", "NAME", "POSITION", "NATIONALITY", "BIRTHDAY"),
bold=True,
fg=self.colors.MISC)
fmt = (u"{shirtNumber:<4} {name:<28} {position:<23} {nationality:<23}"
u" {dateOfBirth:<18}")
for player in players:
click.secho(fmt.format(**player), bold=True)
def standings(self, league_table, league):
""" Prints the league standings in a pretty way """
click.secho("%-6s %-30s %-10s %-10s %-10s" %
("POS", "CLUB", "PLAYED", "GOAL DIFF", "POINTS"))
for team in league_table["standings"][0]["table"]:
if team["goalDifference"] >= 0:
team["goalDifference"] = ' ' + str(team["goalDifference"])
# Define the upper and lower bounds for Champions League,
# Europa League and Relegation places.
# This is so we can highlight them appropriately.
cl_upper, cl_lower = LEAGUE_PROPERTIES[league]['cl']
el_upper, el_lower = LEAGUE_PROPERTIES[league]['el']
rl_upper, rl_lower = LEAGUE_PROPERTIES[league]['rl']
team['teamName'] = team['team']['name']
team_str = (u"{position:<7} {teamName:<33} {playedGames:<12}"
u" {goalDifference:<14} {points}").format(**team)
if cl_upper <= team["position"] <= cl_lower:
click.secho(team_str, bold=True, fg=self.colors.CL_POSITION)
elif el_upper <= team["position"] <= el_lower:
click.secho(team_str, fg=self.colors.EL_POSITION)
elif rl_upper <= team["position"] <= rl_lower:
click.secho(team_str, fg=self.colors.RL_POSITION)
else:
click.secho(team_str, fg=self.colors.POSITION)
def league_scores(self, total_data, time, show_datetime,
use_12_hour_format):
"""Prints the data in a pretty format"""
for match in total_data['matches']:
self.scores(self.parse_result(match), add_new_line=not show_datetime)
if show_datetime:
click.secho(' %s' % Stdout.utc_to_local(match["utcDate"],
use_12_hour_format,
show_datetime),
fg=self.colors.TIME)
click.echo()
def league_header(self, league):
"""Prints the league header"""
league_name = " {0} ".format(league)
click.secho("{:=^62}".format(league_name), fg=self.colors.MISC)
click.echo()
def scores(self, result, add_new_line=True):
"""Prints out the scores in a pretty format"""
if result.goalsHomeTeam > result.goalsAwayTeam:
homeColor, awayColor = (self.colors.WIN, self.colors.LOSE)
elif result.goalsHomeTeam < result.goalsAwayTeam:
homeColor, awayColor = (self.colors.LOSE, self.colors.WIN)
else:
homeColor = awayColor = self.colors.TIE
click.secho('%-25s %2s' % (result.homeTeam, result.goalsHomeTeam),
fg=homeColor, nl=False)
click.secho(" vs ", nl=False)
click.secho('%2s %s' % (result.goalsAwayTeam,
result.awayTeam.rjust(25)), fg=awayColor,
nl=add_new_line)
def parse_result(self, data):
"""Parses the results and returns a Result namedtuple"""
def valid_score(score):
return "" if score is None else score
return self.Result(
data["homeTeam"]["name"],
valid_score(data["score"]["fullTime"]["homeTeam"]),
data["awayTeam"]["name"],
valid_score(data["score"]["fullTime"]["awayTeam"]))
@staticmethod
def utc_to_local(time_str, use_12_hour_format, show_datetime=False):
"""Converts the API UTC time string to the local user time."""
if not (time_str.endswith(" UTC") or time_str.endswith("Z")):
return time_str
today_utc = datetime.datetime.utcnow()
utc_local_diff = today_utc - datetime.datetime.now()
if time_str.endswith(" UTC"):
time_str, _ = time_str.split(" UTC")
utc_time = datetime.datetime.strptime(time_str, '%I:%M %p')
utc_datetime = datetime.datetime(today_utc.year,
today_utc.month,
today_utc.day,
utc_time.hour,
utc_time.minute)
else:
utc_datetime = datetime.datetime.strptime(time_str,
'%Y-%m-%dT%H:%M:%SZ')
local_time = utc_datetime - utc_local_diff
if use_12_hour_format:
date_format = '%I:%M %p' if not show_datetime else '%a %d, %I:%M %p'
else:
date_format = '%H:%M' if not show_datetime else '%a %d, %H:%M'
return datetime.datetime.strftime(local_time, date_format)
class Csv(BaseWriter):
def generate_output(self, result):
if not self.output_filename:
for row in result:
click.echo(u','.join(unicode(item) for item in row))
else:
with open(self.output_filename, 'w') as csv_file:
writer = csv.writer(csv_file)
for row in result:
row = [unicode(s).encode('utf-8') for s in row]
writer.writerow(row)
def live_scores(self, live_scores):
"""Store output of live scores to a CSV file"""
headers = ['League', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
result.extend([game['league'], game['homeTeamName'],
game['goalsHomeTeam'], game['goalsAwayTeam'],
game['awayTeamName']] for game in live_scores['games'])
self.generate_output(result)
def team_scores(self, team_scores, time):
"""Store output of team scores to a CSV file"""
headers = ['Date', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
result.extend([score["utcDate"].split('T')[0],
score['homeTeam']['name'],
score['score']['fullTime']['homeTeam'],
score['score']['fullTime']['awayTeam'],
score['awayTeam']['name']]
for score in team_scores['matches']
if score['status'] == 'FINISHED')
self.generate_output(result)
def team_players(self, team):
"""Store output of team players to a CSV file"""
headers = ['Jersey Number', 'Name', 'Position', 'Nationality',
'Date of Birth']
result = [headers]
result.extend([player['shirtNumber'],
player['name'],
player['position'],
player['nationality'],
player['dateOfBirth']]
for player in team)
self.generate_output(result)
def standings(self, league_table, league):
"""Store output of league standings to a CSV file"""
headers = ['Position', 'Team Name', 'Games Played', 'Goal For',
'Goals Against', 'Goal Difference', 'Points']
result = [headers]
result.extend([team['position'],
team['team']['name'],
team['playedGames'],
team['goalsFor'],
team['goalsAgainst'],
team['goalDifference'],
team['points']]
for team in league_table['standings'][0]['table'])
self.generate_output(result)
def league_scores(self, total_data, time, show_upcoming, use_12_hour_format):
"""Store output of fixtures based on league and time to a CSV file"""
headers = ['League', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
league = total_data['competition']['name']
result.extend([league,
score['homeTeam']['name'],
score['score']['fullTime']['homeTeam'],
score['score']['fullTime']['awayTeam'],
score['awayTeam']['name']]
for score in total_data['matches'])
self.generate_output(result)
class Json(BaseWriter):
def generate_output(self, result):
if not self.output_filename:
click.echo(json.dumps(result,
indent=4,
separators=(',', ': '),
ensure_ascii=False))
else:
with io.open(self.output_filename, 'w', encoding='utf-8') as f:
data = json.dumps(result, f, indent=4,
separators=(',', ': '), ensure_ascii=False)
f.write(data)
def live_scores(self, live_scores):
"""Store output of live scores to a JSON file"""
self.generate_output(live_scores['games'])
def team_scores(self, team_scores, time):
"""Store output of team scores to a JSON file"""
data = []
for score in team_scores['matches']:
if score['status'] == 'FINISHED':
item = {'date': score["utcDate"].split('T')[0],
'homeTeamName': score['homeTeam']['name'],
'goalsHomeTeam': score['score']['fullTime']['homeTeam'],
'goalsAwayTeam': score['score']['fullTime']['awayTeam'],
'awayTeamName': score['awayTeam']['name']}
data.append(item)
self.generate_output({'team_scores': data})
def standings(self, league_table, league):
"""Store output of league standings to a JSON file"""
data = []
for team in league_table['standings'][0]['table']:
item = {'position': team['position'],
'teamName': team['team'],
'playedGames': team['playedGames'],
'goalsFor': team['goalsFor'],
'goalsAgainst': team['goalsAgainst'],
'goalDifference': team['goalDifference'],
'points': team['points']}
data.append(item)
self.generate_output({'standings': data})
def team_players(self, team):
"""Store output of team players to a JSON file"""
keys = 'shirtNumber name position nationality dateOfBirth'.split()
data = [{key: player[key] for key in keys} for player in team]
self.generate_output({'players': data})
def league_scores(self, total_data, time):
"""Store output of fixtures based on league and time to a JSON file"""
data = []
for league, score in self.supported_leagues(total_data):
item = {'league': league, 'homeTeamName': score['homeTeamName'],
'goalsHomeTeam': score['result']['goalsHomeTeam'],
'goalsAwayTeam': score['result']['goalsAwayTeam'],
'awayTeamName': score['awayTeamName']}
data.append(item)
self.generate_output({'league_scores': data, 'time': time})
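# --- Hedged usage sketch (not part of the original module) ---
# Stdout.utc_to_local() accepts both the legacy '06:30 PM UTC' strings and
# ISO '...Z' timestamps; the rendered value depends on the local timezone,
# so this is only an ad-hoc smoke test:
if __name__ == '__main__':
    print(Stdout.utc_to_local('2019-05-01T18:30:00Z', use_12_hour_format=True))
    print(Stdout.utc_to_local('06:30 PM UTC', use_12_hour_format=False))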
|
mit
| 6,004,862,366,422,027,000
| 41.031609
| 94
| 0.521912
| false
| 4.135426
| false
| false
| false
|
yudaykiran/openebs
|
e2e/ansible/plugins/callback/openebs.py
|
1
|
2743
|
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import (
CallbackModule as CallbackModule_default
)
from ansible import constants as C
"""Implementation of Custom Class that inherits the 'default' stdout_callback
plugin and overrides the v2_runner_retry api for displaying the 'FAILED -
RETRYING' only during verbose mode."""
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'openebs'
CALLBACK_NEEDS_WHITELIST = False
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
final_result = result._result['retries'] - result._result['attempts']
msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name,
final_result)
display_verbosity = self._display.verbosity
required_result = '_ansible_verbose_always'
if (display_verbosity > 2 or required_result in result._result):
if required_result not in result._result:
msg += "Result was: %s" % self._dump_results(result._result)
self._display.v('%s' % (msg))
def v2_runner_on_skipped(self, result):
my_result = result._result
required_result = '_ansible_verbose_always'
if C.DISPLAY_SKIPPED_HOSTS:
if (self._display.verbosity > 0 or required_result in my_result):
if required_result not in my_result:
dumped_results = self._dump_results(my_result)
msg = "skipping: [%s] => %s" % (result._host.get_name(),
dumped_results)
self._display.display(msg, color=C.COLOR_SKIP)
else:
self._display.display("skipping task..", color=C.COLOR_SKIP)
def v2_runner_item_on_skipped(self, result):
my_result = result._result
required_result = '_ansible_verbose_always'
if C.DISPLAY_SKIPPED_HOSTS:
if (self._display.verbosity > 0 or required_result in my_result):
if required_result not in my_result:
required_item = self._get_item(my_result)
dumped_result = self._dump_results(my_result)
result_host = result._host.get_name()
msg = "skipping: [%s] => (item=%s) => %s" % (result_host,
required_item,
dumped_result)
self._display.display(msg, color=C.COLOR_SKIP)
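# --- Hedged illustration (not part of the original plugin) ---
# The retry override above only emits 'FAILED - RETRYING' lines when
# ansible-playbook runs with -vvv (verbosity > 2) or when the task result
# carries _ansible_verbose_always. A stripped-down version of the same gate:
def _example_should_show_retry(verbosity, result_dict):
    return verbosity > 2 or '_ansible_verbose_always' in result_dict
# _example_should_show_retry(0, {}) -> False  (default runs stay quiet)
# _example_should_show_retry(3, {}) -> True   (-vvv surfaces retries)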
|
apache-2.0
| 2,254,038,138,875,758,600
| 42.539683
| 79
| 0.56471
| false
| 4.265941
| false
| false
| false
|
TomAugspurger/pandas
|
pandas/core/computation/ops.py
|
1
|
15978
|
"""
Operator classes for eval.
"""
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import operator
from typing import Callable, Iterable, Optional, Union
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import is_list_like, is_scalar
import pandas.core.common as com
from pandas.core.computation.common import _ensure_decoded, result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
_reductions = ("sum", "prod")
_unary_math_ops = (
"sin",
"cos",
"exp",
"log",
"expm1",
"log1p",
"sqrt",
"sinh",
"cosh",
"tanh",
"arcsin",
"arccos",
"arctan",
"arccosh",
"arcsinh",
"arctanh",
"abs",
"log10",
"floor",
"ceil",
)
_binary_math_ops = ("arctan2",)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = "__pd_eval_local_"
class UndefinedVariableError(NameError):
"""
NameError subclass for local variables.
"""
def __init__(self, name: str, is_local: Optional[bool] = None):
base_msg = f"{repr(name)} is not defined"
if is_local:
msg = f"local variable {base_msg}"
else:
msg = f"name {base_msg}"
super().__init__(msg)
class Term:
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
is_local: bool
def __init__(self, name, env, side=None, encoding=None):
# name is a str for Term, but may be something else for subclasses
self._name = name
self.env = env
self.side = side
tname = str(name)
self.is_local = tname.startswith(_LOCAL_TAG) or tname in _DEFAULT_GLOBALS
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self) -> str:
return self.name.replace(_LOCAL_TAG, "")
def __repr__(self) -> str:
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, "ndim") and res.ndim > 2:
raise NotImplementedError(
"N-dimensional objects, where N > 2, are not supported with eval"
)
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, str):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def is_scalar(self) -> bool:
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self) -> str:
return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})"
@property
def is_datetime(self) -> bool:
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@property
def ndim(self) -> int:
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __repr__(self) -> str:
# in python 2 str() of float
# can truncate shorter than repr()
return repr(self.name)
_bool_op_map = {"not": "~", "and": "&", "or": "|"}
class Op:
"""
Hold an operator of arbitrary arity.
"""
op: str
def __init__(self, op: str, operands: Iterable[Union[Term, "Op"]], encoding=None):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = encoding
def __iter__(self):
return iter(self.operands)
def __repr__(self) -> str:
"""
Print a generic n-ary operator and its operands using infix notation.
"""
# recurse over the operands
parened = (f"({pprint_thing(opr)})" for opr in self.operands)
return pprint_thing(f" {self.op} ".join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self) -> bool:
types = self.operand_types
obj_dtype_set = frozenset([np.dtype("object")])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def is_scalar(self) -> bool:
return all(operand.is_scalar for operand in self.operands)
@property
def is_datetime(self) -> bool:
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""
Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""
Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
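# --- Hedged illustration (not part of the upstream module) ---
# _in()/_not_in() take the vectorized .isin() fast path whenever an operand
# supports it and fall back to Python's `in` otherwise. A duck-typed stand-in
# (used here to avoid a circular pandas import) makes the dispatch visible:
class _ExampleIsinHolder:
    def __init__(self, values):
        self.values = values
    def isin(self, other):
        # element-wise membership, standing in for Series.isin()
        return [v in other for v in self.values]
# _in(_ExampleIsinHolder([1, 2, 3]), [2, 3]) -> [False, True, True]
# _in(1, [1, 2]) -> True (a plain scalar falls back to `x in y`)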
_cmp_ops_syms = (">", "<", ">=", "<=", "==", "!=", "in", "not in")
_cmp_ops_funcs = (
operator.gt,
operator.lt,
operator.ge,
operator.le,
operator.eq,
operator.ne,
_in,
_not_in,
)
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = ("&", "|", "and", "or")
_bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_)
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = ("+", "-", "*", "/", "**", "//", "%")
_arith_ops_funcs = (
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.pow,
operator.floordiv,
operator.mod,
)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = ("**", "//", "%")
_special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod)
_special_case_arith_ops_dict = dict(
zip(_special_case_arith_ops_syms, _special_case_arith_ops_funcs)
)
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""
Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should be cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
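# --- Hedged illustration (not part of the upstream module) ---
# _cast_inplace() coerces each term's value to the requested dtype unless the
# term's type is already acceptable; arrays go through .astype() and plain
# scalars through the dtype's scalar constructor. A duck-typed stand-in term
# (.type / .value / .update) shows both paths:
class _ExampleCastTerm:
    def __init__(self, value):
        self.value = value
    @property
    def type(self):
        return getattr(self.value, "dtype", type(self.value))
    def update(self, value):
        self.value = value
def _example_cast():
    terms = [_ExampleCastTerm(np.array([1, 2], dtype=np.int64)), _ExampleCastTerm(3)]
    _cast_inplace(terms, [np.float64], np.float64)
    return [t.value for t in terms]  # -> [array([1., 2.]), 3.0]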
def is_term(obj) -> bool:
return isinstance(obj, Term)
class BinOp(Op):
"""
Hold a binary operator and its operands.
Parameters
----------
op : str
lhs : Term or Op
rhs : Term or Op
"""
def __init__(self, op: str, lhs, rhs):
super().__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError as err:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError(
f"Invalid binary operator {repr(op)}, valid operators are {keys}"
) from err
def __call__(self, env):
"""
Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
"""
Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == "python":
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
right = self.rhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
from pandas.core.computation.eval import eval
res = eval(self, local_dict=env, engine=engine, parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""
Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
encoder: Callable
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if (
(self.lhs.is_scalar or self.rhs.is_scalar)
and self.op in _bool_ops_dict
and (
not (
issubclass(self.rhs.return_type, (bool, np.bool_))
and issubclass(self.lhs.return_type, (bool, np.bool_))
)
)
):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype) -> bool:
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""
Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
"""
def __init__(self, lhs, rhs):
super().__init__("/", lhs, rhs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError(
f"unsupported operand type(s) for {self.op}: "
f"'{lhs.return_type}' and '{rhs.return_type}'"
)
# do not upcast float32s to float64 un-necessarily
acceptable_dtypes = [np.float32, np.float_]
_cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
_unary_ops_syms = ("+", "-", "~", "not")
_unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert)
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""
Hold a unary operator and its operands.
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op: str, operand):
super().__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError as err:
raise ValueError(
f"Invalid unary operator {repr(op)}, "
f"valid operators are {_unary_ops_syms}"
) from err
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __repr__(self) -> str:
return pprint_thing(f"{self.op}({self.operand})")
@property
def return_type(self) -> np.dtype:
operand = self.operand
if operand.return_type == np.dtype("bool"):
return np.dtype("bool")
if isinstance(operand, Op) and (
operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict
):
return np.dtype("bool")
return np.dtype("int")
class MathCall(Op):
def __init__(self, func, args):
super().__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
with np.errstate(all="ignore"):
return self.func.func(*operands)
def __repr__(self) -> str:
operands = map(str, self.operands)
return pprint_thing(f"{self.op}({','.join(operands)})")
class FuncNode:
def __init__(self, name: str):
from pandas.core.computation.check import _NUMEXPR_INSTALLED, _NUMEXPR_VERSION
if name not in _mathops or (
_NUMEXPR_INSTALLED
and _NUMEXPR_VERSION < LooseVersion("2.6.9")
and name in ("floor", "ceil")
):
raise ValueError(f'"{name}" is not a supported function')
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
|
bsd-3-clause
| -7,085,088,927,918,930,000
| 25.279605
| 86
| 0.550194
| false
| 3.817921
| false
| false
| false
|
AVSystem/Anjay
|
tests/integration/framework/asserts.py
|
1
|
10170
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import Optional
from .lwm2m.messages import *
from .test_utils import DEMO_ENDPOINT_NAME
from framework.lwm2m.coap.transport import Transport
class Lwm2mAsserts:
def assertLwm2mPathValid(self, path):
"""
Convenience assert that checks if a byte-string PATH is in the form
/0/1/2. The PATH may contain 1-3 16bit integer segments.
"""
self.assertEqual('/', path[0],
('LwM2M path %r does not start with /' % (path,)))
segments = path[1:].split('/')
if len(segments) > 3:
self.fail('LwM2M path too long (expected at most 3 segments): %r' % (path,))
for segment in segments:
try:
self.assertTrue(0 <= int(segment) <= 2 ** 16 - 1,
('LwM2M path segment not in range [0, 65535] '
'in path %r' % (path,)))
except ValueError:
self.fail('segment %r is not an integer in path %r' % (segment, path))
def assertLinkListValid(self, link_list):
"""
Convenience assert that checks if a byte-string LINK_LIST is in a CoRE
Link format https://tools.ietf.org/html/rfc6690 and all links are
valid LwM2M paths.
"""
if link_list == '':
self.fail('empty link list')
for obj in link_list.split(','):
path, *query = obj.split(';')
self.assertTrue((len(path) >= len('</0>')
and path[0] == '<'
and path[-1] == '>'),
'invalid link: %r in %r' % (obj, link_list))
self.assertLwm2mPathValid(path[1:-1])
# TODO: check query strings
def assertMsgEqual(self, expected, actual, msg=None):
"""
Convenience assert that checks if ACTUAL Lwm2mMsg object matches
EXPECTED one.
ACTUAL and EXPECTED may have their MSG_ID, TOKEN, OPTIONS or CONTENT
fields set to lwm2m.messages.ANY, in which case the value will not
be checked.
"""
msg_prefix = msg + ': ' if msg else ''
try:
if actual.version is not None:
self.assertEqual(expected.version, actual.version,
msg_prefix + 'unexpected CoAP version')
if actual.type is not None:
self.assertEqual(expected.type, actual.type,
msg_prefix + 'unexpected CoAP type')
self.assertEqual(expected.code, actual.code,
msg_prefix + 'unexpected CoAP code')
if expected.msg_id is not ANY and actual.msg_id is not ANY and actual.msg_id is not None:
self.assertEqual(expected.msg_id, actual.msg_id,
msg_prefix + 'unexpected CoAP message ID')
if expected.token is not ANY and actual.token is not ANY:
self.assertEqual(expected.token, actual.token,
msg_prefix + 'unexpected CoAP token')
if expected.options is not ANY and actual.options is not ANY:
self.assertEqual(expected.options, actual.options,
msg_prefix + 'unexpected CoAP option list')
if expected.content is not ANY and actual.content is not ANY:
self.assertEqual(expected.content, actual.content,
msg_prefix + 'unexpected CoAP content')
except AssertionError as e:
e.args = (e.args[0] + ('\n\n*** Expected ***\n%s\n*** Actual ***\n%s\n'
% (str(expected), str(actual))),) + e.args[1:]
raise
DEFAULT_REGISTER_ENDPOINT = '/rd/demo'
@staticmethod
def _expected_register_message(version, endpoint, lifetime, binding, lwm2m11_queue_mode):
# Note: the specific order of Uri-Query options does not matter, but
# our packet equality comparator does not distinguish betwen "ordered"
# and "unordered" options, so we expect a specific order of these
# query-strings. dict() does not guarantee the order of items until
# 3.7, so because we want to still work on 3.5, an explicitly ordered
# list is used instead.
query = [
'lwm2m=%s' % (version,),
'ep=%s' % (endpoint,),
'lt=%s' % (lifetime if lifetime is not None else 86400,)
]
if binding is not None:
query.append('b=%s' % (binding,))
if lwm2m11_queue_mode:
query.append('Q')
return Lwm2mRegister('/rd?' + '&'.join(query))
def assertDemoRegisters(self,
server=None,
version='1.0',
location=DEFAULT_REGISTER_ENDPOINT,
endpoint=DEMO_ENDPOINT_NAME,
lifetime=None,
timeout_s=2,
respond=True,
binding=None,
lwm2m11_queue_mode=False,
reject=False):
# passing a float instead of an integer results in a disaster
# (serializes as e.g. lt=4.0 instead of lt=4), which makes the
# assertion fail
if lifetime is not None:
self.assertIsInstance(lifetime, int, msg="lifetime MUST be an integer")
serv = server or self.serv
pkt = serv.recv(timeout_s=timeout_s)
self.assertMsgEqual(self._expected_register_message(version, endpoint, lifetime, binding, lwm2m11_queue_mode), pkt)
self.assertIsNotNone(pkt.content)
self.assertGreater(len(pkt.content), 0)
if respond:
if reject:
serv.send(Lwm2mErrorResponse(code=coap.Code.RES_UNAUTHORIZED, msg_id=pkt.msg_id, token=pkt.token))
else:
serv.send(Lwm2mCreated(location=location, msg_id=pkt.msg_id, token=pkt.token))
return pkt
def assertDemoUpdatesRegistration(self,
server=None,
location=DEFAULT_REGISTER_ENDPOINT,
lifetime: Optional[int] = None,
binding: Optional[str] = None,
sms_number: Optional[str] = None,
content: bytes = b'',
timeout_s: float = 1,
respond: bool = True):
serv = server or self.serv
query_args = (([('lt', lifetime)] if lifetime is not None else [])
+ ([('sms', sms_number)] if sms_number is not None else [])
+ ([('b', binding)] if binding is not None else []))
query_string = '&'.join('%s=%s' % tpl for tpl in query_args)
path = location
if query_string:
path += '?' + query_string
pkt = serv.recv(timeout_s=timeout_s)
self.assertMsgEqual(Lwm2mUpdate(path, content=content), pkt)
if respond:
serv.send(Lwm2mChanged.matching(pkt)())
return pkt
def assertDemoDeregisters(self, server=None, path=DEFAULT_REGISTER_ENDPOINT, timeout_s=2, reset=True):
serv = server or self.serv
pkt = serv.recv(timeout_s=timeout_s)
self.assertMsgEqual(Lwm2mDeregister(path), pkt)
serv.send(Lwm2mDeleted(msg_id=pkt.msg_id, token=pkt.token))
if reset:
serv.reset()
def assertDemoRequestsBootstrap(self, uri_path='', uri_query=None, respond_with_error_code=None,
endpoint=DEMO_ENDPOINT_NAME, timeout_s=-1, preferred_content_format=None):
pkt = self.bootstrap_server.recv(timeout_s=timeout_s)
self.assertMsgEqual(Lwm2mRequestBootstrap(endpoint_name=endpoint,
preferred_content_format=preferred_content_format,
uri_path=uri_path,
uri_query=uri_query), pkt)
if respond_with_error_code is None:
self.bootstrap_server.send(Lwm2mChanged.matching(pkt)())
else:
self.bootstrap_server.send(Lwm2mErrorResponse.matching(
pkt)(code=respond_with_error_code))
def assertDtlsReconnect(self, server=None, timeout_s=1):
serv = server or self.serv
with self.assertRaises(RuntimeError) as raised:
serv.recv(timeout_s=timeout_s)
self.assertIn('0x6780', raised.exception.args[0]) # -0x6780 == MBEDTLS_ERR_SSL_CLIENT_RECONNECT
def assertPktIsDtlsClientHello(self, pkt, seq_number=ANY):
if seq_number is not ANY and seq_number >= 2 ** 48:
raise RuntimeError(
"Sorry, encoding of sequence number greater than 2**48 - 1 is not supported")
allowed_headers = set()
for version in (b'\xfe\xfd', b'\xfe\xff'): # DTLS v1.0 or DTLS v1.2
header = b'\x16' # Content Type: Handshake
header += version
header += b'\x00\x00' # Epoch: 0
if seq_number is not ANY:
# Sequence number is 48bit in length.
header += seq_number.to_bytes(48 // 8, byteorder='big')
allowed_headers.add(header)
self.assertIn(pkt[:len(next(iter(allowed_headers)))], allowed_headers)
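# --- Hedged illustration (not part of the original test framework) ---
# assertPktIsDtlsClientHello() matches the leading bytes of a DTLS record:
# content type 0x16 (Handshake), a 2-byte version, a 2-byte epoch of 0 and a
# 48-bit big-endian sequence number (the 2-byte length field is not checked).
# The expected prefix for sequence number 5 under DTLS 1.2, built by hand:
_EXAMPLE_DTLS12_PREFIX = (
    b'\x16'                                   # Content Type: Handshake
    + b'\xfe\xfd'                             # Version: DTLS 1.2
    + b'\x00\x00'                             # Epoch: 0
    + (5).to_bytes(48 // 8, byteorder='big')  # Sequence number: 5
)
assert _EXAMPLE_DTLS12_PREFIX == b'\x16\xfe\xfd\x00\x00\x00\x00\x00\x00\x00\x05'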
|
apache-2.0
| -5,839,195,946,829,177,000
| 44.2
| 123
| 0.553392
| false
| 4.205955
| false
| false
| false
|
timpel/stanford-algs
|
quicksort/quicksort.py
|
1
|
1040
|
from random import randint
import sys
def sort(arr):
length = len(arr)
pivot_index = randint(0, length-1)
pivot = arr[pivot_index]
swap(arr, 0, pivot_index)
i = j = 1
while j < length:
if arr[j] < pivot:
swap(arr, j, i)
i += 1
j += 1
swap(arr, 0, i-1)
first_part = arr[:i-1]
second_part = arr[i:]
if i > 2:
first_part = sort(first_part)
if length - i > 1:
second_part = sort(second_part)
return first_part + [arr[i-1]] + second_part
def swap(arr, x, y):
temp = arr[x]
arr[x] = arr[y]
arr[y] = temp
def check(arr, length):
if length != len(arr):
print 'Array size changed!'
return False
for i in range(length-1):
if arr[i] > arr[i+1]:
print 'Sort Failed!'
return False
return True
def main(arr_len):
unsorted = [randint(0, arr_len) for n in range(arr_len)]
length = len(unsorted)
check(sort(unsorted), length)
if __name__ == '__main__':
try:
arr_len = int(sys.argv[1])
except (IndexError, ValueError):
print 'Format: python quicksort.py <array-length>'
sys.exit(1)  # avoid calling main() with arr_len undefined
main(arr_len)
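# --- Hedged usage sketch (not part of the original script) ---
# sort() is functional: it returns a new sorted list built from the two
# recursively sorted partitions around a random pivot (an empty input is not
# handled, since randint(0, -1) raises ValueError).
assert sort([3, 1, 2]) == [1, 2, 3]
assert sort([5]) == [5]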
|
mit
| -7,494,212,191,945,609,000
| 15.25
| 57
| 0.618269
| false
| 2.506024
| false
| false
| false
|
egenerat/bank-statement-analyser
|
analyzer/utils.py
|
1
|
2843
|
import datetime
import os
import sys
from constants import CATEGORIES, DIRECT_DEBIT_PAYMENT, ROOT_DIR
def sum_total_expenses(data_dict):
expenses_sum = 0
transaction_nb = 0
for i in data_dict:
if DIRECT_DEBIT_PAYMENT.lower() not in i['description'].lower() and i['amount'] < 0 and not i['amount'] == -720:
expenses_sum += i['amount']
transaction_nb += 1
return {
'expenses_sum': expenses_sum,
'transaction_nb': transaction_nb
}
def display_highest_amounts(expenses):
sorted_result = sorted(expenses, key=lambda x: x['amount'], reverse=True)
for i in sorted_result:
print('{date} {description} {amount}'.format(date=i['date'], description=i['description'], amount=i['amount']))
def display_sorted_categories(expenses):
result_to_display = order_by_category(expenses, CATEGORIES)
sorted_result = sorted(result_to_display.items(), key=lambda x: x[1]['amount'], reverse=True)
for i in sorted_result:
category_amount = i[1]['amount']
if category_amount != 0:
print('{cat}: {amount}'.format(cat=i[0], amount=category_amount))
# if result_to_display['unCategorized']['amount'] != 0:
# print('unCategorized:')
# print(result_to_display['unCategorized'])
# for i in result_to_display['unCategorized']['obj']:
# print(i)
def get_all_data_files():
walk_dir = ROOT_DIR
result = [os.path.join(root, f) for root, subdirs, files in os.walk(walk_dir) for f in files]
return result
def sort_expenses_by_month(expenses_list):
result = {}
for i in expenses_list:
expense_month = str(i['date'].month)
expense_year = str(i['date'].year)
period = expense_year + '-' + expense_month
if period not in result:
result[period] = []
result[period].append(i)
return result
def get_filename():
return sys.argv[1:]
def format_amount(amount):
print("{:10.2f}".format(amount))
def format_column(text):
return "{:10.2f}".format(text)
def date_from_string(str_date, pattern):
return datetime.datetime.strptime(str_date, pattern).date()
def order_by_category(expenses, categories):
result = {}
# initiate result
for i in categories:
result[i] = {
'amount': 0,
'obj': []
}
for i in expenses:
is_categorized = False
for j in categories:
for k in categories[j]:
if k.lower() in i['description'].lower():
result[j]['amount'] += i['amount']
result[j]['obj'].append(i)
is_categorized = True
if not is_categorized:
result['unCategorized']['amount'] += i['amount']
result['unCategorized']['obj'].append(i)
return result
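# --- Hedged usage sketch (not part of the original module) ---
# order_by_category() buckets expenses by case-insensitive substring match on
# the description and collects unmatched rows under 'unCategorized' (which
# must therefore be present in the categories dict). With throwaway data:
def _example_order_by_category():
    categories = {'groceries': ['supermarket'], 'unCategorized': []}
    expenses = [{'description': 'SUPERMARKET XYZ', 'amount': -42.5},
                {'description': 'mystery shop', 'amount': -7.0}]
    result = order_by_category(expenses, categories)
    return result  # groceries: -42.5, unCategorized: -7.0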
|
mit
| -471,902,158,696,964,860
| 29.244681
| 120
| 0.594442
| false
| 3.567127
| false
| false
| false
|
nayas360/pyterm
|
bin/set.py
|
1
|
1471
|
# set command to set global variables
from lib.utils import *
def _help():
usage = '''
Usage: set [options] (var) [value]
[options]:
-h Print this help.
-del (var) Delete variable
(var) if defined.
where (var) is a valid
global variable
if [value] is not given,
current value is returned
'''
print(usage)
def main(argv):
if '-h' in argv:
_help()
return
# The shell doesn't send the
# command name in the arg list,
# so the next line is no longer
# needed
# argv.pop(0) #remove arg
# to show all vars
if len(argv) < 1:
for i in prop.vars():
print(i, ' = ', prop.get(i))
return
if '-del' in argv:
try:
var = argv[1]
# detect system vars
if var == 'save_state' or var == 'c_char':
err(4, add='Cant delete system variable "' + var + '"')
return
prop.delete(var)
return
except IndexError:
err(4, add='variable name was missing')
return
var = argv[0]
if len(argv) < 2:
val = prop.get(var)
if val == NULL:
err(4, var)
return
print(val)
return
# remove name of var
argv.pop(0)
# make the rest the val
val = make_s(argv)
try:
prop.set(var, val)
except ValueError:
err(4, add="can't create this variable")
|
mit
| 2,858,846,573,396,601,000
| 20.632353
| 71
| 0.507138
| false
| 3.752551
| false
| false
| false
|
WladimirSidorenko/DiscourseSenser
|
dsenser/xgboost/xgboostbase.py
|
1
|
2813
|
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; -*-
"""Module providing abstract interface class for XGBoost sense calssification.
Attributes:
XGBoostBaseSenser (class):
abstract class defining interface for explicit and implicit classifier
"""
##################################################################
# Imports
from __future__ import absolute_import, print_function
from sklearn.feature_extraction import DictVectorizer
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score, make_scorer
from sklearn.pipeline import Pipeline
from xgboost import XGBClassifier
##################################################################
# Constants
MAX_DEPTH = 9 # maximim depth of tree
NTREES = 600 # number of tree estimators
ALPHA = 0.05 # learning rate
BASE_PARAM_GRID = {"clf__max_depth": [3 * i for i in xrange(1, 3)],
"clf__n_estimators": [100 * i for i in xrange(1, 2)]}
BASE_N_JOBS = 1 # xgboost does not support grid parallelization
# as it relies on openmp
##################################################################
# Class
class XGBoostBaseSenser(object):
"""Base sense classifier using XGBoost.
"""
def __init__(self, a_clf=None, a_grid_search=False):
"""Class constructor.
Args:
a_clf (classifier or None):
classifier to use or None for default
a_grid_search (bool): use grid search for estimating
hyper-parameters
"""
classifier = a_clf
self._gs = a_grid_search
if a_clf is None:
classifier = XGBClassifier(max_depth=MAX_DEPTH,
n_estimators=NTREES,
learning_rate=ALPHA,
objective="multi:softprob")
self._clf = classifier
# latest version of XGBoost cannot deal with non-sparse feature vectors
self._model = Pipeline([("vect", DictVectorizer()),
("clf", classifier)])
def _predict(self, a_feats, a_ret, a_i):
"""Method for predicting sense of single relation.
Args:
a_feats (dict):
features of the input instance
a_ret (np.array):
output prediction vector
a_i (int):
row index in the output vector
Returns:
void:
Note:
updates ``a_ret`` in place
"""
ret = self._model.predict_proba(a_feats)[0]
if self._clf is None:
a_ret[a_i] += ret
else:
for i, j in enumerate(ret):
a_ret[a_i][self._clf._le.inverse_transform(i)] += j
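# --- Hedged usage sketch (not part of the original module) ---
# The senser feeds dict features through a DictVectorizer into XGBoost, so
# training data is simply a list of feature dicts plus labels. Assuming
# scikit-learn and xgboost are installed, fitting the default pipeline might
# look like this (hypothetical helper, for illustration):
def _example_fit(train_feats, train_labels):
    senser = XGBoostBaseSenser()
    # the pipeline handles the dict -> sparse matrix conversion internally
    senser._model.fit(train_feats, train_labels)
    return senser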
|
mit
| 6,529,310,863,707,153,000
| 31.709302
| 79
| 0.520441
| false
| 4.515249
| false
| false
| false
|
neilLasrado/erpnext
|
erpnext/projects/doctype/timesheet/timesheet.py
|
1
|
14333
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import json
from datetime import timedelta
from erpnext.controllers.queries import get_match_cond
from frappe.utils import flt, time_diff_in_hours, get_datetime, getdate, cint, date_diff, add_to_date
from frappe.model.document import Document
from erpnext.manufacturing.doctype.workstation.workstation import (check_if_within_operating_hours,
WorkstationHolidayError)
from erpnext.manufacturing.doctype.manufacturing_settings.manufacturing_settings import get_mins_between_operations
class OverlapError(frappe.ValidationError): pass
class OverWorkLoggedError(frappe.ValidationError): pass
class Timesheet(Document):
def validate(self):
self.set_employee_name()
self.set_status()
self.validate_dates()
self.validate_time_logs()
self.calculate_std_hours()
self.update_cost()
self.calculate_total_amounts()
self.calculate_percentage_billed()
self.set_dates()
def set_employee_name(self):
if self.employee and not self.employee_name:
self.employee_name = frappe.db.get_value('Employee', self.employee, 'employee_name')
def calculate_total_amounts(self):
self.total_hours = 0.0
self.total_billable_hours = 0.0
self.total_billed_hours = 0.0
self.total_billable_amount = 0.0
self.total_costing_amount = 0.0
self.total_billed_amount = 0.0
for d in self.get("time_logs"):
self.update_billing_hours(d)
self.update_time_rates(d)
self.total_hours += flt(d.hours)
self.total_costing_amount += flt(d.costing_amount)
if d.billable:
self.total_billable_hours += flt(d.billing_hours)
self.total_billable_amount += flt(d.billing_amount)
self.total_billed_amount += flt(d.billing_amount) if d.sales_invoice else 0.0
self.total_billed_hours += flt(d.billing_hours) if d.sales_invoice else 0.0
def calculate_percentage_billed(self):
self.per_billed = 0
if self.total_billed_amount > 0 and self.total_billable_amount > 0:
self.per_billed = (self.total_billed_amount * 100) / self.total_billable_amount
def update_billing_hours(self, args):
if args.billable:
if flt(args.billing_hours) == 0.0:
args.billing_hours = args.hours
else:
args.billing_hours = 0
def set_status(self):
self.status = {
"0": "Draft",
"1": "Submitted",
"2": "Cancelled"
}[str(self.docstatus or 0)]
if self.per_billed == 100:
self.status = "Billed"
if self.salary_slip:
self.status = "Payslip"
if self.sales_invoice and self.salary_slip:
self.status = "Completed"
def set_dates(self):
if self.docstatus < 2 and self.time_logs:
start_date = min([getdate(d.from_time) for d in self.time_logs])
end_date = max([getdate(d.to_time) for d in self.time_logs])
if start_date and end_date:
self.start_date = getdate(start_date)
self.end_date = getdate(end_date)
def calculate_std_hours(self):
std_working_hours = frappe.get_value("Company", self.company, 'standard_working_hours')
for time in self.time_logs:
if time.from_time and time.to_time:
if flt(std_working_hours) and date_diff(time.to_time, time.from_time):
time.hours = flt(std_working_hours) * date_diff(time.to_time, time.from_time)
else:
if not time.hours:
time.hours = time_diff_in_hours(time.to_time, time.from_time)
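	# Illustrative note (not in the original file): with a company
	# standard of 8 working hours, a log whose to_time falls two days
	# after its from_time gets hours = 8 * 2 = 16; same-day logs fall
	# back to the plain from/to time difference.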
def before_cancel(self):
self.set_status()
def on_cancel(self):
self.update_task_and_project()
def on_submit(self):
self.validate_mandatory_fields()
self.update_task_and_project()
def validate_mandatory_fields(self):
for data in self.time_logs:
if not data.from_time and not data.to_time:
frappe.throw(_("Row {0}: From Time and To Time is mandatory.").format(data.idx))
if not data.activity_type and self.employee:
frappe.throw(_("Row {0}: Activity Type is mandatory.").format(data.idx))
if flt(data.hours) == 0.0:
frappe.throw(_("Row {0}: Hours value must be greater than zero.").format(data.idx))
def update_task_and_project(self):
tasks, projects = [], []
for data in self.time_logs:
if data.task and data.task not in tasks:
task = frappe.get_doc("Task", data.task)
task.update_time_and_costing()
task.save()
tasks.append(data.task)
elif data.project and data.project not in projects:
frappe.get_doc("Project", data.project).update_project()
projects.append(data.project)
def validate_dates(self):
for data in self.time_logs:
if data.from_time and data.to_time and time_diff_in_hours(data.to_time, data.from_time) < 0:
frappe.throw(_("To date cannot be before from date"))
def validate_time_logs(self):
for data in self.get('time_logs'):
self.validate_overlap(data)
self.validate_task_project()
def validate_overlap(self, data):
settings = frappe.get_single('Projects Settings')
self.validate_overlap_for("user", data, self.user, settings.ignore_user_time_overlap)
self.validate_overlap_for("employee", data, self.employee, settings.ignore_employee_time_overlap)
def validate_task_project(self):
for log in self.time_logs:
log.project = log.project or frappe.db.get_value("Task", log.task, "project")
def validate_overlap_for(self, fieldname, args, value, ignore_validation=False):
if not value or ignore_validation:
return
existing = self.get_overlap_for(fieldname, args, value)
if existing:
frappe.throw(_("Row {0}: From Time and To Time of {1} is overlapping with {2}")
.format(args.idx, self.name, existing.name), OverlapError)
def get_overlap_for(self, fieldname, args, value):
cond = "ts.`{0}`".format(fieldname)
if fieldname == 'workstation':
cond = "tsd.`{0}`".format(fieldname)
existing = frappe.db.sql("""select ts.name as name, tsd.from_time as from_time, tsd.to_time as to_time from
`tabTimesheet Detail` tsd, `tabTimesheet` ts where {0}=%(val)s and tsd.parent = ts.name and
(
(%(from_time)s > tsd.from_time and %(from_time)s < tsd.to_time) or
(%(to_time)s > tsd.from_time and %(to_time)s < tsd.to_time) or
(%(from_time)s <= tsd.from_time and %(to_time)s >= tsd.to_time))
and tsd.name!=%(name)s
and ts.name!=%(parent)s
and ts.docstatus < 2""".format(cond),
{
"val": value,
"from_time": args.from_time,
"to_time": args.to_time,
"name": args.name or "No Name",
"parent": args.parent or "No Name"
}, as_dict=True)
# check internal overlap
for time_log in self.time_logs:
if not (time_log.from_time and time_log.to_time
and args.from_time and args.to_time): continue
if (fieldname != 'workstation' or args.get(fieldname) == time_log.get(fieldname)) and \
args.idx != time_log.idx and ((args.from_time > time_log.from_time and args.from_time < time_log.to_time) or
(args.to_time > time_log.from_time and args.to_time < time_log.to_time) or
(args.from_time <= time_log.from_time and args.to_time >= time_log.to_time)):
return self
return existing[0] if existing else None
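	# Illustrative note (not in the original file): for non-empty
	# intervals, the three overlap conditions above reduce to the
	# standard predicate "each log starts before the other one ends",
	# e.g. a hypothetical standalone check:
	#
	#     def intervals_overlap(a_from, a_to, b_from, b_to):
	#         return a_from < b_to and b_from < a_to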
def update_cost(self):
for data in self.time_logs:
if data.activity_type or data.billable:
rate = get_activity_cost(self.employee, data.activity_type)
hours = data.billing_hours or 0
costing_hours = data.billing_hours or data.hours or 0
if rate:
data.billing_rate = flt(rate.get('billing_rate')) if flt(data.billing_rate) == 0 else data.billing_rate
data.costing_rate = flt(rate.get('costing_rate')) if flt(data.costing_rate) == 0 else data.costing_rate
data.billing_amount = data.billing_rate * hours
data.costing_amount = data.costing_rate * costing_hours
def update_time_rates(self, ts_detail):
if not ts_detail.billable:
ts_detail.billing_rate = 0.0
@frappe.whitelist()
def get_projectwise_timesheet_data(project, parent=None):
cond = ''
if parent:
cond = "and parent = %(parent)s"
return frappe.db.sql("""select name, parent, billing_hours, billing_amount as billing_amt
from `tabTimesheet Detail` where parenttype = 'Timesheet' and docstatus=1 and project = %(project)s {0} and billable = 1
and sales_invoice is null""".format(cond), {'project': project, 'parent': parent}, as_dict=1)
@frappe.whitelist()
def get_timesheet(doctype, txt, searchfield, start, page_len, filters):
if not filters: filters = {}
condition = ""
if filters.get("project"):
condition = "and tsd.project = %(project)s"
return frappe.db.sql("""select distinct tsd.parent from `tabTimesheet Detail` tsd,
`tabTimesheet` ts where
ts.status in ('Submitted', 'Payslip') and tsd.parent = ts.name and
tsd.docstatus = 1 and ts.total_billable_amount > 0
and tsd.parent LIKE %(txt)s {condition}
order by tsd.parent limit %(start)s, %(page_len)s"""
.format(condition=condition), {
'txt': '%' + txt + '%',
"start": start, "page_len": page_len, 'project': filters.get("project")
})
@frappe.whitelist()
def get_timesheet_data(name, project):
data = None
if project and project!='':
data = get_projectwise_timesheet_data(project, name)
else:
data = frappe.get_all('Timesheet',
fields = ["(total_billable_amount - total_billed_amount) as billing_amt", "total_billable_hours as billing_hours"], filters = {'name': name})
return {
'billing_hours': data[0].billing_hours if data else None,
'billing_amount': data[0].billing_amt if data else None,
'timesheet_detail': data[0].name if data and project and project!= '' else None
}
@frappe.whitelist()
def make_sales_invoice(source_name, item_code=None, customer=None, project=None):
target = frappe.new_doc("Sales Invoice")
timesheet = frappe.get_doc('Timesheet', source_name)
if not timesheet.total_billable_hours:
frappe.throw(_("Invoice can't be made for zero billing hour"))
if timesheet.total_billable_hours == timesheet.total_billed_hours:
frappe.throw(_("Invoice already created for all billing hours"))
hours = flt(timesheet.total_billable_hours) - flt(timesheet.total_billed_hours)
billing_amount = flt(timesheet.total_billable_amount) - flt(timesheet.total_billed_amount)
billing_rate = billing_amount / hours
target.company = timesheet.company
if customer:
target.customer = customer
if project:
target.project = project
if item_code:
target.append('items', {
'item_code': item_code,
'qty': hours,
'rate': billing_rate
})
target.append('timesheets', {
'time_sheet': timesheet.name,
'billing_hours': hours,
'billing_amount': billing_amount,
'item_code': item_code
})
target.run_method("calculate_billing_amount_for_timesheet")
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
return target
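# Illustrative usage sketch (not in the original file): the timesheet
# name, item code, and customer below are made-up values.
#
#     invoice = make_sales_invoice("TS-00042", item_code="CONSULTING",
#                                  customer="ACME Corp")
#     invoice.insert()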
@frappe.whitelist()
def make_salary_slip(source_name, target_doc=None):
target = frappe.new_doc("Salary Slip")
set_missing_values(source_name, target)
target.run_method("get_emp_and_leave_details")
return target
def set_missing_values(time_sheet, target):
doc = frappe.get_doc('Timesheet', time_sheet)
target.employee = doc.employee
target.employee_name = doc.employee_name
target.salary_slip_based_on_timesheet = 1
target.start_date = doc.start_date
target.end_date = doc.end_date
target.posting_date = doc.modified
target.total_working_hours = doc.total_hours
target.append('timesheets', {
'time_sheet': doc.name,
'working_hours': doc.total_hours
})
@frappe.whitelist()
def get_activity_cost(employee=None, activity_type=None):
rate = frappe.db.get_values("Activity Cost", {"employee": employee,
"activity_type": activity_type}, ["costing_rate", "billing_rate"], as_dict=True)
if not rate:
rate = frappe.db.get_values("Activity Type", {"activity_type": activity_type},
["costing_rate", "billing_rate"], as_dict=True)
return rate[0] if rate else {}
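# Illustrative call (not in the original file); the employee and
# activity type values are made up. Rates come from a matching
# "Activity Cost" record, falling back to the "Activity Type" defaults:
#
#     get_activity_cost(employee="HR-EMP-0001", activity_type="Planning")
#     # => e.g. {"costing_rate": 50.0, "billing_rate": 75.0}, or {} if
#     #    neither record defines rates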
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
	filters = json.loads(filters) if filters else []
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Timesheet", filters)
return frappe.db.sql("""select `tabTimesheet Detail`.name as name,
`tabTimesheet Detail`.docstatus as status, `tabTimesheet Detail`.parent as parent,
from_time as start_date, hours, activity_type,
`tabTimesheet Detail`.project, to_time as end_date,
CONCAT(`tabTimesheet Detail`.parent, ' (', ROUND(hours,2),' hrs)') as title
from `tabTimesheet Detail`, `tabTimesheet`
where `tabTimesheet Detail`.parent = `tabTimesheet`.name
and `tabTimesheet`.docstatus < 2
and (from_time <= %(end)s and to_time >= %(start)s) {conditions} {match_cond}
""".format(conditions=conditions, match_cond = get_match_cond('Timesheet')),
{
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
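# Illustrative call (not in the original file); the date range and the
# filter values are made up. ``filters`` must be a JSON-encoded string:
#
#     get_events("2020-01-01", "2020-01-31",
#                filters='[["Timesheet", "employee", "=", "HR-EMP-0001"]]')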
def get_timesheets_list(doctype, txt, filters, limit_start, limit_page_length=20, order_by="modified"):
user = frappe.session.user
# find customer name from contact.
customer = ''
timesheets = []
contact = frappe.db.exists('Contact', {'user': user})
if contact:
# find customer
contact = frappe.get_doc('Contact', contact)
customer = contact.get_link_for('Customer')
if customer:
sales_invoices = [d.name for d in frappe.get_all('Sales Invoice', filters={'customer': customer})] or [None]
projects = [d.name for d in frappe.get_all('Project', filters={'customer': customer})]
# Return timesheet related data to web portal.
timesheets = frappe.db.sql('''
SELECT
ts.name, tsd.activity_type, ts.status, ts.total_billable_hours,
COALESCE(ts.sales_invoice, tsd.sales_invoice) AS sales_invoice, tsd.project
FROM `tabTimesheet` ts, `tabTimesheet Detail` tsd
WHERE tsd.parent = ts.name AND
(
ts.sales_invoice IN %(sales_invoices)s OR
tsd.sales_invoice IN %(sales_invoices)s OR
tsd.project IN %(projects)s
)
ORDER BY `end_date` ASC
LIMIT {0}, {1}
'''.format(limit_start, limit_page_length), dict(sales_invoices=sales_invoices, projects=projects), as_dict=True) #nosec
return timesheets
def get_list_context(context=None):
return {
"show_sidebar": True,
"show_search": True,
'no_breadcrumbs': True,
"title": _("Timesheets"),
"get_list": get_timesheets_list,
"row_template": "templates/includes/timesheet/timesheet_row.html"
}
|
gpl-3.0
| -2,669,668,574,416,058,400
| 34.654229
| 144
| 0.701388
| false
| 3.007344
| false
| false
| false
|