text stringlengths 4 1.02M | meta dict |
|---|---|
# Third-party MySQL driver (PyMySQL), aliased to match MySQLdb-style usage.
import pymysql as sql
import platform,re, os, shutil, signal, sys, _thread as thread, time, urllib, socketserver as SocketServer, subprocess, codecs
# Without the -judge flag the script only prints usage information and exits.
if "-judge" not in sys.argv:
    print("\nArgus Online Judge : Execution Protocol (Linux Version 1.0)")
    print("\nCommand Line Options :")
    print(" -judge : Connect to the server and start judging submissions.")
    print(" -cache : Use IO Files in Current Directory instead of downloading them.")
    print()
    sys.exit(0)
timeoffset = 0
# File Read/Write Functions
def file_read(filename):
    """Return the contents of *filename* decoded as UTF-8, with CR stripped.

    Returns an empty string when the file does not exist.
    """
    if not os.path.exists(filename):
        return ""
    # Context manager guarantees the handle is closed even if read() raises
    # (the original closed it manually and could leak on error).
    with codecs.open(filename, "r", "utf-8") as f:
        return f.read().replace("\r", "")
def file_write(filename,data):
    """Write *data* to *filename* as UTF-8, normalising CRLF/CR to LF."""
    # Context manager guarantees the handle is flushed and closed even if
    # write() raises (the original closed it manually and could leak).
    with codecs.open(filename, "w", "utf-8") as f:
        f.write(data.replace("\r", ""))
def getEnv(key, default):
    """Return environment variable *key*, or *default* when it is unset."""
    return os.environ.get(key, default)
def getDockerSecretValue(key, fallback):
    """Resolve a Docker-secret backed setting.

    *key* names an environment variable that points at a secret file; when
    that file exists its stripped contents win, otherwise *fallback* is used.
    """
    secret_path = getEnv(key, None)
    if not (secret_path and os.path.exists(secret_path)):
        return fallback
    return file_read(secret_path).strip()
# Initialize Database and judge Constants
# Each setting first tries a Docker secret file (via the *_FILE env var),
# then a plain environment variable, then a hard-coded default.
sql_hostname = getDockerSecretValue('AURORA_SQL_HOSTNAME_FILE', getEnv('AURORA_SQL_HOSTNAME', '127.0.0.1'))
# NOTE(review): when AURORA_SQL_HOSTPORT comes from the environment it is a
# str, while the default is the int 3306; pymysql's port argument expects an
# int -- confirm the env var is never set, or cast it.
sql_hostport = getDockerSecretValue('AURORA_SQL_HOSTPORT_FILE', getEnv('AURORA_SQL_HOSTPORT', 3306))
sql_username = getDockerSecretValue('AURORA_SQL_USERNAME_FILE', getEnv('AURORA_SQL_USERNAME', 'aurora'))
sql_password = getDockerSecretValue('AURORA_SQL_PASSWORD_FILE', getEnv('AURORA_SQL_PASSWORD', 'aurora'))
sql_database = getDockerSecretValue('AURORA_SQL_DATABASE_FILE', getEnv('AURORA_SQL_DATABASE', 'aurora_main'))
# Address the TCP control server binds to (see the __main__ block).
HOST, PORT = "0.0.0.0", 8723
#timeoffset = 19800
# Initialize Language Constants
# Prefix prepended to PHP submissions so runtime errors land in env/error.txt.
php_prefix = "<?php ini_set('log_errors',1); ini_set('error_log','env/error.txt'); ?>"
# Shell redirection suffix shared by the compile commands below.
ioeredirect = " 0<env/input.txt 1>env/output.txt 2>env/error.txt"
# Addition of new Language requires change below.
# NOTE : You may need to add few lines in 'create' function too on addition of a new language.
# Per-language table: "system" probes for the tool, "compile" (optional)
# builds the submission, "execute" runs it; [codefilename]/[exename]/
# [inputfile] are placeholders substituted at judge time.
langarr = {
    "AWK": {"extension": "awk", "system":"find /usr/bin/ -name awk", "execute":"awk -f env/[exename].awk[inputfile]"},
    "Bash": {"extension": "sh", "system":"find /bin/ -name bash", "execute":"bash env/[exename].sh[inputfile]"},
    "Brain" : {"extension": "b", "system":"find /usr/bin/ -name bf", "execute":"bf env/[exename].b[inputfile]"},
    "C" : {"extension":"c", "system":"find /usr/bin/ -name cc", "compile":"cc env/[codefilename].c -O2 -fomit-frame-pointer -o env/[codefilename] -lm"+ioeredirect, "execute":"env/[exename][inputfile]"},
    "C++": {"extension": "cpp", "system": "find /usr/bin/ -name g++", "compile": "g++ env/[codefilename].cpp -O2 -fomit-frame-pointer -o env/[codefilename]"+ioeredirect, "execute": "env/[exename][inputfile]"},
    "C#" : {"extension": "cs", "system":"find /usr/bin/ -name mcs", "compile":"mcs env/[codefilename].cs -out:env/[codefilename].exe"+ioeredirect, "execute":"mono env/[exename].exe[inputfile]"},
    "Java" : {"extension" : "java", "system":"find /usr/bin/ -name javac", "compile":"javac -g:none -Xlint -d env env/[codefilename].java"+ioeredirect, "execute":"java -client -classpath env [exename][inputfile]"},
    "JavaScript": {"extension":"js", "system": "find /usr/bin/ -name rhino", "execute":"rhino -f env/[exename].js[inputfile]"},
    "Pascal": {"extension":"pas", "system":"find /usr/bin/ -name fpc", "compile":"fpc env/[codefilename].pas -O2 -oenv/[codefilename]"+ioeredirect, "execute":"env/[exename][inputfile]"},
    "Perl": {"extension":"pl", "system":"find /usr/bin/ -name perl", "execute":"perl env/[exename].pl[inputfile]"},
    "PHP": {"extension":"php", "system":"find /usr/bin/ -name php", "execute":"php -f env/[exename].php[inputfile]"},
    "Python": {"extension":"py", "system":"find /usr/bin/ -name python2", "execute":"python2 env/[exename].py[inputfile]"},
    "Python3": {"extension":"py", "system":"find /usr/bin/ -name python3", "execute":"python3 env/[exename].py[inputfile]"},
    "Ruby": {"extension":"rb", "system":"find /usr/bin/ -name ruby", "execute":"ruby env/[exename].rb[inputfile]"},
    "Text": {"extension":"txt"}
}
# Define useful variables
running = 0
mypid = int(os.getpid())
# Wall-clock time of the last execute() call, read back by runjudge().
timediff = 0
# Filled by system() with every language whose tool is present.
languages = []
# Systems Check
def system():
    """Probe the host for each tool in langarr and record every language
    whose interpreter/compiler is present in the global ``languages`` list."""
    global languages
    if not os.path.isdir("env"):
        os.mkdir("env")
    for lang in langarr:
        if lang == "Text":
            continue
        # A non-empty `find` result means the tool exists on this system.
        if os.popen(langarr[lang]["system"]).read() != "":
            languages.append(lang)
# Program Compilation
def create(codefilename,language):
    """Compile the submitted source for the compiled languages.

    Returns "CE" when compilation failed (expected artefact missing),
    or None when the language needs no compilation or the build succeeded.
    """
    if language not in ('C', 'C++', 'C#', 'Java', 'Pascal'):
        return None
    print("Compiling Code File ...")
    compilecmd = langarr[language]["compile"].replace("[codefilename]", codefilename)
    print(compilecmd)
    # The compile command was duplicated in every branch of the original;
    # it is identical for all languages, so run it once up front.
    os.system(compilecmd)
    result = None
    # Success is detected by the presence of the expected build artefact.
    if language == "Java":
        # Classes declared in a package land under env/main/ instead.
        if not (os.path.exists("env/" + codefilename + ".class")
                or os.path.exists("env/main/" + codefilename + ".class")):
            result = "CE"
    elif language == "C#":
        if not os.path.exists("env/" + codefilename + ".exe"):
            result = "CE"
    else:
        if not os.path.exists("env/" + codefilename):
            result = "CE"
    # `is None` instead of the original `== None` comparison.
    if result is None:
        print("Code File Compiled to Executable.")
    else:
        print("Compilation Error")
    return result
# Program Execution
def execute(exename,language, timelimit):
    """Run the prepared submission as the unprivileged 'judge' user.

    Redirects stdio to env/{input,output,error}.txt, enforces *timelimit*
    seconds of wall-clock time, then kills anything the submission spawned.
    Returns the shell return code; 124 is used as the TLE marker.
    """
    # Check if docker secrets are used and if so make them readable to only
    # root user.
    if(os.path.exists("/run/secrets")): os.system("chmod -R 500 /run/secrets")
    global running, timediff
    # Redirection suffix substituted for the [inputfile] placeholder in the
    # langarr command templates.
    inputfile = " <env/input.txt 1>env/output.txt 2>env/error.txt"
    # Java classes compiled into a package land in env/main/ (cf. create()).
    if language == "Java" and not(os.path.exists("env/"+exename+".class")):
        exename = "main/"+exename
    # Limiting max process that can be spawned by a user to 100 to protect against
    # fork bombs. Also, switching the user to 'judge' to run the submitted program.
    # After the allocated time limit, all process spawned by 'judge' user is killed.
    cmd = 'ulimit -p 100; su judge -c \"'+langarr[language]["execute"]+"; exit;\""
    cmd = cmd.replace("[exename]", exename)
    cmd = cmd.replace("[inputfile]", inputfile)
    # Change permission to allow only root user to be
    # able to execute commands in the directory.
    os.system("chmod 100 .")
    # Make files input, error and output file accessible
    # to every user
    if(os.path.exists("env/input.txt")): os.system("chmod 777 env/input.txt")
    if(os.path.exists("env/error.txt")): os.system("chmod 777 env/error.txt")
    if(os.path.exists("env/output.txt")): os.system("chmod 777 env/output.txt")
    # Run program for limited time
    starttime = time.time()
    # setsid puts the submission in its own process group/session so the
    # whole tree can be cleaned up afterwards.
    proc = subprocess.Popen([cmd], shell=True, preexec_fn=os.setsid)
    try:
        print(proc.communicate(timeout=timelimit))
        t = proc.returncode
    except subprocess.TimeoutExpired:
        t = 124 # Code for TLE
    endtime = time.time()
    # Module-level timediff is read by runjudge() to report the time taken.
    timediff = endtime - starttime
    # kill all process spawned by user 'judge'
    os.system("pkill -u judge")
    # Make directly readable / executable again to root user.
    os.system("chmod 750 .")
    print("Return Code : "+str(t))
    return t
# Perform system checks
if(platform.system()!='Linux'):
    print("Error : This script can only be run on Linux.")
    sys.exit(0)
# Print Heading
os.system("clear")
print("\nArgus Online Judge : Execution Protocol\n")
# System Check -- populate the global `languages` list with supported tools.
system()
if len(languages)==0:
    print("Error : No Languages supported on this System.")
    sys.exit(1)
# 'Text' submissions need no tool, so they are always supported.
else: languages.append('Text')
print("Supported Languages : "+str(languages)+"\n")
sys.stdout.flush()
def runjudge(runid):
    """Fetch run *runid* from the database, judge it, and store the verdict.

    Writes one of AC / PE / WA / TLE / RTE / CE plus the time taken back to
    the `runs` and `subs_code` tables.  Any pymysql error aborts the run.
    """
    try:
        # Connect to Database
        print(runid)
        print("Connecting to Server ...")
        link = sql.connect(host=sql_hostname,port=sql_hostport,user=sql_username,passwd=sql_password,db=sql_database,charset='utf8')
        cursor = link.cursor(sql.cursors.DictCursor)
        print("Connected to Server ...")
        print()
        # NOTE(review): runid and the languages tuple are interpolated straight
        # into the SQL text; safe only while runid is always an int and
        # language names never contain quotes -- parameterized queries would
        # be safer.
        if "-cache" not in sys.argv:
            cursor.execute("SELECT runs.rid as rid,runs.pid as pid,tid,runs.language,subs_code.name as name,subs_code.code as code,error,input,problems.output as output,timelimit FROM runs,problems, subs_code WHERE problems.pid=runs.pid and runs.access!='deleted' and runs.rid = subs_code.rid and runs.rid = '"+str(runid)+"' and runs.language in "+str(tuple(languages))+" ORDER BY runs.rid ASC LIMIT 0,1")
        else:
            # With -cache the problem IO is read from local files, so the
            # query omits the (large) input/output columns.
            cursor.execute("SELECT runs1.rid as rid,runs1.pid as pid,tid,runs1.language,subs_code.name as name,subs_code.code as code,error,timelimit FROM runs AS runs1,problems, subs_code WHERE problems.pid=runs1.pid and runs1.rid = subs_code.rid and runs1.access!='deleted' and runs1.rid = '"+str(runid)+"' and runs1.language in "+str(tuple(languages))+" ORDER BY runs1.rid ASC LIMIT 0,1")
        # Select an Unjudged Submission
        run = cursor.fetchone()
        # Mark the run as in-progress ('...') so other judges skip it.
        cursor.execute("UPDATE runs SET result='...' WHERE rid='%d'" % (run["rid"]))
        print("Selected Run ID %d for Evaluation." % (run["rid"]))
        os.system("rm -r env/*")
        print("Cleared Environment for Program Execution.")
        # Initialize Variables
        result = None
        timetaken = 0
        sys.stdout.flush()
        # Write Code & Input File
        if result==None:
            # Java needs the file named after its public class; Text
            # submissions ARE the output; everything else is 'code'.
            if run["language"]=="Java": codefilename = run["name"]
            elif run["language"]=="Text": codefilename = "output"
            else: codefilename = "code"
            codefile = codecs.open("env/"+codefilename+"."+langarr[run["language"]]["extension"],"w","utf-8")
            if(run["language"]=="PHP"): codefile.write(php_prefix) # append prefix for PHP
            codefile.write(run["code"].replace("\r","")); codefile.close()
            if "-cache" not in sys.argv: file_write("env/input.txt",run["input"])
            else:
                # Cache problem IO on disk so repeated judging skips the
                # database download.
                if not(os.path.exists("io_cache")):
                    os.mkdir("io_cache")
                try:
                    with open("io_cache/Aurora Online Judge - Problem ID "+str(run["pid"])+" - Input.txt"): pass
                except IOError:
                    # Cache miss: fetch input/output from the DB and store.
                    cursor.execute("Select input, output from problems where pid ="+str(run["pid"]))
                    filecreate = cursor.fetchone()
                    file_write("io_cache/Aurora Online Judge - Problem ID "+str(run["pid"])+" - Input.txt", filecreate['input'])
                    file_write("io_cache/Aurora Online Judge - Problem ID "+str(run["pid"])+" - Output.txt", filecreate['output'])
                shutil.copyfile("io_cache/Aurora Online Judge - Problem ID "+str(run["pid"])+" - Input.txt","env/input.txt")
        print("Code & Input File Created.")
        # Compile, if required
        if result==None:
            result = create(codefilename,run["language"]) # Compile
        sys.stdout.flush()
        # Increase Time Limit in case some languages
        # (VM / interpreted languages get 2x or 3x the base limit).
        if run["language"] in ('Java', 'Python', 'Python3', 'Ruby', 'PHP', 'C#', 'JavaScript'):
            if run["language"] in ("Java", "C#", "JavaScript"):
                run['timelimit'] *= 2
            elif run["language"] in ("Python", "Ruby", "PHP", "Python3"):
                run['timelimit'] *= 3
        # Run the program through a new thread, and kill it after some time
        if result==None and run["language"]!="Text":
            print("Spawning process ...")
            t = execute(codefilename,run["language"], run['timelimit'])
            #while running==0: pass # Wait till process begins
            print("Process Complete!")
            # Map the return code to a verdict / error banner; 124 is the
            # TLE marker set by execute().
            if t == 124:
                result = "TLE"
                timetaken = run["timelimit"]
                #kill(codefilename,run["language"])
                file_write('env/error.txt', "Time Limit Exceeded - Process killed.")
            elif t == 139:
                file_write('env/error.txt', 'SIGSEGV||Segmentation fault (core dumped)\n'+file_read("env/error.txt"))
                timetaken = timediff
            elif t == 136:
                file_write('env/error.txt', 'SIGFPE||Floating point exception\n'+file_read("env/error.txt"))
                timetaken = timediff
            elif t == 134:
                file_write('env/error.txt', 'SIGABRT||Aborted\n'+file_read("env/error.txt"))
                timetaken = timediff
            elif t != 0:
                file_write('env/error.txt', 'NZEC||return code : '+str(t)+"\n"+file_read("env/error.txt"))
                timetaken = timediff
            else:
                timetaken = timediff
        sys.stdout.flush()
        # Compare the output
        output = ""
        # Anything on stderr (and no verdict yet) counts as a runtime error.
        if result==None and run["language"]!="Text" and file_read("env/error.txt")!="":
            output = file_read("env/output.txt")
            result = "RTE"
        if result==None:
            output = file_read("env/output.txt")
            if "-cache" in sys.argv:
                run["output"] = file_read("io_cache/Aurora Online Judge - Problem ID "+str(run["pid"])+" - Output.txt")
            correct = run["output"].replace("\r","")
            # NOTE(review): this None-check comes *after* .replace() above, so
            # a NULL expected output would already have raised -- verify intent.
            if run["output"] is None: run["output"] = ""
            if(output==correct):
                result="AC"
            # NOTE(review): the 'S' / 'P' tests below appear to be grading
            # flags embedded in the expected output (lenient-spacing /
            # presentation-error tolerance) -- confirm against the web app.
            elif "S" in run["output"] and re.sub(" +"," ",re.sub("\n *","\n",re.sub(" *\n","\n",output)))==re.sub(" +"," ",re.sub("\n *","\n",re.sub(" *\n","\n",correct))): result = "AC"
            elif(re.sub(r"\s","",output)==re.sub(r"\s","",correct)): result = "AC" if "P" in run["output"] else "PE"
            else: result = "WA"
        print("Output Judgement Complete.")
        # Write results to database
        error = file_read("env/error.txt")
        cursor.execute("UPDATE runs SET time='%.3f',result='%s' WHERE rid=%d" % (float(timetaken),result,int(run["rid"])))
        cursor.execute("UPDATE subs_code SET error=\"%s\",output=\"%s\" WHERE rid=%d" %(re.escape(error), re.escape(output),int(run["rid"])))
        print("Result (%s,%.3f) updated on Server.\n" % (result,timetaken))
        sys.stdout.flush()
        # Commit changes
        link.commit()
        # Disconnect from Server
        try: cursor.close()
        except: pass
        try: link.close()
        except: pass
        print("Disconnected from Server.\n")
        sys.stdout.flush()
    except sql.Error as e:
        print("MySQL Error %d : %s\n" % (e.args[0],e.args[1]))
class MyTCPHandler(SocketServer.StreamRequestHandler):
    """Control-channel handler.

    Accepts one line per connection: 'rejudge' (re-judge every unjudged
    run), 'del<pid>' (drop cached IO files for a problem), or a bare run
    id to judge a single submission.
    """
    def handle(self):
        # self.rfile is a file-like object created by the handler;
        # we can now use e.g. readline() instead of raw recv() calls
        self.data = self.rfile.readline()
        self.data = self.data.decode('utf-8')
        # Likewise, self.wfile is a file-like object used to write back
        # to the client
        # NOTE(review): readline() keeps any trailing '\n', so the exact
        # 'rejudge' match below only fires when the client sends the word
        # without a newline -- confirm against the client protocol.
        if(self.data == 'rejudge'):
            print((("{} wrote:").format(self.client_address[0])))
            print(self.data)
            link = sql.connect(host=sql_hostname,port=sql_hostport,user=sql_username,passwd=sql_password,db=sql_database)
            cursor = link.cursor(sql.cursors.DictCursor)
            cursor.execute("SELECT rid FROM runs WHERE result is NULL and access != 'deleted'")
            # NOTE(review): the connection is closed before the rows are
            # fetched below; this relies on the cursor having buffered the
            # result set client-side -- verify with the pymysql version used.
            link.close()
            i = 0
            for i in range(cursor.rowcount):
                try:
                    run = cursor.fetchone()
                    runjudge(run['rid'])
                    i = i + 1
                except Exception as e:
                    print("Exception in RID : "+str(run['rid'])+"\n"+str(e)+"\n")
            cursor.close()
        elif(self.data[0:3] == 'del'):
            # 'del<pid>' -- delete the cached IO files for that problem id.
            print((("{} wrote:").format(self.client_address[0])))
            print(self.data)
            print("Deleting io files for pid - "+self.data[3:])
            filename = "io_cache/Aurora Online Judge - Problem ID "+self.data[3:]+" - Input.txt"
            if (os.path.exists(filename)):
                os.remove(filename)
            filename = "io_cache/Aurora Online Judge - Problem ID "+self.data[3:]+" - Output.txt"
            if (os.path.exists(filename)):
                os.remove(filename)
        elif (len(self.data) > 0):
            # Anything else is taken to be a single run id to judge
            # (int() tolerates the trailing newline).
            print((("{} wrote:").format(self.client_address[0])))
            runjudge(int(self.data))
if __name__ == "__main__":
    # Create the server, binding to localhost on port 8723
    server = SocketServer.TCPServer((HOST, PORT), MyTCPHandler)
    server.request_queue_size = 100
    print('Queue Size : ', server.request_queue_size)
    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    print("Waiting for submissions... ")
    try:
        server.serve_forever()
    except KeyboardInterrupt as e:
        print(" Keyboard Interrupt Detected.\n")
    except Exception as e:
        print("Exception : "+str(e)+"\n")
    # Release lock
    # NOTE(review): `lock` is never defined in this file, so lock.close()
    # always raises NameError which the bare except swallows -- presumably
    # a leftover from a removed file-locking feature; verify before relying
    # on lock.txt being removed.
    try:
        lock.close()
        os.unlink("lock.txt")
    except: pass
    print("Released lock on Execution Protocol.\n")
    # Terminate
    print("Argus Online Judge : Execution Protocol Terminated.\n")
| {
"content_hash": "8df46dadf59e00e43807bf939c36c444",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 396,
"avg_line_length": 43.610169491525426,
"alnum_prop": 0.6717839098328799,
"repo_name": "pushkar8723/Aurora",
"id": "b26bc3616bd23a8cd1947b1037763d324262314f",
"size": "15438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Judge/judge.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30441"
},
{
"name": "Dockerfile",
"bytes": "1290"
},
{
"name": "HTML",
"bytes": "2632"
},
{
"name": "Hack",
"bytes": "4612"
},
{
"name": "JavaScript",
"bytes": "6081"
},
{
"name": "PHP",
"bytes": "235829"
},
{
"name": "Python",
"bytes": "19988"
},
{
"name": "Shell",
"bytes": "9227"
}
],
"symlink_target": ""
} |
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| {
"content_hash": "dbfa5b6549a2187734a52e56013a1285",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 24.647058823529413,
"alnum_prop": 0.7255369928400954,
"repo_name": "burzillibus/RobHome",
"id": "b96dc7d53d302f8da48899990a06dbc3fe2c0b16",
"size": "636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/bin/rst2xml.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2279171"
},
{
"name": "C++",
"bytes": "40049"
},
{
"name": "CSS",
"bytes": "317269"
},
{
"name": "HTML",
"bytes": "108270"
},
{
"name": "JavaScript",
"bytes": "966529"
},
{
"name": "Objective-C",
"bytes": "27586"
},
{
"name": "Python",
"bytes": "15017866"
},
{
"name": "Shell",
"bytes": "3234"
},
{
"name": "TeX",
"bytes": "1628"
},
{
"name": "XSLT",
"bytes": "152693"
}
],
"symlink_target": ""
} |
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
import sys
from .constants import eStart, eError, eItsMe
class SJISProber(MultiByteCharSetProber):
    """Charset prober for Shift_JIS encoded Japanese text.

    Combines a coding state machine (byte-sequence validity) with a
    character-distribution analysis and a context analysis, reporting the
    higher of the two confidences.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        # Base reset clears state/distribution; the context analyzer keeps
        # its own state and must be reset explicitly.
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "SHIFT_JIS"

    def feed(self, aBuf):
        """Feed a chunk of bytes; returns the prober state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == eError:
                # Illegal byte sequence: this cannot be Shift_JIS.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == eStart:
                # A complete character just finished; hand it to both
                # analyzers.  charLen is its length in bytes.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # The character may straddle the previous chunk: use the
                    # bytes remembered in self._mLastChar.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen :], charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen : i + 3 - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
        # Remember the trailing byte for characters spanning chunk borders.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success once the context analysis has seen
            # enough data and confidence clears the threshold.
            if self._mContextAnalyzer.got_enough_data() and \
               (self.get_confidence() > constants.SHORTCUT_THRESHOLD):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        # Overall confidence is the more optimistic of the two analyses.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| {
"content_hash": "f684035b1ba1eb63b55055b5171e777f",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 108,
"avg_line_length": 40.69491525423729,
"alnum_prop": 0.6014160766347355,
"repo_name": "Branlala/docker-sickbeardfr",
"id": "a515c5768e929464ebd7e5b0faa6820b219ca053",
"size": "3568",
"binary": false,
"copies": "52",
"ref": "refs/heads/master",
"path": "sickbeard/lib/requests/packages/chardet2/sjisprober.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83278"
},
{
"name": "CSS",
"bytes": "155616"
},
{
"name": "JavaScript",
"bytes": "248414"
},
{
"name": "Python",
"bytes": "8146521"
},
{
"name": "Ruby",
"bytes": "2461"
},
{
"name": "Shell",
"bytes": "8791"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates five content models, each holding a
    single free-text field plus the auto primary key."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Actualizacion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('texto', models.TextField()),
            ],
            options={
                'verbose_name_plural': 'Actualizaci\xf3n',
            },
        ),
        migrations.CreateModel(
            name='Alcance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('texto', models.TextField()),
            ],
            options={
                'verbose_name_plural': 'Alcance',
            },
        ),
        migrations.CreateModel(
            name='InfoGeneral',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('texto', models.TextField()),
            ],
            options={
                'verbose_name_plural': 'Texto Sistema de la Asociaci\xf3n de Productores',
            },
        ),
        migrations.CreateModel(
            name='Objetivo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('texto', models.TextField()),
            ],
            options={
                'verbose_name_plural': 'Objetivo',
            },
        ),
        migrations.CreateModel(
            name='SistemaInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('texto', models.TextField()),
            ],
            options={
                'verbose_name_plural': 'Texto Sistema de Informaci\xf3n',
            },
        ),
    ]
| {
"content_hash": "0953b6403c1d7cfb7eb0b82e605c3515",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 114,
"avg_line_length": 32.296875,
"alnum_prop": 0.4929850024189647,
"repo_name": "ErickMurillo/aprocacaho",
"id": "b6c2d0dc24c606298c21d99b5fb317a520be4673",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configuracion/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236400"
},
{
"name": "HTML",
"bytes": "414224"
},
{
"name": "JavaScript",
"bytes": "133869"
},
{
"name": "Python",
"bytes": "220952"
}
],
"symlink_target": ""
} |
"""
A portmanteau word is a blend of two or more words, like 'mathelete',
which comes from 'math' and 'athelete'. You will write a function to
find the 'best' portmanteau word from a list of dictionary words.
Because 'portmanteau' is so easy to misspell, we will call our
function 'natalie' instead:
natalie(['word', ...]) == 'portmanteauword'
In this exercise the rules are: a portmanteau must be composed of
three non-empty pieces, start+mid+end, where both start+mid and
mid+end are among the list of words passed in. For example,
'adolescented' comes from 'adolescent' and 'scented', with
start+mid+end='adole'+'scent'+'ed'. A portmanteau must be composed
of two different words (not the same word twice).
That defines an allowable combination, but which is best? Intuitively,
a longer word is better, and a word is well-balanced if the mid is
about half the total length while start and end are about 1/4 each.
To make that specific, the score for a word w is the number of letters
in w minus the difference between the actual and ideal lengths of
start, mid, and end. (For the example word w='adole'+'scent'+'ed', the
start,mid,end lengths are 5,5,2 and the total length is 12. The ideal
start,mid,end lengths are 12/4,12/2,12/4 = 3,6,3. So the final score
is
12 - abs(5-(12/4)) - abs(5-(12/2)) - abs(2-(12/4))
= 12 - abs(5-3) - abs(5-6) - abs(2-3) = 8.
The output of natalie(words) should be the best portmanteau, or None
if there is none.
Note (1): I got the idea for this question from
Darius Bacon. Note (2): In real life, many portmanteaux omit letters,
for example 'smoke' + 'fog' = 'smog'; we aren't considering those.
Note (3): The word 'portmanteau' is itself a portmanteau; it comes
from the French "porter" (to carry) + "manteau" (cloak), and in
English meant "suitcase" in 1871 when Lewis Carroll used it in
'Through the Looking Glass' to mean two words packed into one. Note
(4): the rules for 'best' are certainly subjective, and certainly
should depend on more things than just letter length. In addition to
programming the solution described here, you are welcome to explore
your own definition of best, and use your own word lists to come up
with interesting new results. Post your best ones in the discussion
forum. Note (5) The test examples will involve no more than a dozen or so
input words. But you could implement a method that is efficient with a
larger list of words.
"""
def score(one, two, three, word):
    """Score a portmanteau split.

    *word* is the total length; *one*, *two*, *three* are the lengths of
    the start, mid and end pieces.  The ideal split is 1/4 + 1/2 + 1/4 of
    the total; every letter of deviation from it costs one point.
    """
    quarter = word / 4
    half = word / 2
    penalty = abs(one - quarter) + abs(two - half) + abs(three - quarter)
    return word - penalty
def natalie(words):
    "Find the best Portmanteau word formed from any two of the list of words."
    # Map score -> first candidate seen with that score (ties keep the
    # earliest candidate, matching the original search order).
    best_by_score = {}
    for start_word in words:
        for end_word in words:
            if start_word == end_word:
                continue
            # Try every split point of start_word; the start piece must be
            # non-empty, so position 0 is excluded.
            for cut in range(1, len(start_word)):
                tail = start_word[cut:]
                if end_word[0] == start_word[cut] and tail in end_word:
                    candidate = start_word + end_word[len(tail):]
                    start_len = cut
                    mid_len = len(tail)
                    end_len = len(end_word) - len(tail)
                    value = score(start_len, mid_len, end_len, len(candidate))
                    best_by_score.setdefault(value, candidate)
    if not best_by_score:
        return None
    return best_by_score[max(best_by_score)]
def test_natalie():
    "Some test cases for natalie"
    # (input words, tuple of acceptable results) -- None marks "no valid
    # portmanteau"; multi-element tuples allow either tied winner.
    cases = [
        (['adolescent', 'scented', 'centennial', 'always', 'ado'], ('adolescented', 'adolescentennial')),
        (['eskimo', 'escort', 'kimchee', 'kimono', 'cheese'], ('eskimono',)),
        (['kimono', 'kimchee', 'cheese', 'serious', 'us', 'usage'], ('kimcheese',)),
        (['circus', 'elephant', 'lion', 'opera', 'phantom'], ('elephantom',)),
        (['programmer', 'coder', 'partying', 'merrymaking'], ('programmerrymaking',)),
        (['int', 'intimate', 'hinter', 'hint', 'winter'], ('hintimate',)),
        (['morass', 'moral', 'assassination'], ('morassassination',)),
        (['entrepreneur', 'academic', 'doctor', 'neuropsychologist', 'neurotoxin', 'scientist', 'gist'],
         ('entrepreneuropsychologist', 'entrepreneurotoxin')),
        (['perspicacity', 'cityslicker', 'capability', 'capable'], ('perspicacityslicker',)),
        (['backfire', 'fireproof', 'backflow', 'flowchart', 'background', 'groundhog'], ('backgroundhog',)),
        (['streaker', 'nudist', 'hippie', 'protestor', 'disturbance', 'cops'], ('nudisturbance',)),
        (['night', 'day'], (None,)),
        (['dog', 'dogs'], (None,)),
        (['test'], (None,)),
        ([''], (None,)),
        (['ABC', '123'], (None,)),
        ([], (None,)),
    ]
    for words, allowed in cases:
        assert natalie(words) in allowed
    return 'tests pass'
# print natalie(['eskimo', 'escort', 'kimchee', 'kimono', 'cheese'])
# Python 2 print statements: run the self-test whenever this file executes.
print test_natalie()
print natalie(['dog', 'dogs'])
| {
"content_hash": "c4935252c94606c8df516b5c40f62d0b",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 169,
"avg_line_length": 50.96190476190476,
"alnum_prop": 0.6544571108204074,
"repo_name": "feredean/cs313",
"id": "c8a90a5839653ba143b11e808ec36f54b2606896",
"size": "5377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notes/7_portmanteau.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210982"
}
],
"symlink_target": ""
} |
import numpy
import pickle
from nutils import matrix, sparse, testing, warnings
@testing.parametrize
class backend(testing.TestCase):
n = 100
complex = False
def setUp(self):
    # Activate the parametrized linear-algebra backend (self.backend is
    # supplied by @testing.parametrize -- TODO confirm against full file);
    # optional backends skip the whole test when missing.
    super().setUp()
    try:
        self.enter_context(matrix.backend(self.backend))
    except matrix.BackendNotAvailable:
        self.skipTest('backend is unavailable')
    # Reference tridiagonal matrix: 2 on the diagonal, self.offdiag on both
    # first off-diagonals (complex-valued when self.complex is set).
    self.offdiag = -1+.5j if self.complex else -1
    self.exact = 2 * numpy.eye(self.n) + self.offdiag * numpy.eye(self.n, self.n, 1) + self.offdiag * numpy.eye(self.n, self.n, -1)
    # A pruned tridiagonal n x n matrix has exactly 3n-2 nonzeros.
    data = sparse.prune(sparse.fromarray(self.exact), inplace=True)
    assert len(data) == self.n*3-2
    self.matrix = matrix.fromsparse(data, inplace=True)
def test_size(self):
    """The assembled matrix reports n*n total entries."""
    expected = self.n ** 2
    self.assertEqual(self.matrix.size, expected)
def test_export_dense(self):
    """Dense export reproduces the reference array exactly."""
    dense = self.matrix.export('dense')
    self.assertEqual(dense.shape, (self.n, self.n))
    numpy.testing.assert_equal(actual=dense, desired=self.exact)
def test_export_coo(self):
    # Entries come back row-major in groups of three per row index i:
    # diagonal (i,i)=2, superdiagonal (i,i+1)=offdiag, subdiagonal
    # (i+1,i)=offdiag -- hence the stride-3 slicing below.
    data, (row, col) = self.matrix.export('coo')
    numpy.testing.assert_equal(row[0::3], numpy.arange(self.n))
    numpy.testing.assert_equal(col[0::3], numpy.arange(self.n))
    numpy.testing.assert_equal(data[0::3], 2)
    numpy.testing.assert_equal(row[1::3], numpy.arange(self.n-1))
    numpy.testing.assert_equal(col[1::3], numpy.arange(1, self.n))
    numpy.testing.assert_equal(data[1::3], self.offdiag)
    numpy.testing.assert_equal(row[2::3], numpy.arange(1, self.n))
    numpy.testing.assert_equal(col[2::3], numpy.arange(self.n-1))
    numpy.testing.assert_equal(data[2::3], self.offdiag)
def test_export_csr(self):
    # Same stride-3 value layout as the COO export; additionally check the
    # CSR row-pointer invariants (starts at 0, ends at nnz, interior
    # pointers advance by 2 then 3 per row for the tridiagonal pattern).
    data, indices, indptr = self.matrix.export('csr')
    self.assertEqual(indptr[0], 0)
    self.assertEqual(indptr[-1], len(data))
    numpy.testing.assert_equal(data[0::3], 2)
    numpy.testing.assert_equal(data[1::3], self.offdiag)
    numpy.testing.assert_equal(data[2::3], self.offdiag)
    numpy.testing.assert_equal(indices[0::3], numpy.arange(self.n))
    numpy.testing.assert_equal(indices[1::3], numpy.arange(1, self.n))
    numpy.testing.assert_equal(indices[2::3], numpy.arange(self.n-1))
    numpy.testing.assert_equal(indptr[1:-1], numpy.arange(2, 3*(self.n-1), 3))
def test_neg(self):
    """Unary minus negates every entry."""
    negated = -self.matrix
    numpy.testing.assert_equal(actual=negated.export('dense'), desired=-self.exact)
def test_mul(self):
    """Right scalar multiplication scales all entries; non-scalars raise."""
    scaled = self.matrix * 1.5
    numpy.testing.assert_equal(actual=scaled.export('dense'), desired=self.exact * 1.5)
    with self.assertRaises(TypeError):
        self.matrix * 'foo'
def test_matvec(self):
    # Multiplying the tridiagonal (2, -1, -1) matrix by x = [0, 1, ..., n-1]
    # telescopes to zero everywhere except the two boundary rows.
    x = numpy.arange(self.n)
    b = numpy.zeros(self.n)
    b[0] = -1
    b[-1] = self.n
    if self.complex:
        # The +.5j component of the off-diagonal adds an imaginary part.
        b = b + x * 1j
        b[0] += .5j
        b[-1] -= .5j * self.n
    numpy.testing.assert_equal(actual=self.matrix @ x, desired=b)
def test_matmat(self):
    # Matrix-matrix product with a two-column stack of consecutive
    # integers; as in test_matvec only the boundary rows are nonzero.
    # Also: non-array operands and shape mismatches must raise.
    X = numpy.arange(self.n*2).reshape(-1, 2)
    B = numpy.zeros((self.n, 2))
    B[0] = -2, -1
    B[-1] = 2*self.n, 2*(self.n+.5)
    if self.complex:
        B = B + numpy.arange(self.n*2).reshape(-1, 2) * 1j
        B[0] += 1j, .5j
        B[-1] -= 1j * self.n, 1j * (self.n+.5)
    numpy.testing.assert_equal(actual=self.matrix @ X, desired=B)
    with self.assertRaises(TypeError):
        self.matrix @ 'foo'
    with self.assertRaises(matrix.MatrixError):
        self.matrix @ numpy.arange(self.n+1)
def test_rmul(self):
    """Left scalar multiplication mirrors right multiplication."""
    scaled = 1.5 * self.matrix
    numpy.testing.assert_equal(actual=scaled.export('dense'), desired=self.exact * 1.5)
    with self.assertRaises(TypeError):
        'foo' / self.matrix
def test_div(self):
    """Scalar division scales entries by the reciprocal; strings raise."""
    divided = self.matrix / 1.5
    numpy.testing.assert_equal(actual=divided.export('dense'), desired=self.exact / 1.5)
    with self.assertRaises(TypeError):
        self.matrix / 'foo'
def test_add(self):
    # Add a matrix holding value v in column j of every row; the dense
    # result must match elementwise.  Non-matrix operands and shape
    # mismatches must raise.
    j = self.n//2
    v = 10.
    other = matrix.assemble(numpy.array([v]*self.n), numpy.array([numpy.arange(self.n), [j]*self.n]), shape=(self.n, self.n))
    add = self.matrix + other
    numpy.testing.assert_equal(actual=add.export('dense'), desired=self.exact + numpy.eye(self.n)[j]*v)
    with self.assertRaises(TypeError):
        self.matrix + 'foo'
    with self.assertRaises(matrix.MatrixError):
        self.matrix + matrix.eye(self.n+1)
def test_sub(self):
    # Mirror of test_add for subtraction: value v in column j of every
    # row, plus type- and shape-error checks.
    j = self.n//2
    v = 10.
    other = matrix.assemble(numpy.array([v]*self.n), numpy.array([numpy.arange(self.n), [j]*self.n]), shape=(self.n, self.n))
    sub = self.matrix - other
    numpy.testing.assert_equal(actual=sub.export('dense'), desired=self.exact - numpy.eye(self.n)[j]*v)
    with self.assertRaises(TypeError):
        self.matrix - 'foo'
    with self.assertRaises(matrix.MatrixError):
        self.matrix - matrix.eye(self.n+1)
def test_transpose(self):
    """Transposing an upper-triangular assembly gives its lower-triangular mirror."""
    upper = matrix.assemble(numpy.arange(1, 7), numpy.array([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]), shape=(3, 3))
    reference = numpy.array([[1, 2, 3], [0, 4, 5], [0, 0, 6]], dtype=float)
    numpy.testing.assert_equal(actual=upper.T.export('dense'), desired=reference.T)
def test_rowsupp(self):
    """rowsupp flags rows containing an entry above the given tolerance."""
    # Local renamed from 'sparse', which shadowed the imported module.
    mat = matrix.assemble(numpy.array([1e-10, 0, 1, 1]), numpy.array([[0, 0, 2, 2], [0, 1, 1, 2]]), shape=(3, 3))
    self.assertEqual(tuple(mat.rowsupp(tol=1e-5)), (False, False, True))
    self.assertEqual(tuple(mat.rowsupp(tol=0)), (True, False, True))
def test_solve(self):
    # Solve A x = rhs for each solver configuration, with and without an
    # initial guess; check the residual against each config's tolerance.
    # NOTE(review): self.solve_args is supplied by the parametrization --
    # presumably a list of solver keyword dicts; confirm in the full file.
    rhs = numpy.arange(self.matrix.shape[0])
    for args in self.solve_args:
        for lhs0 in None, numpy.arange(rhs.size)/rhs.size:
            with self.subTest('{},lhs0={}'.format(args.get('solver', 'direct'), 'none' if lhs0 is None else 'single')):
                lhs = self.matrix.solve(rhs, lhs0=lhs0, **args)
                res = numpy.linalg.norm(self.matrix @ lhs - rhs)
                self.assertLess(res, args.get('atol', 1e-10))
def test_multisolve(self):
    """solve must accept a multi-column right hand side and optional guesses."""
    rhs = numpy.arange(self.matrix.shape[0] * 2).reshape(-1, 2)
    guesses = (
        ('none', None),
        ('single', numpy.arange(self.matrix.shape[1])),
        ('multi', numpy.arange(rhs.size).reshape(rhs.shape)))
    for name, lhs0 in guesses:
        with self.subTest('lhs0={}'.format(name)):
            lhs = self.matrix.solve(rhs, lhs0=lhs0)
            residual = numpy.linalg.norm(self.matrix @ lhs - rhs, axis=0)
            self.assertLess(numpy.max(residual), 1e-9)
def test_singular(self):
    """Solving a singular system must raise MatrixError for every solver."""
    # Diagonal matrix with a zero entry at row n//2 -> singular by construction.
    diag = numpy.arange(self.n) - self.n // 2
    index = numpy.arange(self.n)[numpy.newaxis].repeat(2, 0)
    singularmatrix = matrix.assemble(diag, index, shape=(self.n, self.n))
    rhs = numpy.ones(self.n)
    for args in self.solve_args:
        with self.subTest(args.get('solver', 'direct')), self.assertRaises(matrix.MatrixError):
            singularmatrix.solve(rhs, **args)
def test_solve_repeated(self):
    """Repeated solves of the same system must keep meeting the tolerance."""
    rhs = numpy.arange(self.matrix.shape[0])
    for args in self.solve_args:
        with self.subTest(args.get('solver', 'direct')):
            for _ in range(3):
                lhs = self.matrix.solve(rhs, **args)
                residual = numpy.linalg.norm(self.matrix @ lhs - rhs)
                self.assertLess(residual, args.get('atol', 1e-10))
def test_constraints(self):
    """Constrained degrees of freedom keep their prescribed values."""
    cons = numpy.full(self.matrix.shape[0], numpy.nan)
    cons[0] = 10
    cons[-1] = 20
    for args in self.solve_args:
        with self.subTest(args.get('solver', 'direct')):
            lhs = self.matrix.solve(constrain=cons, **args)
            self.assertEqual(lhs[0], cons[0])
            self.assertEqual(lhs[-1], cons[-1])
            # Only the unconstrained interior has to satisfy the system.
            residual = numpy.linalg.norm((self.matrix @ lhs)[1:-1])
            self.assertLess(residual, args.get('atol', 1e-10))
def test_submatrix(self):
    """submatrix must select exactly the requested rows and columns."""
    mid = self.n // 2
    rows = mid + numpy.array([0, 1])
    cols = mid + numpy.array([-1, 0, 2])
    dense = self.matrix.submatrix(rows, cols).export('dense')
    self.assertEqual(dense.shape, (2, 3))
    numpy.testing.assert_equal(actual=dense, desired=self.exact[numpy.ix_(rows, cols)])
def test_submatrix_specialcases(self):
    """submatrix must handle structurally empty rows/columns being kept or dropped."""
    mat = matrix.assemble(numpy.array([1, 2, 3, 4]), numpy.array([[0, 0, 2, 2], [0, 2, 0, 2]]), (3, 3))
    self.assertAllEqual(mat.export('dense'), [[1, 0, 2], [0, 0, 0], [3, 0, 4]])
    # Drop the empty middle row, the empty middle column, both, and finally
    # select only the empty entry itself.
    self.assertAllEqual(mat.submatrix([0, 2], [0, 1, 2]).export('dense'), [[1, 0, 2], [3, 0, 4]])
    self.assertAllEqual(mat.submatrix([0, 1, 2], [0, 2]).export('dense'), [[1, 2], [0, 0], [3, 4]])
    self.assertAllEqual(mat.submatrix([0, 2], [0, 2]).export('dense'), [[1, 2], [3, 4]])
    self.assertAllEqual(mat.submatrix([1], [1]).export('dense'), [[0]])
def test_pickle(self):
    """Matrices must round-trip through pickle, also across backends."""
    data = pickle.dumps(self.matrix)
    restored = pickle.loads(data)
    self.assertIsInstance(restored, type(self.matrix))
    numpy.testing.assert_equal(restored.export('dense'), self.exact)
    # Unpickling under another active backend must yield that backend's type.
    with self.subTest('cross-pickle'), matrix.backend('Numpy'):
        restored = pickle.loads(data)
        from nutils.matrix._numpy import NumpyMatrix
        self.assertIsInstance(restored, NumpyMatrix)
        numpy.testing.assert_equal(restored.export('dense'), self.exact)
def test_diagonal(self):
    """The matrix diagonal must match that of the dense reference."""
    expected = numpy.diag(self.exact)
    self.assertAllEqual(self.matrix.diagonal(), expected)
# Register the parametrized test suite once per linear-algebra backend,
# listing the solver configurations that backend is expected to pass.
backend('numpy',
        backend='numpy',
        solve_args=[{},
                    dict(solver='direct', atol=1e-8),
                    dict(atol=1e-5, precon='diag')])
# Complex-valued variant; only the direct solver is exercised.
backend('numpy:complex',
        backend='numpy',
        complex=True,
        solve_args=[{},
                    dict(solver='direct', atol=1e-8)])
backend('scipy',
        backend='scipy',
        solve_args=[{},
                    dict(solver='direct', atol=1e-8),
                    dict(atol=1e-5, precon='diag', truncate=5),
                    dict(solver='gmres', atol=1e-5, restart=100, precon='spilu0'),
                    dict(solver='gmres', atol=1e-5, precon='splu'),
                    dict(solver='cg', atol=1e-5, precon='diag')] + [
                    dict(solver=s, atol=1e-5) for s in ('bicg', 'bicgstab', 'cg', 'cgs', 'lgmres')])
backend('scipy:complex',
        backend='scipy',
        complex=True,
        solve_args=[{},
                    dict(solver='direct', atol=1e-8)])
backend('mkl',
        backend='mkl',
        solve_args=[{},
                    dict(solver='direct', atol=1e-8),
                    dict(solver='direct', symmetric=True, atol=1e-8),
                    dict(atol=1e-5, precon='diag', truncate=5),
                    dict(solver='fgmres', atol=1e-8),
                    dict(solver='fgmres', atol=1e-8, precon='diag')])
backend('mkl:complex',
        backend='mkl',
        complex=True,
        solve_args=[{},
                    dict(solver='direct', atol=1e-8),
                    dict(solver='direct', symmetric=True, atol=1e-8)])
| {
"content_hash": "785252a5c5df9b17f3b11d523b4c6444",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 146,
"avg_line_length": 43.87843137254902,
"alnum_prop": 0.5685047814818125,
"repo_name": "wijnandhoitinga/nutils",
"id": "f8e580c582ffef45ed34753904c1991d8d41ae31",
"size": "11189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "5351"
},
{
"name": "Python",
"bytes": "1660391"
}
],
"symlink_target": ""
} |
import image_search
import pickle
import imlist
import numpy as np
import os
from numpy import *
def read_features_from_file(filename):
    """Read a whitespace-delimited feature/descriptor table from *filename*.

    Returns a 2-D numpy array of strings (one row per feature); callers
    convert to float with .astype(...). Returns an empty string for a
    missing file is NOT done here -- numpy raises if the file is absent,
    matching the original behaviour.
    """
    # dtype="string" is not a recognised dtype on Python 3 / modern numpy
    # (it raises TypeError); the builtin str type is the portable spelling.
    f = loadtxt(filename, dtype=str)
    return f[:, :]  # feature locations, descriptors
# Build the list of descriptor-file paths, one per image in the test set.
images = imlist.get_imlist("/home/ashish/dip/chap7/images_test/")
features_path = "/home/ashish/dip/chap7/features_test/"
nbr_images = len(images)
featureslist = []
for img in images:
    img_txt = os.path.splitext(img)[0]
    # NOTE(review): img is a full path from get_imlist, so this concatenates
    # features_path with another absolute path -- verify whether
    # os.path.basename(img_txt) was intended here.
    des_file = features_path + img_txt + "des" + ".txt"
    featureslist.append(des_file)
# Load the visual vocabulary and index every image's descriptors.
with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)
indx = image_search.Indexer('test.db', voc)
#indx.create_tables()
for i in range(nbr_images):
    # np.float was deprecated and removed from numpy (1.24+); it was only
    # an alias for the builtin float, which is used instead.
    descr = read_features_from_file(featureslist[i]).astype(float)
    indx.add_to_index(images[i], descr)
# commit to database
indx.db_commit() | {
"content_hash": "10ca9c6b30b8bc9d5d964bf9016f497d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 24.636363636363637,
"alnum_prop": 0.7355473554735548,
"repo_name": "ashishchopra778/Content-Based-Image-Retrieval",
"id": "83b28c4b3364bf4c616acb9e35e7248a7b1af0ed",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9787"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Packaging metadata for the Hugh distribution.
setup(
    name='Hugh',
    version='0.1',
    packages=find_packages(),
    install_requires=[
        'Werkzeug',
    ],
)
| {
"content_hash": "4a3b763ebced3eefc987fea56b7f4d18",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 17.2,
"alnum_prop": 0.5930232558139535,
"repo_name": "mgood/hugh",
"id": "f5554d555c9ceb7ddd6ea448c5914ba3c2c85f36",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "94110"
}
],
"symlink_target": ""
} |
from wpilib import *
from math import pi as PI
from sys import stderr
from time import time
from PidDrive import EncoderDrive
class RobotMap:
    """
    The RobotMap is a mapping from the ports sensors and actuators are wired into
    to a variable name. This provides flexibility changing wiring, makes checking
    the wiring easier and significantly reduces the number of magic numbers
    floating around.
    """
    # For example to map the left and right motors, you could define the
    # following variables to use with your drivetrain subsystem.
    left_talon = CANTalon(0)
    right_talon = CANTalon(1)
    # Brake mode makes the drivetrain resist motion when output is zero.
    left_talon.enableBrakeMode(True)
    right_talon.enableBrakeMode(True)
    encoder_ticks = 360  # encoder pulses per revolution
    wheel_d = 8  # wheel diameter; presumably inches -- TODO confirm units
    # NOTE(review): 37.5 * PI looks like the circumference of the robot's
    # turning circle (distance travelled for a 360-degree spin) -- confirm
    # against the code that consumes dist_for_360.
    dist_for_360 = 37.5 * PI
    right_encoder = Encoder(0,1)
    right_encoder.setPIDSourceParameter(Encoder.PIDSourceParameter.kDistance)
    # NOTE(review): distance per pulse is wheel_d/encoder_ticks here, but a
    # rolling wheel advances one circumference (wheel_d * PI) per revolution
    # -- verify whether the PI factor was intentionally omitted.
    right_encoder.setDistancePerPulse(wheel_d/encoder_ticks)
    left_encoder = Encoder(2,3)
    left_encoder.setPIDSourceParameter(Encoder.PIDSourceParameter.kDistance)
    left_encoder.setDistancePerPulse(wheel_d/encoder_ticks)
    # Closed-loop drivetrain wrapper around both talons and their encoders.
    chassis = EncoderDrive(left_talon, right_talon, leftEncoder=left_encoder, rightEncoder=right_encoder)
    # Pneumatics.
    flipper_solenoid = Solenoid(0)
    bakery_solenoid = Solenoid(1)
    nostril_solenoid = Solenoid(2)
    # Limit switches.
    nostril_switch = DigitalInput(4)
    bakery_switch_r = DigitalInput(5)
    bakery_switch_l = DigitalInput(6)
    compressor = Compressor(0)
| {
"content_hash": "61398e2c0018a03b4f8d48b1ac2547b9",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 105,
"avg_line_length": 35.476190476190474,
"alnum_prop": 0.7241610738255033,
"repo_name": "guineawheek/team-2785-frc2015",
"id": "e1c8495c7215661efa909b74e038dd28e47a82fe",
"size": "1490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot_map.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18687"
}
],
"symlink_target": ""
} |
"""
Object-oriented filesystem path representation.
"""
from __future__ import division, absolute_import
import os
import sys
import errno
import base64
from os.path import isabs, exists, normpath, abspath, splitext
from os.path import basename, dirname, join as joinpath
from os import listdir, utime, stat
from stat import S_ISREG, S_ISDIR, S_IMODE, S_ISBLK, S_ISSOCK
from stat import S_IRUSR, S_IWUSR, S_IXUSR
from stat import S_IRGRP, S_IWGRP, S_IXGRP
from stat import S_IROTH, S_IWOTH, S_IXOTH
from zope.interface import Interface, Attribute, implementer
# Please keep this as light as possible on other Twisted imports; many, many
# things import this module, and it would be good if it could easily be
# modified for inclusion in the standard library. --glyph
from twisted.python.compat import comparable, cmp, unicode
from twisted.python.deprecate import deprecated
from twisted.python.runtime import platform
from incremental import Version
from twisted.python.win32 import ERROR_FILE_NOT_FOUND, ERROR_PATH_NOT_FOUND
from twisted.python.win32 import ERROR_INVALID_NAME, ERROR_DIRECTORY, O_BINARY
from twisted.python.win32 import WindowsError
from twisted.python.util import FancyEqMixin
_CREATE_FLAGS = (os.O_EXCL |
os.O_CREAT |
os.O_RDWR |
O_BINARY)
def _stub_islink(path):
"""
Always return C{False} if the operating system does not support symlinks.
@param path: A path string.
@type path: L{str}
@return: C{False}
@rtype: L{bool}
"""
return False
# Use the platform's islink if it exists; otherwise symlinks are unsupported.
islink = getattr(os.path, 'islink', _stub_islink)
# Cryptographically random bytes, used to build temporary file names.
randomBytes = os.urandom
# URL-safe base64, used to armor random bytes into filename-safe text.
armor = base64.urlsafe_b64encode
class IFilePath(Interface):
    """
    File path object.

    A file path represents a location for a file-like-object and can be
    organized into a hierarchy; a file path can have children, which are
    themselves file paths.

    A file path has a name which uniquely identifies it in the context of its
    parent (if it has one); a file path can not have two children with the same
    name. This name is referred to as the file path's "base name".

    A series of such names can be used to locate nested children of a file
    path; such a series is referred to as the child's "path", relative to the
    parent. In this case, each name in the path is referred to as a "path
    segment"; the child's base name is the last segment in the path.

    When representing a file path as a string, a "path separator" is used to
    delimit the path segments within the string. For a file system path, that
    would be C{os.sep}.

    Note that the values of child names may be restricted. For example, a file
    system path will not allow the use of the path separator in a name, and
    certain names (e.g. C{"."} and C{".."}) may be reserved or have special
    meanings.

    @since: 12.1
    """
    sep = Attribute("The path separator to use in string representations")

    def child(name):
        """
        Obtain a direct child of this file path. The child may or may not
        exist.

        @param name: the name of a child of this path. C{name} must be a direct
            child of this path and may not contain a path separator.
        @return: the child of this path with the given C{name}.
        @raise InsecurePath: if C{name} describes a file path that is not a
            direct child of this file path.
        """

    def open(mode="r"):
        """
        Opens this file path with the given mode.

        @return: a file-like object.
        @raise Exception: if this file path cannot be opened.
        """

    def changed():
        """
        Clear any cached information about the state of this path on disk.
        """

    def getsize():
        """
        Retrieve the size of this file in bytes.

        @return: the size of the file at this file path in bytes.
        @raise Exception: if the size cannot be obtained.
        """

    def getModificationTime():
        """
        Retrieve the time of last access from this file.

        @return: a number of seconds from the epoch.
        @rtype: L{float}
        """

    def getStatusChangeTime():
        """
        Retrieve the time of the last status change for this file.

        @return: a number of seconds from the epoch.
        @rtype: L{float}
        """

    def getAccessTime():
        """
        Retrieve the time that this file was last accessed.

        @return: a number of seconds from the epoch.
        @rtype: L{float}
        """

    def exists():
        """
        Check if this file path exists.

        @return: C{True} if the file at this file path exists, C{False}
            otherwise.
        @rtype: L{bool}
        """

    def isdir():
        """
        Check if this file path refers to a directory.

        @return: C{True} if the file at this file path is a directory, C{False}
            otherwise.
        """

    def isfile():
        """
        Check if this file path refers to a regular file.

        @return: C{True} if the file at this file path is a regular file,
            C{False} otherwise.
        """

    def children():
        """
        List the children of this path object.

        @return: a sequence of the children of the directory at this file path.
        @raise Exception: if the file at this file path is not a directory.
        """

    def basename():
        """
        Retrieve the final component of the file path's path (everything
        after the final path separator).

        @return: the base name of this file path.
        @rtype: L{str}
        """

    def parent():
        """
        A file path for the directory containing the file at this file path.
        """

    def sibling(name):
        """
        A file path for a sibling of the file at this file path: a path with
        the same parent directory but the base name C{name}.

        @param name: the name of a sibling of this path. C{name} must be a
            direct sibling of this path and may not contain a path separator.

        @return: a sibling file path of this one.
        """
class InsecurePath(Exception):
    """
    Error that is raised when the path provided to L{FilePath} is invalid,
    e.g. when a requested child would escape its parent directory.
    """
class LinkError(Exception):
    """
    An error with symlinks - either that there are cyclical symlinks or that
    symlinks are not supported on this platform.
    """
class UnlistableError(OSError):
    """
    Raised to distinguish "this is not a directory you can list" from other,
    more catastrophic, listing errors.

    Instances mimic the original error as closely as possible while still
    being catchable as an independent type.

    @ivar originalException: the actual original exception instance, either an
        L{OSError} or a L{WindowsError}.
    """

    def __init__(self, originalException):
        """
        Create an UnlistableError exception.

        @param originalException: an instance of OSError.
        """
        # Copy the original error's instance attributes so this error looks
        # like it, then keep a reference to the original as well.
        vars(self).update(vars(originalException))
        self.originalException = originalException
class _WindowsUnlistableError(UnlistableError, WindowsError):
    """
    This exception is raised on Windows, for compatibility with previous
    releases of FilePath where unportable programs may have done "except
    WindowsError:" around a call to children().

    It is private because all application code may portably catch
    L{UnlistableError} instead.
    """
def _secureEnoughString(path):
    """
    Create a pseudorandom, filename-safe token for a temporary filename.

    @param path: The path that the new temporary filename should be able to be
        concatenated with; only its type (bytes or text) matters.

    @return: A pseudorandom, 16 byte string for use in secure filenames.
    @rtype: the type of C{path}
    """
    token = armor(randomBytes(16))[:16]
    return _coerceToFilesystemEncoding(path, token)
class AbstractFilePath(object):
    """
    Abstract implementation of an L{IFilePath}; must be completed by a
    subclass.

    This class primarily exists to provide common implementations of certain
    methods in L{IFilePath}. It is *not* a required parent class for
    L{IFilePath} implementations, just a useful starting point.

    Subclasses must provide at least C{open}, C{parent}, C{child},
    C{listdir} and C{isdir} (as used by the methods below).
    """

    def getContent(self):
        """
        Retrieve the contents of the file at this path.

        @return: the contents of the file
        @rtype: L{bytes}
        """
        with self.open() as fp:
            return fp.read()

    def parents(self):
        """
        Retrieve an iterator of all the ancestors of this path.

        @return: an iterator of all the ancestors of this path, from the most
            recent (its immediate parent) to the root of its filesystem.
        """
        path = self
        parent = path.parent()
        # root.parent() == root, so this means "are we the root"
        while path != parent:
            yield parent
            path = parent
            parent = parent.parent()

    def children(self):
        """
        List the children of this path object.

        @raise OSError: If an error occurs while listing the directory. If the
            error is 'serious', meaning that the operation failed due to an
            access violation, exhaustion of some kind of resource (file
            descriptors or memory), OSError or a platform-specific variant
            will be raised.

        @raise UnlistableError: If the inability to list the directory is due
            to this path not existing or not being a directory, the more
            specific OSError subclass L{UnlistableError} is raised instead.

        @return: an iterable of all currently-existing children of this
            object.
        """
        try:
            subnames = self.listdir()
        except WindowsError as winErrObj:
            # Under Python 3.3 and higher on Windows, WindowsError is an
            # alias for OSError.  OSError has a winerror attribute and an
            # errno attribute.
            # Under Python 2, WindowsError is an OSError subclass.
            # Under Python 2.5 and higher on Windows, WindowsError has a
            # winerror attribute and an errno attribute.
            # The winerror attribute is bound to the Windows error code while
            # the errno attribute is bound to a translation of that code to a
            # perhaps equivalent POSIX error number.
            #
            # For further details, refer to:
            # https://docs.python.org/3/library/exceptions.html#OSError
            # If not for this clause OSError would be handling all of these
            # errors on Windows.  The errno attribute contains a POSIX error
            # code while the winerror attribute contains a Windows error code.
            # Windows error codes aren't the same as POSIX error codes,
            # so we need to handle them differently.
            # Under Python 2.4 on Windows, WindowsError only has an errno
            # attribute.  It is bound to the Windows error code.
            # For simplicity of code and to keep the number of paths through
            # this suite minimal, we grab the Windows error code under either
            # version.
            # Furthermore, attempting to use os.listdir on a non-existent path
            # in Python 2.4 will result in a Windows error code of
            # ERROR_PATH_NOT_FOUND.  However, in Python 2.5,
            # ERROR_FILE_NOT_FOUND results instead. -exarkun
            winerror = getattr(winErrObj, 'winerror', winErrObj.errno)
            if winerror not in (ERROR_PATH_NOT_FOUND,
                                ERROR_FILE_NOT_FOUND,
                                ERROR_INVALID_NAME,
                                ERROR_DIRECTORY):
                raise
            raise _WindowsUnlistableError(winErrObj)
        except OSError as ose:
            if ose.errno not in (errno.ENOENT, errno.ENOTDIR):
                # Other possible errors here, according to linux manpages:
                # EACCES, EMIFLE, ENFILE, ENOMEM.  None of these seem like the
                # sort of thing which should be handled normally. -glyph
                raise
            raise UnlistableError(ose)
        return map(self.child, subnames)

    def walk(self, descend=None):
        """
        Yield myself, then each of my children, and each of those children's
        children in turn.

        The optional argument C{descend} is a predicate that takes a FilePath,
        and determines whether or not that FilePath is traversed/descended
        into.  It will be called with each path for which C{isdir} returns
        C{True}.  If C{descend} is not specified, all directories will be
        traversed (including symbolic links which refer to directories).

        @param descend: A one-argument callable that will return True for
            FilePaths that should be traversed, False otherwise.

        @return: a generator yielding FilePath-like objects.
        """
        yield self
        if self.isdir():
            for c in self.children():
                # we should first see if it's what we want, then we
                # can walk through the directory
                if (descend is None or descend(c)):
                    for subc in c.walk(descend):
                        # Guard against symlink cycles: if the subchild
                        # resolves to an ancestor of this path, bail out.
                        if os.path.realpath(self.path).startswith(
                                os.path.realpath(subc.path)):
                            raise LinkError("Cycle in file graph.")
                        yield subc
                else:
                    yield c

    def sibling(self, path):
        """
        Return a L{FilePath} with the same directory as this instance but with
        a basename of C{path}.

        @param path: The basename of the L{FilePath} to return.
        @type path: L{str}

        @return: The sibling path.
        @rtype: L{FilePath}
        """
        return self.parent().child(path)

    def descendant(self, segments):
        """
        Retrieve a child or child's child of this path.

        @param segments: A sequence of path segments as L{str} instances.

        @return: A L{FilePath} constructed by looking up the C{segments[0]}
            child of this path, the C{segments[1]} child of that path, and so
            on.

        @since: 10.2
        """
        path = self
        for name in segments:
            path = path.child(name)
        return path

    def segmentsFrom(self, ancestor):
        """
        Return a list of segments between a child and its ancestor.

        For example, in the case of a path X representing /a/b/c/d and a path
        Y representing /a/b, C{Y.segmentsFrom(X)} will return C{['c',
        'd']}.

        @param ancestor: an instance of the same class as self, ostensibly an
            ancestor of self.

        @raise: ValueError if the 'ancestor' parameter is not actually an
            ancestor, i.e. a path for /x/y/z is passed as an ancestor for
            /a/b/c/d.

        @return: a list of strs
        """
        # this might be an unnecessarily inefficient implementation but it
        # will work on win32 and for zipfiles; later I will determine if the
        # obvious fast implementation does the right thing too
        f = self
        p = f.parent()
        segments = []
        while f != ancestor and p != f:
            segments[0:0] = [f.basename()]
            f = p
            p = p.parent()
        if f == ancestor and segments:
            return segments
        raise ValueError("%r not parent of %r" % (ancestor, self))

    # new in 8.0
    def __hash__(self):
        """
        Hash the same as another L{FilePath} with the same path as mine.
        """
        return hash((self.__class__, self.path))

    # pending deprecation in 8.0
    def getmtime(self):
        """
        Deprecated.  Use getModificationTime instead.
        """
        return int(self.getModificationTime())

    def getatime(self):
        """
        Deprecated.  Use getAccessTime instead.
        """
        return int(self.getAccessTime())

    def getctime(self):
        """
        Deprecated.  Use getStatusChangeTime instead.
        """
        return int(self.getStatusChangeTime())
class RWX(FancyEqMixin, object):
    """
    Read/write/execute permissions for a single user category (i.e.
    user/owner, group, or other/world).  Instantiate with three boolean
    values: readable? writable? executable?.

    @type read: C{bool}
    @ivar read: Whether permission to read is given

    @type write: C{bool}
    @ivar write: Whether permission to write is given

    @type execute: C{bool}
    @ivar execute: Whether permission to execute is given

    @since: 11.1
    """
    compareAttributes = ('read', 'write', 'execute')

    def __init__(self, readable, writable, executable):
        self.read = readable
        self.write = writable
        self.execute = executable

    def __repr__(self):
        return "RWX(read=%s, write=%s, execute=%s)" % (
            self.read, self.write, self.execute)

    def shorthand(self):
        """
        Express the permission bits in 'ls -l' style, e.g. 'rwx' or 'r-x'.

        @return: The shorthand string.
        @rtype: L{str}
        """
        flags = zip('rwx', (self.read, self.write, self.execute))
        return ''.join(letter if allowed else '-'
                       for letter, allowed in flags)
class Permissions(FancyEqMixin, object):
    """
    Read/write/execute permissions for user, group and other.  Instantiate
    with any portion of the file's mode that includes the permission bits.

    @type user: L{RWX}
    @ivar user: User/Owner permissions

    @type group: L{RWX}
    @ivar group: Group permissions

    @type other: L{RWX}
    @ivar other: Other/World permissions

    @since: 11.1
    """
    compareAttributes = ('user', 'group', 'other')

    def __init__(self, statModeInt):
        # One bit triple per category, in user/group/other order.
        categories = ((S_IRUSR, S_IWUSR, S_IXUSR),
                      (S_IRGRP, S_IWGRP, S_IXGRP),
                      (S_IROTH, S_IWOTH, S_IXOTH))
        self.user, self.group, self.other = [
            RWX(*[statModeInt & bit > 0 for bit in bits])
            for bits in categories]

    def __repr__(self):
        return "[%s | %s | %s]" % (
            str(self.user), str(self.group), str(self.other))

    def shorthand(self):
        """
        Express the permission bits in 'ls -l' style, e.g. 'rwx-wx--x'.

        @return: The shorthand string.
        @rtype: L{str}
        """
        parts = (self.user, self.group, self.other)
        return "".join(category.shorthand() for category in parts)
class _SpecialNoValue(object):
"""
An object that represents 'no value', to be used in deprecating statinfo.
Please remove once statinfo is removed.
"""
pass
def _asFilesystemBytes(path, encoding=None):
"""
Return C{path} as a string of L{bytes} suitable for use on this system's
filesystem.
@param path: The path to be made suitable.
@type path: L{bytes} or L{unicode}
@param encoding: The encoding to use if coercing to L{bytes}. If none is
given, L{sys.getfilesystemencoding} is used.
@return: L{bytes}
"""
if type(path) == bytes:
return path
else:
if encoding is None:
encoding = sys.getfilesystemencoding()
return path.encode(encoding)
def _asFilesystemText(path, encoding=None):
    """
    Coerce C{path} to a string of L{unicode} suitable for use on this
    system's filesystem.

    @param path: The path to be made suitable.
    @type path: L{bytes} or L{unicode}

    @param encoding: The encoding to use if coercing to L{unicode}. If none
        is given, L{sys.getfilesystemencoding} is used.

    @return: L{unicode}
    """
    if type(path) == unicode:
        return path
    # Bytes path: decode with the requested or platform filesystem encoding.
    if encoding is None:
        encoding = sys.getfilesystemencoding()
    return path.decode(encoding)
def _coerceToFilesystemEncoding(path, newpath, encoding=None):
    """
    Return a C{newpath} coerced to the same mode (bytes or text) as C{path},
    so the two can be joined.

    @param path: The path that it should be suitable for joining to.
    @param newpath: The new portion of the path to be coerced if needed.
    @param encoding: If coerced, the encoding that will be used.
    """
    coerce = (_asFilesystemBytes if type(path) == bytes
              else _asFilesystemText)
    return coerce(newpath, encoding=encoding)
@comparable
@implementer(IFilePath)
class FilePath(AbstractFilePath):
"""
I am a path on the filesystem that only permits 'downwards' access.
Instantiate me with a pathname (for example,
FilePath('/home/myuser/public_html')) and I will attempt to only provide
access to files which reside inside that path. I may be a path to a file,
a directory, or a file which does not exist.
The correct way to use me is to instantiate me, and then do ALL filesystem
access through me. In other words, do not import the 'os' module; if you
need to open a file, call my 'open' method. If you need to list a
directory, call my 'path' method.
Even if you pass me a relative path, I will convert that to an absolute
path internally.
Note: although time-related methods do return floating-point results, they
may still be only second resolution depending on the platform and the last
value passed to L{os.stat_float_times}. If you want greater-than-second
precision, call C{os.stat_float_times(True)}, or use Python 2.5.
Greater-than-second precision is only available in Windows on Python2.5 and
later.
The type of C{path} when instantiating decides the mode of the L{FilePath}.
That is, C{FilePath(b"/")} will return a L{bytes} mode L{FilePath}, and
C{FilePath(u"/")} will return a L{unicode} mode L{FilePath}.
C{FilePath("/")} will return a L{bytes} mode L{FilePath} on Python 2, and a
L{unicode} mode L{FilePath} on Python 3.
Methods that return a new L{FilePath} use the type of the given subpath to
decide its mode. For example, C{FilePath(b"/").child(u"tmp")} will return a
L{unicode} mode L{FilePath}.
@type alwaysCreate: L{bool}
@ivar alwaysCreate: When opening this file, only succeed if the file does
not already exist.
@type path: L{bytes} or L{unicode}
@ivar path: The path from which 'downward' traversal is permitted.
@ivar statinfo: (WARNING: statinfo is deprecated as of Twisted 15.0.0 and
will become a private attribute)
The currently cached status information about the file on
the filesystem that this L{FilePath} points to. This attribute is
L{None} if the file is in an indeterminate state (either this
L{FilePath} has not yet had cause to call C{stat()} yet or
L{FilePath.changed} indicated that new information is required), 0 if
C{stat()} was called and returned an error (i.e. the path did not exist
when C{stat()} was called), or a C{stat_result} object that describes
the last known status of the underlying file (or directory, as the case
may be). Trust me when I tell you that you do not want to use this
attribute. Instead, use the methods on L{FilePath} which give you
information about it, like C{getsize()}, C{isdir()},
C{getModificationTime()}, and so on.
@type statinfo: L{int} or L{None} or L{os.stat_result}
"""
_statinfo = None
path = None
def __init__(self, path, alwaysCreate=False):
    """
    Initialize this L{FilePath}, normalizing C{path} to an absolute path.

    @param path: the location this path points at.
    @param alwaysCreate: when true, C{open} only succeeds if the file does
        not already exist.
    """
    self.alwaysCreate = alwaysCreate
    self.path = abspath(path)
def __getstate__(self):
    """
    Support serialization by discarding cached L{os.stat} results and
    returning everything else.
    """
    state = self.__dict__.copy()
    # The stat cache is transient and must not survive pickling.
    state.pop('_statinfo', None)
    return state
@property
def sep(self):
    """
    Return a filesystem separator.

    @return: The native filesystem separator.
    @returntype: The same type as C{self.path}.
    """
    # Coerced so bytes-mode paths get a bytes separator and vice versa.
    return _coerceToFilesystemEncoding(self.path, os.sep)

def _asBytesPath(self, encoding=None):
    """
    Return the path of this L{FilePath} as bytes.

    @param encoding: The encoding to use if coercing to L{bytes}. If none is
        given, L{sys.getfilesystemencoding} is used.

    @return: L{bytes}
    """
    return _asFilesystemBytes(self.path, encoding=encoding)

def _asTextPath(self, encoding=None):
    """
    Return the path of this L{FilePath} as text.

    @param encoding: The encoding to use if coercing to L{unicode}. If none
        is given, L{sys.getfilesystemencoding} is used.

    @return: L{unicode}
    """
    return _asFilesystemText(self.path, encoding=encoding)
def asBytesMode(self, encoding=None):
    """
    Return this L{FilePath} in L{bytes}-mode.

    @param encoding: The encoding to use if coercing to L{bytes}. If none is
        given, L{sys.getfilesystemencoding} is used.

    @return: L{bytes} mode L{FilePath}
    """
    # Already bytes-mode: no clone necessary.
    if type(self.path) != unicode:
        return self
    return self.clonePath(self._asBytesPath(encoding=encoding))
def asTextMode(self, encoding=None):
    """
    Return this L{FilePath} in L{unicode}-mode.

    @param encoding: The encoding to use if coercing to L{unicode}. If none
        is given, L{sys.getfilesystemencoding} is used.

    @return: L{unicode} mode L{FilePath}
    """
    # Already text-mode: no clone necessary.
    if type(self.path) != bytes:
        return self
    return self.clonePath(self._asTextPath(encoding=encoding))
def _getPathAsSameTypeAs(self, pattern):
    """
    If C{pattern} is C{bytes}, return L{FilePath.path} as L{bytes}.
    Otherwise, return L{FilePath.path} as L{unicode}.

    @param pattern: The new element of the path that L{FilePath.path} may
        need to be coerced to match.
    """
    return (self._asBytesPath() if type(pattern) == bytes
            else self._asTextPath())
def child(self, path):
    """
    Create and return a new L{FilePath} representing a path contained by
    C{self}.

    @param path: The base name of the new L{FilePath}. If this contains
        directory separators or parent references it will be rejected.
    @type path: L{bytes} or L{unicode}

    @raise InsecurePath: If the result of combining this path with C{path}
        would result in a path which is not a direct child of this path.

    @return: The child path.
    @rtype: L{FilePath} with a mode equal to the type of C{path}.
    """
    colon = _coerceToFilesystemEncoding(path, ":")
    sep = _coerceToFilesystemEncoding(path, os.sep)
    ourPath = self._getPathAsSameTypeAs(path)
    if platform.isWindows() and path.count(colon):
        # Catch paths like C:blah that don't have a slash
        raise InsecurePath("%r contains a colon." % (path,))
    norm = normpath(path)
    if sep in norm:
        raise InsecurePath("%r contains one or more directory separators" %
                           (path,))
    # Joining after normpath and re-checking the prefix guards against
    # '..' (and similar) escaping this directory.
    newpath = abspath(joinpath(ourPath, norm))
    if not newpath.startswith(ourPath):
        raise InsecurePath("%r is not a child of %s" %
                           (newpath, ourPath))
    return self.clonePath(newpath)
def preauthChild(self, path):
    """
    Use me if C{path} might have slashes in it, but you know they're safe.

    @param path: A relative path (ie, a path not starting with C{"/"})
        which will be interpreted as a child or descendant of this path.
    @type path: L{bytes} or L{unicode}

    @return: The child path.
    @rtype: L{FilePath} with a mode equal to the type of C{path}.
    """
    ourPath = self._getPathAsSameTypeAs(path)
    newpath = abspath(joinpath(ourPath, normpath(path)))
    # Separators are trusted, but the result must still stay below us.
    if not newpath.startswith(ourPath):
        raise InsecurePath("%s is not a child of %s" %
                           (newpath, ourPath))
    return self.clonePath(newpath)
def childSearchPreauth(self, *paths):
"""
Return my first existing child with a name in C{paths}.
C{paths} is expected to be a list of *pre-secured* path fragments;
in most cases this will be specified by a system administrator and not
an arbitrary user.
If no appropriately-named children exist, this will return L{None}.
@return: L{None} or the child path.
@rtype: L{None} or L{FilePath}
"""
for child in paths:
p = self._getPathAsSameTypeAs(child)
jp = joinpath(p, child)
if exists(jp):
return self.clonePath(jp)
def siblingExtensionSearch(self, *exts):
"""
Attempt to return a path with my name, given multiple possible
extensions.
Each extension in C{exts} will be tested and the first path which
exists will be returned. If no path exists, L{None} will be returned.
If C{''} is in C{exts}, then if the file referred to by this path
exists, C{self} will be returned.
The extension '*' has a magic meaning, which means "any path that
begins with C{self.path + '.'} is acceptable".
"""
for ext in exts:
if not ext and self.exists():
return self
p = self._getPathAsSameTypeAs(ext)
star = _coerceToFilesystemEncoding(ext, "*")
dot = _coerceToFilesystemEncoding(ext, ".")
if ext == star:
basedot = basename(p) + dot
for fn in listdir(dirname(p)):
if fn.startswith(basedot):
return self.clonePath(joinpath(dirname(p), fn))
p2 = p + ext
if exists(p2):
return self.clonePath(p2)
def realpath(self):
"""
Returns the absolute target as a L{FilePath} if self is a link, self
otherwise.
The absolute link is the ultimate file or directory the
link refers to (for instance, if the link refers to another link, and
another...). If the filesystem does not support symlinks, or
if the link is cyclical, raises a L{LinkError}.
Behaves like L{os.path.realpath} in that it does not resolve link
names in the middle (ex. /x/y/z, y is a link to w - realpath on z
will return /x/y/z, not /x/w/z).
@return: L{FilePath} of the target path.
@rtype: L{FilePath}
@raises LinkError: if links are not supported or links are cyclical.
"""
if self.islink():
result = os.path.realpath(self.path)
if result == self.path:
raise LinkError("Cyclical link - will loop forever")
return self.clonePath(result)
return self
def siblingExtension(self, ext):
"""
Attempt to return a path with my name, given the extension at C{ext}.
@param ext: File-extension to search for.
@type ext: L{bytes} or L{unicode}
@return: The sibling path.
@rtype: L{FilePath} with the same mode as the type of C{ext}.
"""
ourPath = self._getPathAsSameTypeAs(ext)
return self.clonePath(ourPath + ext)
    def linkTo(self, linkFilePath):
        """
        Creates a symlink to self to at the path in the L{FilePath}
        C{linkFilePath}.
        Only works on posix systems due to its dependence on
        L{os.symlink}. Propagates L{OSError}s up from L{os.symlink} if
        C{linkFilePath.parent()} does not exist, or C{linkFilePath} already
        exists.
        @param linkFilePath: a FilePath representing the link to be created.
        @type linkFilePath: L{FilePath}
        """
        # Note the direction: the new link at linkFilePath points at self.
        os.symlink(self.path, linkFilePath.path)
def open(self, mode='r'):
"""
Open this file using C{mode} or for writing if C{alwaysCreate} is
C{True}.
In all cases the file is opened in binary mode, so it is not necessary
to include C{"b"} in C{mode}.
@param mode: The mode to open the file in. Default is C{"r"}.
@type mode: L{str}
@raises AssertionError: If C{"a"} is included in the mode and
C{alwaysCreate} is C{True}.
@rtype: L{file}
@return: An open L{file} object.
"""
if self.alwaysCreate:
assert 'a' not in mode, ("Appending not supported when "
"alwaysCreate == True")
return self.create()
# This hack is necessary because of a bug in Python 2.7 on Windows:
# http://bugs.python.org/issue7686
mode = mode.replace('b', '')
return open(self.path, mode + 'b')
# stat methods below
    def restat(self, reraise=True):
        """
        Re-calculate cached effects of 'stat'. To refresh information on this
        path after you know the filesystem may have changed, call this method.
        @param reraise: a boolean. If true, re-raise exceptions from
            L{os.stat}; otherwise, mark this path as not existing, and remove
            any cached stat information.
        @raise Exception: If C{reraise} is C{True} and an exception occurs
            while reloading metadata.
        """
        try:
            self._statinfo = stat(self.path)
        except OSError:
            # 0 marks "stat failed / path absent"; the accessors only test
            # truthiness, so 0 behaves like "no cached info".
            self._statinfo = 0
            if reraise:
                raise
    def changed(self):
        """
        Clear any cached information about the state of this path on disk.
        @since: 10.1.0
        """
        # The stat accessors lazily call restat() when this is falsy.
        self._statinfo = None
    def chmod(self, mode):
        """
        Changes the permissions on self, if possible. Propagates errors from
        L{os.chmod} up.
        @param mode: integer representing the new permissions desired (same as
            the command line chmod)
        @type mode: L{int}
        """
        # Thin delegation; does not touch the cached stat information.
        os.chmod(self.path, mode)
def getsize(self):
"""
Retrieve the size of this file in bytes.
@return: The size of the file at this file path in bytes.
@raise Exception: if the size cannot be obtained.
@rtype: L{int}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_size
def getModificationTime(self):
"""
Retrieve the time of last access from this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_mtime)
def getStatusChangeTime(self):
"""
Retrieve the time of the last status change for this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_ctime)
def getAccessTime(self):
"""
Retrieve the time that this file was last accessed.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_atime)
def getInodeNumber(self):
"""
Retrieve the file serial number, also called inode number, which
distinguishes this file from all other files on the same device.
@raise NotImplementedError: if the platform is Windows, since the
inode number would be a dummy value for all files in Windows
@return: a number representing the file serial number
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_ino
def getDevice(self):
"""
Retrieves the device containing the file. The inode number and device
number together uniquely identify the file, but the device number is
not necessarily consistent across reboots or system crashes.
@raise NotImplementedError: if the platform is Windows, since the
device number would be 0 for all partitions on a Windows platform
@return: a number representing the device
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_dev
def getNumberOfHardLinks(self):
"""
Retrieves the number of hard links to the file.
This count keeps track of how many directories have entries for this
file. If the count is ever decremented to zero then the file itself is
discarded as soon as no process still holds it open. Symbolic links
are not counted in the total.
@raise NotImplementedError: if the platform is Windows, since Windows
doesn't maintain a link count for directories, and L{os.stat} does
not set C{st_nlink} on Windows anyway.
@return: the number of hard links to the file
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_nlink
def getUserID(self):
"""
Returns the user ID of the file's owner.
@raise NotImplementedError: if the platform is Windows, since the UID
is always 0 on Windows
@return: the user ID of the file's owner
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_uid
def getGroupID(self):
"""
Returns the group ID of the file.
@raise NotImplementedError: if the platform is Windows, since the GID
is always 0 on windows
@return: the group ID of the file
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_gid
def getPermissions(self):
"""
Returns the permissions of the file. Should also work on Windows,
however, those permissions may not be what is expected in Windows.
@return: the permissions for the file
@rtype: L{Permissions}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return Permissions(S_IMODE(st.st_mode))
def exists(self):
"""
Check if this L{FilePath} exists.
@return: C{True} if the stats of C{path} can be retrieved successfully,
C{False} in the other cases.
@rtype: L{bool}
"""
if self._statinfo:
return True
else:
self.restat(False)
if self._statinfo:
return True
else:
return False
def isdir(self):
"""
Check if this L{FilePath} refers to a directory.
@return: C{True} if this L{FilePath} refers to a directory, C{False}
otherwise.
@rtype: L{bool}
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISDIR(st.st_mode)
def isfile(self):
"""
Check if this file path refers to a regular file.
@return: C{True} if this L{FilePath} points to a regular file (not a
directory, socket, named pipe, etc), C{False} otherwise.
@rtype: L{bool}
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISREG(st.st_mode)
def isBlockDevice(self):
"""
Returns whether the underlying path is a block device.
@return: C{True} if it is a block device, C{False} otherwise
@rtype: L{bool}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISBLK(st.st_mode)
def isSocket(self):
"""
Returns whether the underlying path is a socket.
@return: C{True} if it is a socket, C{False} otherwise
@rtype: L{bool}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISSOCK(st.st_mode)
    def islink(self):
        """
        Check if this L{FilePath} points to a symbolic link.
        @return: C{True} if this L{FilePath} points to a symbolic link,
            C{False} otherwise.
        @rtype: L{bool}
        """
        # We can't use cached stat results here, because that is the stat of
        # the destination - (see #1773) which in *every case* but this one is
        # the right thing to use.  We could call lstat here and use that, but
        # it seems unlikely we'd actually save any work that way.  -glyph
        return islink(self.path)
    def isabs(self):
        """
        Check if this L{FilePath} refers to an absolute path.
        This always returns C{True}.
        @return: C{True}, always.
        @rtype: L{bool}
        """
        # Per the docstring the stored path is always absolute, so this is
        # effectively a sanity check delegated to os.path.isabs.
        return isabs(self.path)
    def listdir(self):
        """
        List the base names of the direct children of this L{FilePath}.
        @return: A L{list} of L{bytes}/L{unicode} giving the names of the
            contents of the directory this L{FilePath} refers to. These names
            are relative to this L{FilePath}.
        @rtype: L{list}
        @raise: Anything the platform L{os.listdir} implementation might raise
            (typically L{OSError}).
        """
        # Thin wrapper; names come back unsorted, as the platform returns
        # them.
        return listdir(self.path)
    def splitext(self):
        """
        Split the file path into a pair C{(root, ext)} such that
        C{root + ext == path}.
        @return: Tuple where the first item is the filename and second item is
            the file extension. See Python docs for L{os.path.splitext}.
        @rtype: L{tuple}
        """
        # Delegates directly to os.path.splitext on the raw path string.
        return splitext(self.path)
    def __repr__(self):
        # Debug-friendly representation showing the underlying path verbatim.
        return 'FilePath(%r)' % (self.path,)
    def touch(self):
        """
        Updates the access and last modification times of the file at this
        file path to the current time. Also creates the file if it does not
        already exist.
        @raise Exception: if unable to create or modify the last modification
            time of the file.
        """
        try:
            # Append mode creates the file if missing without truncating any
            # existing contents; failures here are best-effort only.
            self.open('a').close()
        except IOError:
            pass
        # utime with None sets both atime and mtime to "now".
        utime(self.path, None)
def remove(self):
"""
Removes the file or directory that is represented by self. If
C{self.path} is a directory, recursively remove all its children
before removing the directory. If it's a file or link, just delete it.
"""
if self.isdir() and not self.islink():
for child in self.children():
child.remove()
os.rmdir(self.path)
else:
os.remove(self.path)
self.changed()
def makedirs(self, ignoreExistingDirectory=False):
"""
Create all directories not yet existing in C{path} segments, using
L{os.makedirs}.
@param ignoreExistingDirectory: Don't raise L{OSError} if directory
already exists.
@type ignoreExistingDirectory: L{bool}
@return: L{None}
"""
try:
return os.makedirs(self.path)
except OSError as e:
if not (
e.errno == errno.EEXIST and
ignoreExistingDirectory and
self.isdir()):
raise
def globChildren(self, pattern):
"""
Assuming I am representing a directory, return a list of FilePaths
representing my children that match the given pattern.
@param pattern: A glob pattern to use to match child paths.
@type pattern: L{unicode} or L{bytes}
@return: A L{list} of matching children.
@rtype: L{list} of L{FilePath}, with the mode of C{pattern}'s type
"""
sep = _coerceToFilesystemEncoding(pattern, os.sep)
ourPath = self._getPathAsSameTypeAs(pattern)
import glob
path = ourPath[-1] == sep and ourPath + pattern \
or sep.join([ourPath, pattern])
return list(map(self.clonePath, glob.glob(path)))
    def basename(self):
        """
        Retrieve the final component of the file path's path (everything
        after the final path separator).
        @return: The final component of the L{FilePath}'s path (Everything
            after the final path separator).
        @rtype: the same type as this L{FilePath}'s C{path} attribute
        """
        # Delegates to os.path.basename on the raw path string.
        return basename(self.path)
    def dirname(self):
        """
        Retrieve all of the components of the L{FilePath}'s path except the
        last one (everything up to the final path separator).
        @return: All of the components of the L{FilePath}'s path except the
            last one (everything up to the final path separator).
        @rtype: the same type as this L{FilePath}'s C{path} attribute
        """
        # Delegates to os.path.dirname on the raw path string.
        return dirname(self.path)
    def parent(self):
        """
        A file path for the directory containing the file at this file path.
        @return: A L{FilePath} representing the path which directly contains
            this L{FilePath}.
        @rtype: L{FilePath}
        """
        # Wrap the textual dirname back into a path object of our clone type.
        return self.clonePath(self.dirname())
    def setContent(self, content, ext=b'.new'):
        """
        Replace the file at this path with a new file that contains the given
        bytes, trying to avoid data-loss in the meanwhile.
        On UNIX-like platforms, this method does its best to ensure that by the
        time this method returns, either the old contents I{or} the new
        contents of the file will be present at this path for subsequent
        readers regardless of premature device removal, program crash, or power
        loss, making the following assumptions:
          - your filesystem is journaled (i.e. your filesystem will not
            I{itself} lose data due to power loss)
          - your filesystem's C{rename()} is atomic
          - your filesystem will not discard new data while preserving new
            metadata (see U{http://mjg59.livejournal.com/108257.html} for
            more detail)
        On most versions of Windows there is no atomic C{rename()} (see
        U{http://bit.ly/win32-overwrite} for more information), so this method
        is slightly less helpful. There is a small window where the file at
        this path may be deleted before the new file is moved to replace it:
        however, the new file will be fully written and flushed beforehand so
        in the unlikely event that there is a crash at that point, it should be
        possible for the user to manually recover the new version of their
        data. In the future, Twisted will support atomic file moves on those
        versions of Windows which I{do} support them: see U{Twisted ticket
        3004<http://twistedmatrix.com/trac/ticket/3004>}.
        This method should be safe for use by multiple concurrent processes,
        but note that it is not easy to predict which process's contents will
        ultimately end up on disk if they invoke this method at close to the
        same time.
        @param content: The desired contents of the file at this path.
        @type content: L{bytes}
        @param ext: An extension to append to the temporary filename used to
            store the bytes while they are being written. This can be used to
            make sure that temporary files can be identified by their suffix,
            for cleanup in case of crashes.
        @type ext: L{bytes}
        """
        # Write the full content to a uniquely-named temporary sibling first.
        sib = self.temporarySibling(ext)
        with sib.open('w') as f:
            f.write(content)
        if platform.isWindows() and exists(self.path):
            # Windows rename() cannot overwrite; unlink opens a small window
            # where neither version exists (documented above).
            os.unlink(self.path)
        os.rename(sib.path, self.asBytesMode().path)
    def __cmp__(self, other):
        # Python 2 rich-comparison fallback: FilePaths order by their
        # underlying path strings; other types are not comparable.
        if not isinstance(other, FilePath):
            return NotImplemented
        return cmp(self.path, other.path)
    def createDirectory(self):
        """
        Create the directory the L{FilePath} refers to.
        @see: L{makedirs}
        @raise OSError: If the directory cannot be created.
        """
        # Non-recursive (os.mkdir): the parent directory must already exist.
        os.mkdir(self.path)
    def requireCreate(self, val=1):
        """
        Sets the C{alwaysCreate} variable.
        @param val: C{True} or C{False}, indicating whether opening this path
            will be required to create the file or not.
        @type val: L{bool}
        @return: L{None}
        """
        # Consulted by open(), which exclusively creates when this is set.
        self.alwaysCreate = val
    def create(self):
        """
        Exclusively create a file, only if this file previously did not exist.
        @return: A file-like object opened from this path.
        """
        # _CREATE_FLAGS is a module-level constant; presumably it includes
        # O_CREAT|O_EXCL so creation fails if the file exists — confirm at
        # its definition near the top of the file.
        fdint = os.open(self.path, _CREATE_FLAGS)
        # XXX TODO: 'name' attribute of returned files is not mutable or
        # settable via fdopen, so this file is slightly less functional than the
        # one returned from 'open' by default.  send a patch to Python...
        return os.fdopen(fdint, 'w+b')
def temporarySibling(self, extension=b""):
"""
Construct a path referring to a sibling of this path.
The resulting path will be unpredictable, so that other subprocesses
should neither accidentally attempt to refer to the same path before it
is created, nor they should other processes be able to guess its name
in advance.
@param extension: A suffix to append to the created filename. (Note
that if you want an extension with a '.' you must include the '.'
yourself.)
@type extension: L{bytes} or L{unicode}
@return: a path object with the given extension suffix, C{alwaysCreate}
set to True.
@rtype: L{FilePath} with a mode equal to the type of C{extension}
"""
ourPath = self._getPathAsSameTypeAs(extension)
sib = self.sibling(_secureEnoughString(ourPath) +
self.clonePath(ourPath).basename() + extension)
sib.requireCreate()
return sib
_chunkSize = 2 ** 2 ** 2 ** 2
def copyTo(self, destination, followLinks=True):
"""
Copies self to destination.
If self doesn't exist, an OSError is raised.
If self is a directory, this method copies its children (but not
itself) recursively to destination - if destination does not exist as a
directory, this method creates it. If destination is a file, an
IOError will be raised.
If self is a file, this method copies it to destination. If
destination is a file, this method overwrites it. If destination is a
directory, an IOError will be raised.
If self is a link (and followLinks is False), self will be copied
over as a new symlink with the same target as returned by os.readlink.
That means that if it is absolute, both the old and new symlink will
link to the same thing. If it's relative, then perhaps not (and
it's also possible that this relative link will be broken).
File/directory permissions and ownership will NOT be copied over.
If followLinks is True, symlinks are followed so that they're treated
as their targets. In other words, if self is a link, the link's target
will be copied. If destination is a link, self will be copied to the
destination's target (the actual destination will be destination's
target). Symlinks under self (if self is a directory) will be
followed and its target's children be copied recursively.
If followLinks is False, symlinks will be copied over as symlinks.
@param destination: the destination (a FilePath) to which self
should be copied
@param followLinks: whether symlinks in self should be treated as links
or as their targets
"""
if self.islink() and not followLinks:
os.symlink(os.readlink(self.path), destination.path)
return
# XXX TODO: *thorough* audit and documentation of the exact desired
# semantics of this code. Right now the behavior of existent
# destination symlinks is convenient, and quite possibly correct, but
# its security properties need to be explained.
if self.isdir():
if not destination.exists():
destination.createDirectory()
for child in self.children():
destChild = destination.child(child.basename())
child.copyTo(destChild, followLinks)
elif self.isfile():
with destination.open('w') as writefile, self.open() as readfile:
while 1:
# XXX TODO: optionally use os.open, os.read and
# O_DIRECT and use os.fstatvfs to determine chunk sizes
# and make *****sure**** copy is page-atomic; the
# following is good enough for 99.9% of everybody and
# won't take a week to audit though.
chunk = readfile.read(self._chunkSize)
writefile.write(chunk)
if len(chunk) < self._chunkSize:
break
elif not self.exists():
raise OSError(errno.ENOENT, "No such file or directory")
else:
# If you see the following message because you want to copy
# symlinks, fifos, block devices, character devices, or unix
# sockets, please feel free to add support to do sensible things in
# reaction to those types!
raise NotImplementedError(
"Only copying of files and directories supported")
def moveTo(self, destination, followLinks=True):
"""
Move self to destination - basically renaming self to whatever
destination is named.
If destination is an already-existing directory,
moves all children to destination if destination is empty. If
destination is a non-empty directory, or destination is a file, an
OSError will be raised.
If moving between filesystems, self needs to be copied, and everything
that applies to copyTo applies to moveTo.
@param destination: the destination (a FilePath) to which self
should be copied
@param followLinks: whether symlinks in self should be treated as links
or as their targets (only applicable when moving between
filesystems)
"""
try:
os.rename(self._getPathAsSameTypeAs(destination.path),
destination.path)
except OSError as ose:
if ose.errno == errno.EXDEV:
# man 2 rename, ubuntu linux 5.10 "breezy":
# oldpath and newpath are not on the same mounted filesystem.
# (Linux permits a filesystem to be mounted at multiple
# points, but rename(2) does not work across different mount
# points, even if the same filesystem is mounted on both.)
# that means it's time to copy trees of directories!
secsib = destination.temporarySibling()
self.copyTo(secsib, followLinks) # slow
secsib.moveTo(destination, followLinks) # visible
# done creating new stuff. let's clean me up.
mysecsib = self.temporarySibling()
self.moveTo(mysecsib, followLinks) # visible
mysecsib.remove() # slow
else:
raise
else:
self.changed()
destination.changed()
    def statinfo(self, value=_SpecialNoValue):
        """
        FilePath.statinfo is deprecated.
        @param value: value to set statinfo to, if setting a value
        @return: C{_statinfo} if getting, L{None} if setting
        """
        # This is a pretty awful hack to use the deprecated decorator to
        # deprecate a class attribute.  Ideally, there would just be a
        # statinfo property and a statinfo property setter, but the
        # 'deprecated' decorator does not produce the correct FQDN on class
        # methods.  So the property stuff needs to be set outside the class
        # definition - but the getter and setter both need the same function
        # in order for the 'deprecated' decorator to produce the right
        # deprecation string.
        # _SpecialNoValue is a sentinel so that None remains a storable value.
        if value is _SpecialNoValue:
            return self._statinfo
        else:
            self._statinfo = value
# This is all a terrible hack to get statinfo deprecated
_tmp = deprecated(
    Version('Twisted', 15, 0, 0),
    "other FilePath methods such as getsize(), "
    "isdir(), getModificationTime(), etc.")(FilePath.statinfo)
# The same wrapped function serves as both getter and setter so every access
# emits the deprecation warning.
FilePath.statinfo = property(_tmp, _tmp)
# Methods such as child() and sibling() build new paths via clonePath, which
# defaults to the FilePath class itself.
FilePath.clonePath = FilePath
| {
"content_hash": "d237fecbcaaa58468f7afb60f677f5f0",
"timestamp": "",
"source": "github",
"line_count": 1762,
"max_line_length": 80,
"avg_line_length": 33.341657207718505,
"alnum_prop": 0.6035779941444815,
"repo_name": "whitehorse-io/encarnia",
"id": "3f12a03f6c664dbd69547759ecc3e902e7c1ca73",
"size": "58871",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyenv/lib/python2.7/site-packages/twisted/python/filepath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "63966"
},
{
"name": "CSS",
"bytes": "87525"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "91741"
},
{
"name": "JavaScript",
"bytes": "151335"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "Python",
"bytes": "24616242"
},
{
"name": "Shell",
"bytes": "8808"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .forms import EmailForm
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
def email(request):
    """Render the contact form; on a valid POST, send the email and
    redirect to the 'thanks' view."""
    form = EmailForm(request.POST) if request.method == 'POST' else EmailForm()
    if request.method == 'POST' and form.is_valid():
        subject = form.cleaned_data['subject']
        from_email = form.cleaned_data['email']
        body = """
            Name: %s
            Message: %s
            """ % (form.cleaned_data['name'], form.cleaned_data['message'])
        try:
            send_mail(subject, body, from_email, ['raony@torchmed.com'])
        except BadHeaderError:
            return HttpResponse('Invalid header found.')
        return redirect('thanks')
    # GET, or POST with validation errors: (re-)render the form.
    return render(request, "contact/email.html", {'form': form})
def thanks(request):
    # Confirmation page shown after a successful contact-form submission.
    return render(request, "contact/thanks.html")
| {
"content_hash": "64145b92cda13527bfa8b22377faa1ea",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 29.5,
"alnum_prop": 0.6214689265536724,
"repo_name": "torchmed/torchmed",
"id": "97299c2f5e33e17d006151d9d4a3d70d3d9aeff8",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contact/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "104047"
},
{
"name": "Dockerfile",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "55197"
},
{
"name": "JavaScript",
"bytes": "123205"
},
{
"name": "Python",
"bytes": "9941"
},
{
"name": "Shell",
"bytes": "2714"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
    url(r'^admin_tools/', include('admin_tools.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # Simply show the master template.
    url(r'^', include('cms.urls')),
]
# NOTE: The staticfiles_urlpatterns also discovers static files (ie. no need to run collectstatic). Both the static
# folder and the media folder are only served via Django if DEBUG = True.
urlpatterns += staticfiles_urlpatterns() + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Import here so debug_toolbar need not be installed in production.
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
| {
"content_hash": "5d446f38a0e52bbca2ee0be5a6bcaccd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 115,
"avg_line_length": 33.8,
"alnum_prop": 0.7266272189349112,
"repo_name": "modelbrouwers/modelbouwdag.nl",
"id": "077b252b4dc79e8db4180965986198b3bc08d389",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/modelbouwdag/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5329"
},
{
"name": "HTML",
"bytes": "14484"
},
{
"name": "JavaScript",
"bytes": "2006"
},
{
"name": "Python",
"bytes": "65881"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import codecs
import base64
import json
import sys
import os
import re
import time
import shutil
import random
import requests
import bs4
import urlparse
sys.path.insert(1, os.path.dirname(
os.path.dirname(os.path.realpath(__file__))))
from lib import commons
from lib import utils
from bs4 import BeautifulSoup
def strip_html(html_string):
    """Return the text content of *html_string* with all markup removed.

    All NavigableString text nodes are extracted from the parsed tree and
    joined with single spaces.
    """
    soup = BeautifulSoup(html_string, 'html5lib')
    # Removed a leftover debug print of the whole parsed document, which
    # dumped the entire page to stdout on every call.
    text = soup.find_all(text=lambda text: isinstance(text, bs4.element.NavigableString))
    return " ".join(text)
def print_css_links(url):
    """Print the stylesheet URLs referenced by the page at *url*.

    Protocol-relative hrefs (``//host/...``) are normalized to https.
    """
    soup = commons.soup(url)
    raw_css_urls = [link["href"] for link in soup.findAll("link")
                    if "stylesheet" in link.get("rel", [])]
    css_urls = [u'https:%s' % u if u.startswith(u'//') else u
                for u in raw_css_urls]
    # Bug fix: the original computed css_urls but never printed (or returned)
    # them, so the function had no observable effect despite its name.
    for css_url in css_urls:
        print(css_url)
def main2():
    # Encoding experiment: fetch a page and inspect how requests detects its
    # character set (declared vs. sniffed vs. in-content encodings).
    # print_css_links(u'https://www.douban.com/note/645097084/')
    r = commons.get('https://baike.baidu.com/item/%E7%8C%AB%E5%92%AA')
    # r = commons.get('https://www.douban.com/note/645097084/')
    print(r.encoding)
    print(r.apparent_encoding)
    print(requests.utils.get_encodings_from_content(r.content))
    print(type(r.text))
    print(r.text[:300])
def main():
    """Print the UTF-8 file named by argv[1] line by line, then a marker."""
    with codecs.open(sys.argv[1], 'r', 'utf-8') as f:
        text = f.read()
    for line in text.split('\n'):
        print(line)
    print('-------')
def xhtml_validate():
    """Parse every file in the directory argv[1] as XML; lxml raises on
    malformed input, so a clean run means all files are well-formed."""
    src = os.path.abspath(sys.argv[1])
    from lxml import etree
    parser = etree.XMLParser()
    for name in os.listdir(src):
        path = os.path.join(src, name)
        with open(path, 'rb') as f:
            data = f.read()
        print('Validating %s' % path)
        etree.fromstring(data, parser)
def strip_html_tags():
    # Read the UTF-8 file named by argv[1] and print its markup-stripped text.
    with codecs.open(sys.argv[1], 'r', 'utf-8') as f:
        print(strip_html(f.read()))
if __name__ == '__main__':
    # Script entry point: currently wired to the XML validation helper.
    xhtml_validate()
| {
"content_hash": "bae88cfccdd60095685ae3616584e92d",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 105,
"avg_line_length": 28.352941176470587,
"alnum_prop": 0.6369294605809128,
"repo_name": "mcxiaoke/python-labs",
"id": "7b10a1e0a320ede7335fb4173821837c20da3dcd",
"size": "1928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epub/playground.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "111771"
},
{
"name": "CSS",
"bytes": "1106"
},
{
"name": "HTML",
"bytes": "470"
},
{
"name": "JavaScript",
"bytes": "97375"
},
{
"name": "Python",
"bytes": "558910"
},
{
"name": "Shell",
"bytes": "117"
},
{
"name": "TSQL",
"bytes": "2967"
},
{
"name": "VBScript",
"bytes": "127"
}
],
"symlink_target": ""
} |
"""py-mstorrent Demo Helper Script"""
import signal
import time
import sys
import os
import os.path
START_TIME = time.time()  # wall-clock moment the demo script started
SCRIPT_DIR = os.path.dirname(os.path.realpath( __file__ ))  # directory containing this script
MY_WORKING_DIR = os.getcwd()  # directory the demo was launched from
WORKING_DIR = os.path.realpath( os.path.join( MY_WORKING_DIR, '..' ) )  # parent of the launch directory
SRC_DIR = os.path.realpath(os.path.join( SCRIPT_DIR, '../' ))  # parent of this script's directory
class PeerDemo:
    """Replays a fixed sequence of commands against a client object."""
    cmds = []
    def __init__(self, *cmdlist):
        self.cmds = cmdlist
    def run(self, cl):
        """Issue every stored command on *cl*, sleeping one second first."""
        for command in self.cmds:
            time.sleep(1)
            cl.command(command)
def seconds():
    """Return whole seconds elapsed since the script started."""
    elapsed = time.time() - START_TIME
    return int(elapsed)
def waiter(waitTil, inc=5):
    """wait until *waitTil* seconds after START_TIME."""
    target = START_TIME + waitTil
    last = 0
    while time.time() < target:
        remaining = target - time.time()
        # Emit a progress line roughly every *inc* seconds.
        if seconds() - last >= inc:
            print("t = {} sec".format(seconds()))
            last = seconds()
        if remaining < inc:
            # Close enough: sleep out the exact remainder and stop.
            time.sleep(remaining)
            return
        time.sleep(1)
class timeout:
    """Context manager that raises TimeoutError if its body runs longer
    than *seconds*, using SIGALRM (POSIX main thread only).

    Example::

        with timeout(5):
            slow_operation()
    """
    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message
    def handle_timeout(self, signum, frame):
        # Installed as the SIGALRM handler; fires when the alarm expires.
        raise TimeoutError(self.error_message)
    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, exc_type, exc_value, tb):
        # Renamed parameters so the builtin 'type' is no longer shadowed;
        # __exit__ is invoked positionally, so callers are unaffected.
        # Cancel any pending alarm whether we exit normally or with an error.
        signal.alarm(0)
"content_hash": "d4a3496e0cbe0949b73b5be06e771e43",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 70,
"avg_line_length": 25.131147540983605,
"alnum_prop": 0.5694716242661448,
"repo_name": "skgrush/py-mstorrent",
"id": "190fa904d812e03130935e175691112395e40dfa",
"size": "1556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "617"
},
{
"name": "Python",
"bytes": "90161"
}
],
"symlink_target": ""
} |
"""
S3 Storage object.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from io import BytesIO
from django.conf import settings
from tempfile import SpooledTemporaryFile
from .base import BaseStorage, StorageError
class Storage(BaseStorage):
    """ S3 API Storage. """
    # Configuration is read from Django settings; bucket and both keys are
    # mandatory (enforced by _check_filesystem_errors below).
    S3_BUCKET = getattr(settings, 'DBBACKUP_S3_BUCKET', None)
    S3_ACCESS_KEY = getattr(settings, 'DBBACKUP_S3_ACCESS_KEY', None)
    S3_SECRET_KEY = getattr(settings, 'DBBACKUP_S3_SECRET_KEY', None)
    S3_DOMAIN = getattr(settings, 'DBBACKUP_S3_DOMAIN', 's3.amazonaws.com')
    S3_IS_SECURE = getattr(settings, 'DBBACKUP_S3_USE_SSL', True)
    S3_DIRECTORY = getattr(settings, 'DBBACKUP_S3_DIRECTORY', "django-dbbackups/")
    # Normalize the prefix to exactly one trailing slash (runs once, at
    # class-body evaluation time).
    if S3_DIRECTORY:
        S3_DIRECTORY = '%s/' % S3_DIRECTORY.strip('/')
    def __init__(self, server_name=None):
        """Validate settings, connect to S3 and open the configured bucket.

        *server_name* is accepted for BaseStorage signature compatibility
        but is not used here.
        """
        self._check_filesystem_errors()
        self.name = 'AmazonS3'
        self.conn = S3Connection(aws_access_key_id=self.S3_ACCESS_KEY,
            aws_secret_access_key=self.S3_SECRET_KEY, host=self.S3_DOMAIN,
            is_secure=self.S3_IS_SECURE)
        self.bucket = self.conn.get_bucket(self.S3_BUCKET)
        BaseStorage.__init__(self)
    def _check_filesystem_errors(self):
        """Raise StorageError if any required S3 setting is missing."""
        if not self.S3_BUCKET:
            raise StorageError('Filesystem storage requires DBBACKUP_S3_BUCKET to be defined in settings.')
        if not self.S3_ACCESS_KEY:
            raise StorageError('Filesystem storage requires DBBACKUP_S3_ACCESS_KEY to be defined in settings.')
        if not self.S3_SECRET_KEY:
            raise StorageError('Filesystem storage requires DBBACKUP_S3_SECRET_KEY to be defined in settings.')
    def backup_dir(self):
        """Return the bucket prefix under which backups are stored."""
        return self.S3_DIRECTORY
    def delete_file(self, filepath):
        """Delete the key *filepath* from the bucket."""
        self.bucket.delete_key(filepath)
    def list_directory(self):
        """Return the names of all keys under the backup prefix."""
        return [k.name for k in self.bucket.list(prefix=self.S3_DIRECTORY)]
    def write_file(self, file_path):
        """Upload the local file *file_path* under the backup prefix.

        Streams the file in 5 MB parts; on any failure the multipart
        upload is cancelled so no partial parts linger (and get billed).
        """
        # Use multipart upload because normal upload maximum is 5 GB.
        filepath = os.path.join(self.S3_DIRECTORY, os.path.basename(file_path))
        with open(file_path, 'rb') as f:
            handle = self.bucket.initiate_multipart_upload(filepath)
            try:
                # Part numbers are 1-based per the S3 multipart API.
                chunk = 1
                while True:
                    chunkdata = f.read(5 * 1024 * 1024)
                    if not chunkdata:
                        break
                    tmpfile = BytesIO(chunkdata)
                    tmpfile.seek(0)
                    handle.upload_part_from_file(tmpfile, chunk)
                    tmpfile.close()
                    chunk += 1
                handle.complete_upload()
            except Exception:
                handle.cancel_upload()
                raise
    def read_file(self, filepath):
        """ Read the specified file and return it's handle. """
        # NOTE(review): the returned SpooledTemporaryFile is positioned at
        # EOF after get_contents_to_file; callers presumably need seek(0)
        # before reading -- confirm against call sites.
        key = Key(self.bucket)
        key.key = filepath
        filehandle = SpooledTemporaryFile(max_size=10 * 1024 * 1024)
        key.get_contents_to_file(filehandle)
        return filehandle
| {
"content_hash": "67066c3882809478ab143a35f7c7a02d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 111,
"avg_line_length": 39.7125,
"alnum_prop": 0.6188227887944602,
"repo_name": "leukeleu/django-dbbackup",
"id": "28f1ccffa85b64c5669d408b93d4a4b329711633",
"size": "3177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbbackup/storage/s3_storage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "52280"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from data.models import Cartoon
class SearchSerializer(serializers.ModelSerializer):
    """Read-only serializer for Cartoon search results."""
    # Computed field; value comes from get_characters() below.
    characters = serializers.SerializerMethodField(read_only=True)
    class Meta:
        model = Cartoon
        fields = [
            'id',
            'title',
            'date',
            'characters',
            'comment',
            'thumbnail_hash',
        ]
    @staticmethod
    def get_characters(obj):
        # NOTE(review): assumes Cartoon.idols is a whitespace-separated
        # string of character names -- confirm against the model definition.
        return obj.idols.split()
class Costar(object):
    """Plain value object pairing a co-star name with an appearance count."""

    def __init__(self, name, count):
        # Simple attribute holder; no validation is performed here.
        self.name, self.count = name, count
class CostarSerializer(serializers.Serializer):
    """Serializer for Costar value objects (name + appearance count)."""
    name = serializers.CharField(max_length=255)
    count = serializers.IntegerField()
    def create(self, validated_data):
        """Build a new Costar from validated input."""
        return Costar(**validated_data)
    def update(self, instance, validated_data):
        """Apply a partial update; keys absent from input keep current values."""
        instance.name = validated_data.get('name', instance.name)
        instance.count = validated_data.get('count', instance.count)
        return instance
| {
"content_hash": "186d901ea0a8b1ba59d46d82cef803ab",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 68,
"avg_line_length": 25.5,
"alnum_prop": 0.6264705882352941,
"repo_name": "zaubermaerchen/imas_cg_api",
"id": "f9cb83cf1e436fc52a50af8be30fd9f3f82b0a7e",
"size": "1036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/cartoon/serializer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "511"
},
{
"name": "Python",
"bytes": "30235"
},
{
"name": "Shell",
"bytes": "20"
},
{
"name": "TSQL",
"bytes": "2535"
}
],
"symlink_target": ""
} |
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '../libs'))
# -- stdlib --
import json
import socket
import time
# -- third party --
# -- own --
# -- code --
# Timestamp attached to every emitted metric point.
ts = int(time.time())
# Column names of haproxy `show stat` CSV output, in order, starting at the
# third column (pxname/svname are consumed separately).  Entries named "-"
# mark columns this script deliberately does not export.
FIELDS = [
    # "pxname",   # [LFBS]: proxy name
    # "svname",   # [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
    "qcur",  # [..BS]: current queued requests. For the backend this reports the number queued without a server assigned.
    "qmax",  # [..BS]: max value of qcur
    "scur",  # [LFBS]: current sessions
    "smax",  # [LFBS]: max sessions
    "slim",  # [LFBS]: configured session limit
    "stot",  # [LFBS]: cumulative number of connections
    "bin",  # [LFBS]: bytes in
    "bout",  # [LFBS]: bytes out
    "dreq",  # [LFB.]: requests denied because of security concerns.
    "dresp",  # [LFBS]: responses denied because of security concerns.
    "ereq",  # [LF..]: request errors.
    "econ",  # [..BS]: number of requests that encountered an error trying to connect to a backend server.
    "eresp",  # [..BS]: response errors. srv_abrt will be counted here also.
    "wretr",  # [..BS]: number of times a connection to a server was retried.
    "wredis",  # [..BS]: number of times a request was redispatched to another server.
    "status",  # [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...)
    "weight",  # [..BS]: total weight (backend), server weight (server)
    "act",  # [..BS]: number of active servers (backend), server is active (server)
    "bck",  # [..BS]: number of backup servers (backend), server is backup (server)
    "chkfail",  # [...S]: number of failed checks. (Only counts checks failed when the server is up.)
    "chkdown",  # [..BS]: number of UP->DOWN transitions.
    "lastchg",  # [..BS]: number of seconds since the last UP<->DOWN transition
    "downtime",  # [..BS]: total downtime (in seconds).
    "qlimit",  # [...S]: configured maxqueue for the server, or nothing in the value is 0 (default, meaning no limit)
    "-",  # [LFBS]: process id (0 for first instance, 1 for second, ...)
    "-",  # [LFBS]: unique proxy id
    "-",  # [L..S]: server id (unique inside a proxy)
    "throttle",  # [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
    "-",  # [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching.
    "-",  # [...S]: id of proxy/server if tracking is enabled.
    "-",  # [LFBS]: (0=frontend, 1=backend, 2=server, 3=socket/listener)
    "rate",  # [.FBS]: number of sessions per second over last elapsed second
    "rate_lim",  # [.F..]: configured limit on new sessions per second
    "rate_max",  # [.FBS]: max number of new sessions per second
    "check_status",  # [...S]: status of last health check, one of:
    "check_code",  # [...S]: layer5-7 code, if available
    "-",  # [...S]: time in ms took to finish last health check
    "hrsp_1xx",  # [.FBS]: http responses with 1xx code
    "hrsp_2xx",  # [.FBS]: http responses with 2xx code
    "hrsp_3xx",  # [.FBS]: http responses with 3xx code
    "hrsp_4xx",  # [.FBS]: http responses with 4xx code
    "hrsp_5xx",  # [.FBS]: http responses with 5xx code
    "hrsp_other",  # [.FBS]: http responses with other codes (protocol error)
    "hanafail",  # [...S]: failed health checks details
    "req_rate",  # [.F..]: HTTP requests per second over last elapsed second
    "req_rate_max",  # [.F..]: max number of HTTP requests per second observed
    "req_tot",  # [.F..]: total number of HTTP requests received
    "cli_abrt",  # [..BS]: number of data transfers aborted by the client
    "srv_abrt",  # [..BS]: number of data transfers aborted by the server (inc. in eresp)
    "comp_in",  # [.FB.]: number of HTTP response bytes fed to the compressor
    "comp_out",  # [.FB.]: number of HTTP response bytes emitted by the compressor
    "comp_byp",  # [.FB.]: number of bytes that bypassed the HTTP compressor (CPU/BW limit)
    "comp_rsp",  # [.FB.]: number of HTTP responses that were compressed
    "lastsess",  # [..BS]: number of seconds since last session assigned to server/backend
    "last_chk",  # [...S]: last health check contents or textual error
    "last_agt",  # [...S]: last agent check contents or textual error
    "qtime",  # [..BS]: the average queue time in ms over the 1024 last requests
    "ctime",  # [..BS]: the average connect time in ms over the 1024 last requests
    "rtime",  # [..BS]: the average response time in ms over the 1024 last requests (0 for TCP)
    "ttime",  # [..BS]: the average total session time in ms over the 1024 last requests
]
# Accumulator for the metric dicts emitted at the end of the script.
rst = []
# Fetch raw `show stat` CSV from haproxy's admin socket.  This is a
# Python 2 script (note the print statement below), so recv() returns str
# and concatenation works without decoding.
try:
    resp = ''
    s = socket.socket(socket.AF_UNIX)
    s.connect('/run/haproxy/admin.sock')
    s.sendall('show stat\n')
    while True:
        frag = s.recv(10000)
        if not frag:
            break
        resp += frag
except Exception:
    # Best-effort: if haproxy is down or the socket is absent, fall through
    # with an empty resp and emit an empty metric list.
    # NOTE(review): the socket is never closed explicitly and errors are
    # silently swallowed -- presumably intentional for a monitoring plugin.
    pass
for entry in resp.strip().split('\n'):
fields = entry.split(',')
px, sv = fields[:2]
d = {}
for n, v in zip(FIELDS, fields[2:]):
if n == '-' or v == '' or not v.isdigit():
continue
v = float(v)
d[n] = v
rst.append({
"metric": "haproxy.%s" % n,
"timestamp": ts,
"step": 30,
"value": v,
"tags": {
"proxy": px, "proxy-srv": sv,
},
})
if 'slim' in d and sv == 'FRONTEND':
rst.append({
"metric": "haproxy.sratio",
"timestamp": ts,
"step": 30,
"value": d['scur'] / d['slim'],
"tags": {
"proxy": px, "proxy-srv": sv,
},
})
print json.dumps(rst)
| {
"content_hash": "dc41d342ec2f1ee7ab0aabbf07d41a7b",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 133,
"avg_line_length": 46.94656488549618,
"alnum_prop": 0.5549593495934959,
"repo_name": "kadashu/satori",
"id": "c2154abe4c8fcd1726dd4c6fc4634bdd8399bd76",
"size": "6212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori-rules/plugin/haproxy/30_haproxy.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189613"
},
{
"name": "Clojure",
"bytes": "52043"
},
{
"name": "Go",
"bytes": "102479"
},
{
"name": "HTML",
"bytes": "10254"
},
{
"name": "JavaScript",
"bytes": "16585"
},
{
"name": "Python",
"bytes": "4195260"
},
{
"name": "Ruby",
"bytes": "2312"
},
{
"name": "Shell",
"bytes": "18923"
},
{
"name": "Smarty",
"bytes": "4807"
}
],
"symlink_target": ""
} |
from oslo_policy import policy
from keystone.common.policies import base
# CRUD policy rules for the OS-FEDERATION mapping API; every operation
# requires the admin rule (RULE_ADMIN_REQUIRED).
mapping_policies = [
    policy.DocumentedRuleDefault(
        name=base.IDENTITY % 'create_mapping',
        check_str=base.RULE_ADMIN_REQUIRED,
        description=('Create a new federated mapping containing one or '
                     'more sets of rules.'),
        operations=[{'path': '/v3/OS-FEDERATION/mappings/{mapping_id}',
                     'method': 'PUT'}]),
    policy.DocumentedRuleDefault(
        name=base.IDENTITY % 'get_mapping',
        check_str=base.RULE_ADMIN_REQUIRED,
        description='Get a federated mapping.',
        operations=[{'path': '/v3/OS-FEDERATION/mappings/{mapping_id}',
                     'method': 'GET'}]),
    policy.DocumentedRuleDefault(
        name=base.IDENTITY % 'list_mappings',
        check_str=base.RULE_ADMIN_REQUIRED,
        description='List federated mappings.',
        operations=[{'path': '/v3/OS-FEDERATION/mappings',
                     'method': 'GET'}]),
    policy.DocumentedRuleDefault(
        name=base.IDENTITY % 'delete_mapping',
        check_str=base.RULE_ADMIN_REQUIRED,
        description='Delete a federated mapping.',
        operations=[{'path': '/v3/OS-FEDERATION/mappings/{mapping_id}',
                     'method': 'DELETE'}]),
    policy.DocumentedRuleDefault(
        name=base.IDENTITY % 'update_mapping',
        check_str=base.RULE_ADMIN_REQUIRED,
        description='Update a federated mapping.',
        operations=[{'path': '/v3/OS-FEDERATION/mappings/{mapping_id}',
                     'method': 'PATCH'}])
]
def list_rules():
    """Return the federated-mapping policy rules for oslo.policy registration."""
    return mapping_policies
| {
"content_hash": "8db941d654d1bd740174113db0597508",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 39.97560975609756,
"alnum_prop": 0.6070774862721171,
"repo_name": "rajalokan/keystone",
"id": "ff2778030006db86534c33d0a0848647a49c05a0",
"size": "2185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/common/policies/mapping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3865941"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
} |
# Read two integers from the user and print their sum using the same
# message template as before.
primeiro = int(input("digite o primeiro numero: "))
segundo = int(input("digite o segundo numero: "))
total = primeiro + segundo
print("A soma de {first} + {second} eh igual a {soma}".format(first=primeiro, second=segundo, soma=total))
| {
"content_hash": "c0114cfe2c02e1ba3932de86a367c681",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 109,
"avg_line_length": 51.5,
"alnum_prop": 0.6650485436893204,
"repo_name": "henriquejensen/Python_Zumbis",
"id": "b3d7bee06f3b4c1a9c7579ec6c083529593b4538",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lista-1/imprime_soma.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18571"
}
],
"symlink_target": ""
} |
from . import base
from subprocess import call
class Canto(base.ThreadedPollText):
    """
    Display RSS feeds updates using the canto console reader.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("fetch", False, "Whether to fetch new items on update"),
        ("feeds", [], "List of feeds to display, empty for all"),
        ("one_format", "{name}: {number}", "One feed display format"),
        ("all_format", "{number}", "All feeds display format"),
    ]
    def __init__(self, **config):
        base.ThreadedPollText.__init__(self, **config)
        self.add_defaults(Canto.defaults)
    def poll(self):
        """Build the widget text by shelling out to the `canto` CLI."""
        if not self.feeds:
            # No explicit feed list: query canto once for the aggregate
            # count ("-a"; with fetch enabled "-au" -- presumably the "u"
            # triggers an update first, TODO confirm against canto's docs).
            arg = "-a"
            if self.fetch:
                arg += "u"
            # [:-1] strips the trailing newline from canto's output.
            output = self.all_format.format(
                number=self.call_process(["canto", arg])[:-1]
            )
            return output
        else:
            if self.fetch:
                # Update all feeds once, then query each configured feed.
                call(["canto", "-u"])
            return "".join([self.one_format.format(
                name=feed,
                number=self.call_process(["canto", "-n", feed])[:-1]
            ) for feed in self.feeds])
| {
"content_hash": "59882f7c103b423cbe41bc7f4818984c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 32.611111111111114,
"alnum_prop": 0.5229982964224872,
"repo_name": "himaaaatti/qtile",
"id": "fa11c4befde4359299d0eab152b84c83365ac9dd",
"size": "2439",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "libqtile/widget/canto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3598"
},
{
"name": "Makefile",
"bytes": "1351"
},
{
"name": "Python",
"bytes": "951823"
},
{
"name": "Shell",
"bytes": "2870"
}
],
"symlink_target": ""
} |
from flask import Flask, Response, request
from twilio.twiml.voice_response import VoiceResponse
app = Flask(__name__)  # module-level WSGI app; the /voice route is registered on it
@app.route("/voice", methods=['POST'])
def get_voice_twiml():
    """Answer Twilio voice webhooks with TwiML: dial if a "To" number
    was posted, otherwise speak a greeting."""
    twiml = VoiceResponse()
    destination = request.form.get("To")
    if destination is not None:
        # Outbound leg: dial the requested number from our Twilio caller ID.
        twiml.dial(destination, caller_id="+15017122661")
    else:
        twiml.say("Thanks for calling!")
    return Response(str(twiml), mimetype='text/xml')
if __name__ == "__main__":
    # Flask development server only; use a real WSGI server in production.
    app.run(debug=True)
| {
"content_hash": "57d338e461cf0a5110d33cf67a18be9a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 25,
"alnum_prop": 0.6476190476190476,
"repo_name": "TwilioDevEd/api-snippets",
"id": "a1b0ebed485904a9b5d6e382645a7c1dcd099fe6",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/response-twiml-dial/response-twiml-dial.7.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Create the Payee model: a named payee owned by a user, linked to a
    Category, with a hidden financial-institution name (fi_name)."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('mentha', '0003_budget'),
    ]
    operations = [
        migrations.CreateModel(
            name='Payee',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('fi_name', models.CharField(max_length=100, editable=False, blank=True)),
                ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field='id', editable=False)),
                ('category', models.ForeignKey(to='mentha.Category', to_field='id')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "ec7a88a73272af3c84d7fa362e83b515",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 114,
"avg_line_length": 33.82142857142857,
"alnum_prop": 0.5776135163674763,
"repo_name": "ateoto/django-mentha",
"id": "443cd134c505bf34c3b52e17b2447d217fa4c650",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mentha/migrations/0004_payee.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41725"
},
{
"name": "JavaScript",
"bytes": "74673"
},
{
"name": "Python",
"bytes": "20044"
}
],
"symlink_target": ""
} |
'''
Created on 5 cze 2014
@author: Przemek
'''
from src.parser.measurable import Measurable
from src.items.code_item import CodeItem, Instruction
from src.items.bytes import Bytes
class CodeItemSection(Measurable):
    """Section that aggregates CodeItem entries.

    The section data begins with a 2-byte Bytes entry; subsequent code
    items are inserted just before the final element by append().
    """

    def __init__(self, parent):
        """Create an empty section attached to *parent*."""
        Measurable.__init__(self, parent)

    def append(self, code_item):
        # Keep the last element of self.data in place; new items are
        # inserted immediately before it.
        self.data.insert(len(self.data) - 1, code_item)

    def _new_code_item(self, registers, ins, outs, tries, insns):
        # Build a CodeItem with the given header field values.
        code_item = CodeItem(self)
        code_item.registersSize.value = registers
        code_item.insSize.value = ins
        code_item.outsSize.value = outs
        code_item.triesSize.value = tries
        code_item.insnsSize.value = insns
        return code_item

    def _add_instruction(self, code_item, byte_values):
        # Append one Instruction whose payload is the given byte values,
        # each wrapped in a 1-byte Bytes entry.
        instruction = Instruction(code_item)
        for value in byte_values:
            instruction.data.data.append(Bytes(instruction.data, 1, value))
        code_item.instructions.append(instruction)

    def initialize(self):
        """Populate the section with its default content.

        Reproduces the original hand-unrolled construction exactly: a
        2-byte leading Bytes entry, then two code items with the same
        byte payloads as before.
        """
        self.data.append(Bytes(self, 2))

        # First code item: 6-byte instruction (0x70 0x10 0x02 0x00 0x00 0x00
        # -- presumably an invoke; TODO confirm opcode semantics) followed by
        # a 2-byte instruction (0x0e 0x00).
        code_item = self._new_code_item(1, 1, 1, 0, 4)
        self._add_instruction(code_item, [0x70, 0x10, 0x02, 0x00, 0x00, 0x00])
        self._add_instruction(code_item, [0x0e, 0x00])
        self.append(code_item)

        # Second code item: single 2-byte instruction (0x0e 0x00).
        code_item = self._new_code_item(1, 1, 0, 0, 1)
        self._add_instruction(code_item, [0x0e, 0x00])
        self.append(code_item)
"content_hash": "2fa59218dc01c246939cbe5c13d894df",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 55,
"avg_line_length": 30.605263157894736,
"alnum_prop": 0.6096302665520207,
"repo_name": "PrzemekBurczyk/dalvik-compiler",
"id": "e692d2146615401c90b4f90b52a0e6394d2a68a9",
"size": "2326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sections/code_item_section.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "130801"
}
],
"symlink_target": ""
} |
from metakernel import Magic, option
# calysto_scheme is an optional dependency: fall back to None so that
# SchemeMagic.eval can raise a clear error when it is missing.  The
# original bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
# only import failures should be tolerated here.
try:
    from calysto_scheme import scheme
except ImportError:
    scheme = None
class SchemeMagic(Magic):
    """Metakernel magic providing %scheme / %%scheme for evaluating Scheme."""
    def __init__(self, kernel):
        super(SchemeMagic, self).__init__(kernel)
        # Result of the most recent evaluation; surfaced via post_process().
        self.retval = None
    def line_scheme(self, *args):
        """
        %scheme CODE - evaluate code as Scheme
        This line magic will evaluate the CODE (either expression or
        statement) as Scheme code.
        Examples:
            %scheme (define x 42)
            %scheme (import "math")
            %scheme (+ x + math.pi)
        """
        code = " ".join(args)
        self.retval = self.eval(code)
    def eval(self, code):
        """Evaluate *code* with calysto_scheme; raise if it is not installed."""
        if scheme:
            return scheme.execute_string_rm(code.strip())
        else:
            raise Exception("calysto_scheme is required")
    @option(
        "-e", "--eval_output", action="store_true", default=False,
        help="Use the retval value from the Scheme cell as code in the kernel language."
    )
    def cell_scheme(self, eval_output=False):
        """
        %%scheme - evaluate contents of cell as Scheme
        This cell magic will evaluate the cell (either expression or
        statement) as Scheme code.
        The -e or --eval_output flag signals that the retval value expression
        will be used as code for the cell to be evaluated by the host
        language.
        Examples:
            %%scheme
            (define x 42)
            %%scheme
            (import "math")
            (define retval (+ x math.pi))
            %%scheme -e
            (define retval "this = code")
            %%scheme -e
            "this = code"
        """
        if self.code.strip():
            if eval_output:
                # Replace the cell body with the Scheme result and let the
                # host kernel evaluate it.
                self.code = self.eval(self.code)
                self.evaluate = True
            else:
                # Plain evaluation: keep the result, suppress host evaluation.
                self.retval = self.eval(self.code)
                self.evaluate = False
    def post_process(self, retval):
        """Prefer the kernel-provided retval; otherwise the stored Scheme result."""
        if retval is not None:
            return retval
        else:
            return self.retval
def register_magics(kernel):
    """Entry point called by metakernel to install SchemeMagic on *kernel*."""
    kernel.register_magics(SchemeMagic)
def register_ipython_magics():
    """Register %scheme / %%scheme with a plain IPython session."""
    from metakernel import IPythonKernel
    from IPython.core.magic import register_line_magic, register_cell_magic
    kernel = IPythonKernel()
    magic = SchemeMagic(kernel)
    @register_line_magic
    def scheme(line):
        # Line magic: evaluate *line* and return the Scheme result.
        magic.line_scheme(line)
        return magic.retval
    @register_cell_magic
    def scheme(line, cell):
        # Cell magic: evaluate the cell body (the *line* options are unused).
        magic.code = cell
        magic.cell_scheme()
        return magic.retval
| {
"content_hash": "c3d1f36d7749efb19a33988728314258",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 88,
"avg_line_length": 26.947916666666668,
"alnum_prop": 0.568225744105141,
"repo_name": "Calysto/metakernel",
"id": "2d1e7695cbd8c585af9728cd84a0617918e52281",
"size": "2692",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "metakernel/magics/scheme_magic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1130"
},
{
"name": "Python",
"bytes": "250769"
}
],
"symlink_target": ""
} |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from resource_management import *
from stacks.utils.RMFTestCase import *
import getpass
import json
@patch.object(getpass, "getuser", new = MagicMock(return_value='some_user'))
@patch.object(Hook, "run_custom_hook", new = MagicMock())
class TestHookBeforeInstall(RMFTestCase):
    """RMF tests for the before-INSTALL hook: repo setup + base packages."""
    def test_hook_default(self):
        """Default config: one HDP repo is created, then unzip and curl installed."""
        self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
                           classname="BeforeInstallHook",
                           command="hook",
                           config_file="default.json"
        )
        self.assertResourceCalled('Repository', 'HDP-2.0._',
            action=['create'],
            base_url='http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0',
            components=['HDP', 'main'],
            mirror_list=None,
            repo_file_name='HDP',
            repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0'
        )
        self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
        self.assertResourceCalled('Package', 'curl', retry_count=5, retry_on_repo_unavailability=False)
        self.assertNoMoreResources()
    def test_hook_no_repos(self):
        """Empty repo_info: no Repository resources, only the base packages."""
        config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
        with open(config_file, "r") as f:
            command_json = json.load(f)
        # Simulate a command with no repositories attached.
        command_json['hostLevelParams']['repo_info'] = "[]"
        self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
                           classname="BeforeInstallHook",
                           command="hook",
                           config_dict=command_json)
        self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
        self.assertResourceCalled('Package', 'curl', retry_count=5, retry_on_repo_unavailability=False)
        self.assertNoMoreResources()
    def test_hook_default_repository_file(self):
        """Repository-file config: HDP and HDP-UTILS repos share one repo file."""
        self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
                           classname="BeforeInstallHook",
                           command="hook",
                           config_file="repository_file.json"
        )
        self.assertResourceCalled('Repository', 'HDP-2.2-repo-4',
            action=['create'],
            base_url='http://repo1/HDP/centos5/2.x/updates/2.2.0.0',
            components=['HDP', 'main'],
            mirror_list=None,
            repo_file_name='ambari-hdp-4',
            repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
            append_to_file=False)
        self.assertResourceCalled('Repository', 'HDP-UTILS-1.1.0.20-repo-4',
            action=['create'],
            base_url='http://repo1/HDP-UTILS/centos5/2.x/updates/2.2.0.0',
            components=['HDP-UTILS', 'main'],
            mirror_list=None,
            repo_file_name='ambari-hdp-4',
            repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
            append_to_file=True)
        self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
        self.assertResourceCalled('Package', 'curl', retry_count=5, retry_on_repo_unavailability=False)
        self.assertNoMoreResources()
| {
"content_hash": "caa5a6b0d35abaef01a58577aec126ab",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 178,
"avg_line_length": 45.967032967032964,
"alnum_prop": 0.6643557255558212,
"repo_name": "radicalbit/ambari",
"id": "008a1e7516d02cdc2c75727746b68fa8066bb2f2",
"size": "4206",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
} |
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
sys.path.append(os.path.realpath('..'))
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from chapter_05 import data_utils
from chapter_05 import seq2seq_model
# Training hyper-parameters, exposed as TF flags for command-line override.
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
                          "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
                          "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
                            "Batch size to use during training.")
# Model size / vocabulary configuration.
tf.app.flags.DEFINE_integer("en_vocab_size", 40000, "Size of the english vocabulary.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 40000, "Size of the french vocabulary.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
# Data / checkpoint locations and loop bookkeeping.
tf.app.flags.DEFINE_string("train_dir", os.path.realpath('../datasets/WMT'), "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
                            "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
                            "How many training steps to do per checkpoint.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
def read_data(source_path, target_path, max_size=None):
    """Read data from source and target files and put into buckets.
    Args:
        source_path: path to the files with token-ids for the source language.
        target_path: path to the file with token-ids for the target language;
            it must be aligned with the source file: n-th line contains the desired
            output for n-th line from the source_path.
        max_size: maximum number of lines to read, all other will be ignored;
            if 0 or None, data files will be read completely (no limit).
    Returns:
        data_set: a list of length len(_buckets); data_set[n] contains a list of
            (source, target) pairs read from the provided data files that fit
            into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
            len(target) < _buckets[n][1]; source and target are lists of token-ids.
    """
    # One (initially empty) list per bucket defined at module level.
    data_set = [[] for _ in _buckets]
    with tf.gfile.GFile(source_path, mode="r") as source_file:
        with tf.gfile.GFile(target_path, mode="r") as target_file:
            source, target = source_file.readline(), target_file.readline()
            counter = 0
            while source and target and (not max_size or counter < max_size):
                counter += 1
                # Periodic progress output for large corpora.
                if counter % 100000 == 0:
                    print("  reading data line %d" % counter)
                    sys.stdout.flush()
                source_ids = [int(x) for x in source.split()]
                target_ids = [int(x) for x in target.split()]
                # Targets are terminated with the EOS symbol.
                target_ids.append(data_utils.EOS_ID)
                # Place the pair into the smallest bucket that fits both
                # sides; pairs too long for every bucket are dropped.
                for bucket_id, (source_size, target_size) in enumerate(_buckets):
                    if len(source_ids) < source_size and len(target_ids) < target_size:
                        data_set[bucket_id].append([source_ids, target_ids])
                        break
                source, target = source_file.readline(), target_file.readline()
    return data_set
def create_model(session, forward_only):
    """Create translation model and initialize or load parameters in session.

    Restores weights from the latest checkpoint in FLAGS.train_dir when one
    exists; otherwise initializes fresh variables.  *forward_only* disables
    the backward pass (inference/decoding mode).
    """
    model = seq2seq_model.Seq2SeqModel(
        FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
        FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
        FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
        forward_only=forward_only)
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        session.run(tf.initialize_all_variables())
    return model
def train():
    """Run the main English->French translation training loop.

    Prepares the WMT dataset, builds (or restores) the seq2seq model, then
    loops forever: sample a bucket proportionally to its data volume, run one
    training step, and every FLAGS.steps_per_checkpoint steps report
    perplexity, decay the learning rate on plateau, save a checkpoint and
    evaluate on the dev set.  Never returns; stop with Ctrl-C.
    """
    wmt = data_utils.prepare_wmt_dataset()
    # en_train, fr_train, en_dev, fr_dev, _, _ = data_utils.prepare_wmt_dataset()
    with tf.Session() as sess:
        # Create model.
        print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
        model = create_model(sess, False)
        # Read data into buckets and compute their sizes.
        print("Reading development and training data (limit: %d)."
              % FLAGS.max_train_data_size)
        dev_set = read_data(wmt.en_dev_ids_path, wmt.fr_dev_ids_path)
        train_set = read_data(wmt.en_train_ids_path, wmt.fr_train_ids_path, FLAGS.max_train_data_size)
        train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
        train_total_size = float(sum(train_bucket_sizes))
        # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
        # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
        # the size of the i-th training bucket, as used later.
        train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                               for i in xrange(len(train_bucket_sizes))]
        # This is the training loop.
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []
        while True:
            # Choose a bucket according to data distribution. We pick a random number
            # in [0, 1] and use the corresponding interval in train_buckets_scale.
            random_number_01 = np.random.random_sample()
            bucket_id = min([i for i in xrange(len(train_buckets_scale))
                             if train_buckets_scale[i] > random_number_01])
            # Get a batch and make a step.
            start_time = time.time()
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id)
            _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, False)
            # Running averages over the current checkpoint window.
            step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
            loss += step_loss / FLAGS.steps_per_checkpoint
            current_step += 1
            # Once in a while, we save checkpoint, print statistics, and run evals.
            if current_step % FLAGS.steps_per_checkpoint == 0:
                # Print statistics for the previous epoch.
                # (cap at 300 because math.exp overflows for large losses)
                perplexity = math.exp(loss) if loss < 300 else float('inf')
                print("global step %d learning rate %.4f step-time %.2f perplexity "
                      "%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
                                step_time, perplexity))
                # Decrease learning rate if no improvement was seen over last 3 times.
                if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
                    sess.run(model.learning_rate_decay_op)
                previous_losses.append(loss)
                # Save checkpoint and zero timer and loss.
                checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
                model.saver.save(sess, checkpoint_path, global_step=model.global_step)
                step_time, loss = 0.0, 0.0
                # Run evals on development set and print their perplexity.
                for bucket_id in xrange(len(_buckets)):
                    if len(dev_set[bucket_id]) == 0:
                        print(" eval: empty bucket %d" % (bucket_id))
                        continue
                    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        dev_set, bucket_id)
                    _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                                 target_weights, bucket_id, True)
                    eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
                    print(" eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
                sys.stdout.flush()


# Kick off training when the module is executed.
train()
| {
"content_hash": "a2fc3ef075ef8fafe2956948e122ee14",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 102,
"avg_line_length": 50.34615384615385,
"alnum_prop": 0.6143184546545891,
"repo_name": "mlwithtf/mlwithtf",
"id": "c86dd6995bf81875051408a2e9f85573e2b736ce",
"size": "9841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter_05/translate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3254"
},
{
"name": "Jupyter Notebook",
"bytes": "46294"
},
{
"name": "Python",
"bytes": "810478"
},
{
"name": "Shell",
"bytes": "18242"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import collections
import os
import pathlib
import procrunner
import pytest
from cctbx import uctbx
from dxtbx.model import ExperimentList
from dxtbx.serialize import load
from dials.array_family import flex
def unit_cells_are_similar(
    uc1, uc2, relative_length_tolerance=0.01, absolute_angle_tolerance=1
):
    """Loosely compare two unit cells for equivalence.

    Lengths are compared by ratio, angles by absolute difference, with an
    angle also considered matching if it equals the 180-degree complement of
    its partner.  See also uctbx.cpp unit_cell::is_similar_to().
    """
    params1 = uc1.parameters()
    params2 = uc2.parameters()
    # a, b, c: compare as ratios against the relative tolerance.
    for a, b in zip(params1[:3], params2[:3]):
        if abs(min(a, b) / max(a, b) - 1) > relative_length_tolerance:
            return False
    # alpha, beta, gamma: absolute comparison, allowing the 180-complement.
    for a, b in zip(params1[3:], params2[3:]):
        if (
            abs(a - b) > absolute_angle_tolerance
            and abs(a - (180 - b)) > absolute_angle_tolerance
        ):
            return False
    return True
# Lightweight container for the artefacts of a dials.index run: the indexed
# reflection table, the resulting experiment list, and the final (x, y, z)
# RMSDs reported by run_indexing().
_indexing_result = collections.namedtuple(
    "indexing", ["indexed_reflections", "experiments", "rmsds"]
)
def run_indexing(
    reflections,
    experiment,
    working_directory,
    extra_args,
    expected_unit_cell,
    expected_rmsds,
    expected_hall_symbol,
    n_expected_lattices=1,
    relative_length_tolerance=0.005,
    absolute_angle_tolerance=0.5,
):
    """Run dials.index as a subprocess and verify its output.

    Args:
        reflections: path (or list of paths) to strong-spot reflection file(s).
        experiment: path (or list of paths) to experiment file(s).
        working_directory: directory in which to run dials.index.
        extra_args: extra command-line arguments passed to dials.index.
        expected_unit_cell: unit cell each indexed crystal must match.
        expected_rmsds: per-axis upper bounds on the refined (x, y, z) RMSDs.
        expected_hall_symbol: Hall symbol each crystal's space group must have.
        n_expected_lattices: number of crystals expected in the output.
        relative_length_tolerance: tolerance for unit-cell length comparison.
        absolute_angle_tolerance: tolerance (degrees) for unit-cell angles.

    Returns:
        _indexing_result; note that ``rmsds`` holds the values from the last
        experiment iterated over.
    """
    commands = ["dials.index"]
    # Both inputs may be a single path or a list of paths (multi-sweep case).
    if isinstance(reflections, list):
        commands.extend(reflections)
    else:
        commands.append(reflections)
    if isinstance(experiment, list):
        commands.extend(experiment)
    else:
        commands.append(experiment)
    commands.extend(extra_args)
    result = procrunner.run(commands, working_directory=working_directory)
    assert not result.returncode and not result.stderr
    out_expts = working_directory / "indexed.expt"
    out_refls = working_directory / "indexed.refl"
    assert out_expts.is_file()
    assert out_refls.is_file()
    experiments_list = load.experiment_list(out_expts, check_format=False)
    assert len(experiments_list.crystals()) == n_expected_lattices
    indexed_reflections = flex.reflection_table.from_file(out_refls)
    indexed_reflections.assert_experiment_identifiers_are_consistent(experiments_list)
    rmsds = None
    # Validate every indexed lattice individually.
    for i, experiment in enumerate(experiments_list):
        assert unit_cells_are_similar(
            experiment.crystal.get_unit_cell(),
            expected_unit_cell,
            relative_length_tolerance=relative_length_tolerance,
            absolute_angle_tolerance=absolute_angle_tolerance,
        ), (
            experiment.crystal.get_unit_cell().parameters(),
            expected_unit_cell.parameters(),
        )
        sg = experiment.crystal.get_space_group()
        assert sg.type().hall_symbol() == expected_hall_symbol, (
            sg.type().hall_symbol(),
            expected_hall_symbol,
        )
        # Only reflections belonging to this experiment.
        reflections = indexed_reflections.select(indexed_reflections["id"] == i)
        mi = reflections["miller_index"]
        # Every indexed reflection must have a non-zero Miller index.
        assert (mi != (0, 0, 0)).count(False) == 0
        reflections = reflections.select(mi != (0, 0, 0))
        reflections = reflections.select(
            reflections.get_flags(reflections.flags.used_in_refinement)
        )
        assert len(reflections) > 0
        # Root-mean-square deviation between observed and calculated centroids.
        obs_x, obs_y, obs_z = reflections["xyzobs.mm.value"].parts()
        calc_x, calc_y, calc_z = reflections["xyzcal.mm"].parts()
        rmsd_x = flex.mean(flex.pow2(obs_x - calc_x)) ** 0.5
        rmsd_y = flex.mean(flex.pow2(obs_y - calc_y)) ** 0.5
        rmsd_z = flex.mean(flex.pow2(obs_z - calc_z)) ** 0.5
        rmsds = (rmsd_x, rmsd_y, rmsd_z)
        for actual, expected in zip(rmsds, expected_rmsds):
            assert actual <= expected, f"{rmsds} {expected_rmsds}"
        assert experiment.identifier != ""
        expt = ExperimentList()
        expt.append(experiment)
        reflections.assert_experiment_identifiers_are_consistent(expt)
    return _indexing_result(indexed_reflections, experiments_list, rmsds)
def test_index_i04_weak_data_fft3d(dials_regression, tmp_path):
    """Index the thaumatin i04 weak data with the default fft3d method."""
    data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
    run_indexing(
        os.path.join(data_dir, "full.pickle"),
        os.path.join(data_dir, "experiments_import.json"),
        tmp_path,
        [
            "bin_size_fraction=0.25",
            "image_range=1,20",
            "image_range=250,270",
            "image_range=520,540",
        ],
        uctbx.unit_cell((57.7, 57.7, 149.8, 90, 90, 90)),
        (0.05, 0.04, 0.0005),
        " P 1",
    )
def test_index_trypsin_four_lattice_P212121(dials_regression, tmp_path):
    """Synthetic trypsin multi-lattice dataset (4 lattices), grid search."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "trypsin"
    run_indexing(
        data_dir / "P1_X6_1_2_3_4.pickle",
        data_dir / "experiments_P1_X6_1_2_3_4.json",
        tmp_path,
        [
            "indexing.method=real_space_grid_search",
            "reflections_per_degree=10",
            "n_macro_cycles=5",
            "known_symmetry.unit_cell=54.3,58.3,66.5,90,90,90",
            "known_symmetry.space_group=P212121",
            "image_range=0,10",
            "beam.fix=all",
            "detector.fix=all",
            "max_cell=70",
        ],
        uctbx.unit_cell((54.3, 58.3, 66.5, 90, 90, 90)),
        (0.28, 0.30, 0.006),
        " P 2ac 2ab",
        n_expected_lattices=1,
        relative_length_tolerance=0.02,
        absolute_angle_tolerance=1,
    )
def test_index_i04_weak_data_fft1d(dials_regression, tmp_path):
    """Index the thaumatin i04 weak data with the fft1d method."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "i04_weak_data"
    run_indexing(
        data_dir / "full.pickle",
        data_dir / "experiments_import.json",
        tmp_path,
        [
            "n_macro_cycles=2",
            "indexing.method=fft1d",
            "bin_size_fraction=0.25",
            "image_range=1,20",
            "image_range=250,270",
            "image_range=520,540",
        ],
        uctbx.unit_cell((57.7, 57.7, 149.9, 90, 90, 90)),
        (0.06, 0.05, 0.0005),
        " P 1",
    )
def test_index_trypsin_index_assignment_local(dials_regression, tmp_path):
    """Synthetic trypsin dataset (3 lattices) using local index assignment."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "trypsin"
    run_indexing(
        data_dir / "P1_X6_1_2_3.pickle",
        data_dir / "experiments_P1_X6_1_2_3.json",
        tmp_path,
        [
            "indexing.method=real_space_grid_search",
            "d_min_start=3",
            "n_macro_cycles=3",
            "known_symmetry.unit_cell=54.3,58.3,66.5,90,90,90",
            "known_symmetry.space_group=P212121",
            "image_range=0,10",
            "beam.fix=all",
            "detector.fix=all",
            "max_lattices=3",
            "index_assignment.method=local",
            "nearest_neighbours=50",
        ],
        uctbx.unit_cell((54.3, 58.3, 66.5, 90, 90, 90)),
        (0.33, 0.40, 0.0024),
        " P 2ac 2ab",
        n_expected_lattices=3,
        relative_length_tolerance=0.02,
        absolute_angle_tolerance=1,
    )
def test_index_peak_search_clean(dials_regression, tmp_path):
    """Index from a single image of i04_weak_data using the clean peak search."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "i04_weak_data"
    run_indexing(
        data_dir / "first_image.pickle",
        data_dir / "experiments_import.json",
        tmp_path,
        [
            "indexing.method=fft3d",
            "known_symmetry.space_group=P4",
            "known_symmetry.unit_cell=57.8,57.8,150,90,90,90",
            "peak_search=clean",
            "min_samples=15",
            "n_macro_cycles=4",
            "reciprocal_space_grid.d_min=4",
        ],
        uctbx.unit_cell((57.8, 57.8, 150, 90, 90, 90)),
        (0.06, 0.07, 0.003),
        " P 4",
    )
@pytest.mark.parametrize("specify_unit_cell", [False, True])
def test_index_imosflm_tutorial(dials_regression, tmp_path, specify_unit_cell):
# test on spots derived from imosflm tutorial data:
# http://www.ccp4.ac.uk/courses/BCA2005/tutorials/dataproc-tutorial.html
data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "imosflm_hg_mar"
pickle_path = data_dir / "strong.pickle"
sequence_path = data_dir / "experiments.json"
unit_cell = uctbx.unit_cell((58.373, 58.373, 155.939, 90, 90, 120))
hall_symbol = '-R 3 2"'
extra_args = [
"bin_size_fraction=0.25",
'known_symmetry.space_group="Hall: %s"' % hall_symbol.replace('"', '\\"'),
]
if specify_unit_cell:
extra_args.append(
'known_symmetry.unit_cell="%s %s %s %s %s %s"' % unit_cell.parameters()
)
expected_unit_cell = unit_cell
expected_hall_symbol = hall_symbol
expected_rmsds = (0.08, 0.11, 0.004)
run_indexing(
pickle_path,
sequence_path,
tmp_path,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.fixture(scope="session")
def insulin_spotfinding(dials_data, tmp_path_factory):
"""Return experiment and reflection files for 2 images of the insulin dataset"""
data_dir = dials_data("insulin", pathlib=True)
tmp_path = tmp_path_factory.mktemp("insulin")
command = ["dials.import"]
for i, image_path in enumerate(("insulin_1_001.img", "insulin_1_045.img")):
command.append(data_dir / image_path)
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
experiment = tmp_path / "imported.expt"
assert experiment.is_file()
command = ["dials.find_spots", "nproc=1", experiment]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
reflections = tmp_path / "strong.refl"
assert reflections.is_file()
return experiment, reflections
@pytest.mark.parametrize("method", ["fft3d", "fft1d", "real_space_grid_search"])
def test_index_insulin_multi_sequence(insulin_spotfinding, tmp_path, method):
experiment, reflections = insulin_spotfinding
expected_unit_cell = uctbx.unit_cell(
(78.163, 78.163, 78.163, 90.000, 90.000, 90.000)
)
expected_hall_symbol = " I 2 2 3"
expected_rmsds = (0.05, 0.06, 0.01)
extra_args = [
'known_symmetry.unit_cell="%s %s %s %s %s %s"'
% expected_unit_cell.parameters(),
f'known_symmetry.space_group="Hall: {expected_hall_symbol}"',
f"indexing.method={method}",
"treat_single_image_as_still=False",
]
run_indexing(
reflections,
experiment,
tmp_path,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.fixture(scope="session")
def insulin_spotfinding_stills(dials_data, tmp_path_factory):
"""Return experiment and reflection files for 1 image of the insulin
dataset treated as still image"""
data_dir = dials_data("insulin", pathlib=True)
tmp_path = tmp_path_factory.mktemp("insulin")
command = [
"dials.import",
"convert_sequences_to_stills=True",
data_dir / "insulin_1_001.img",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
experiment = tmp_path / "imported.expt"
assert experiment.is_file()
command = ["dials.find_spots", "nproc=1", experiment]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
reflections = tmp_path / "strong.refl"
assert reflections.is_file()
return experiment, reflections
@pytest.mark.xfel
@pytest.mark.parametrize("method", ["fft3d", "fft1d", "real_space_grid_search"])
def test_index_insulin_force_stills(insulin_spotfinding_stills, tmp_path, method):
    """Index a single insulin image forced to be treated as a still."""
    experiment, reflections = insulin_spotfinding_stills
    target_cell = uctbx.unit_cell((78.092, 78.092, 78.092, 90.000, 90.000, 90.000))
    hall = " I 2 2 3"
    run_indexing(
        reflections,
        experiment,
        tmp_path,
        [
            "stills.indexer=stills",
            'known_symmetry.unit_cell="%s %s %s %s %s %s"' % target_cell.parameters(),
            f'known_symmetry.space_group="Hall: {hall}"',
            f"indexing.method={method}",
        ],
        target_cell,
        (0.05, 0.06, 0.01),
        hall,
    )
def test_multiple_experiments(dials_regression, tmp_path):
    """Index 4 lysozyme still shots in a single dials.index job.

    The first image does not index; the last three do."""
    data_dir = (
        pathlib.Path(dials_regression) / "indexing_test_data" / "i24_lysozyme_stills"
    )
    run_indexing(
        data_dir / "strong.pickle",
        data_dir / "imported_experiments.json",
        tmp_path,
        [
            "stills.indexer=sequences",
            "joint_indexing=False",
            "outlier.algorithm=sauter_poon",
        ],
        uctbx.unit_cell((38.06, 78.78, 78.91, 90, 90, 90)),
        (0.1, 0.07, 0.0),
        " P 1",
        n_expected_lattices=3,
        relative_length_tolerance=0.01,
    )
def test_index_4rotation(dials_regression, tmp_path):
    """Index a four-axis rotation dataset in space group R3."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "4rotation"
    result = run_indexing(
        data_dir / "strong.pickle",
        data_dir / "experiments.json",
        tmp_path,
        [
            "max_refine=10",
            "reflections_per_degree=50",
            "known_symmetry.space_group=R3",
            "n_macro_cycles=3",
        ],
        uctbx.unit_cell((48.397, 48.397, 284.767, 90, 90, 120)),
        (0.06, 0.08, 0.22),
        " R 3",
    )
    assert len(result.indexed_reflections) > 276800, len(result.indexed_reflections)
def test_index_small_molecule_multi_sequence_4(dials_regression, tmp_path):
    """Small-molecule multi-sequence indexing: 4 sequences with different
    values of goniometer.fixed_rotation()."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "multi_sweep"
    sweep_dirs = [data_dir / f"SWEEP{i + 1}" / "index" for i in range(4)]
    result = run_indexing(
        [sorted(d.glob("*_strong.pickle"))[0] for d in sweep_dirs],
        [d / "experiments.json" for d in sweep_dirs],
        tmp_path,
        ["known_symmetry.space_group=I4", "filter_ice=False"],
        uctbx.unit_cell((7.310, 7.310, 6.820, 90.000, 90.000, 90.000)),
        (0.10, 0.7, 0.5),
        " I 4",
    )
    assert len(result.indexed_reflections) > 1250, len(result.indexed_reflections)
def test_index_small_molecule_multi_sequence_3(dials_regression, tmp_path):
    """Small-molecule multi-sequence indexing: 3 sequences with different
    goniometer setting rotations (i.e. phi scans)."""
    data_dir = pathlib.Path(dials_regression) / "dials-191"
    print(data_dir)
    result = run_indexing(
        [sorted(data_dir.glob(f"*_SWEEP{i + 1}_strong.pickle"))[0] for i in range(3)],
        [
            sorted(data_dir.glob(f"*_SWEEP{i + 1}_experiments.json"))[0]
            for i in range(3)
        ],
        tmp_path,
        ["filter_ice=False"],
        uctbx.unit_cell((9.440, 15.313, 17.126, 90.073, 90.106, 79.248)),
        (0.32, 0.34, 0.005),
        " P 1",
    )
    assert len(result.indexed_reflections) > 12000, len(result.indexed_reflections)
    # expect at least 2000 indexed reflections per experiment
    for i in range(3):
        assert (result.indexed_reflections["id"] == i).count(True) > 2000
def test_index_small_molecule_ice_max_cell(dials_regression, tmp_path):
    """Small-molecule indexing where ice rings make max-cell estimation tricky."""
    data_dir = os.path.join(dials_regression, "indexing_test_data", "MXSW-904")
    result = run_indexing(
        os.path.join(data_dir, "1_SWEEP1_strong.pickle"),
        os.path.join(data_dir, "1_SWEEP1_experiments.json"),
        tmp_path,
        ["filter_ice=False"],
        uctbx.unit_cell((11.72, 11.72, 11.74, 109.08, 109.24, 108.99)),
        (0.06, 0.05, 0.04),
        " P 1",
    )
    assert len(result.indexed_reflections) > 1300, len(result.indexed_reflections)
@pytest.mark.xfail
def test_refinement_failure_on_max_lattices_a15(dials_regression, tmp_path):
    """Problem: Sometimes there is enough data to index, but not enough to
    refine. If this happens in the (N>1)th crystal of max_lattices, then
    all existing solutions are also dropped."""
    data_dir = os.path.join(dials_regression, "indexing_test_data", "lattice_failures")
    result = procrunner.run(
        [
            "dials.index",
            os.path.join(data_dir, "lpe4-2-a15_strong.pickle"),
            os.path.join(data_dir, "lpe4-2-a15_datablock.json"),
            "max_lattices=3",
        ],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    # Bug fix: this previously called the non-existent Path.if_file(), which
    # raised AttributeError instead of performing the intended existence check.
    assert (tmp_path / "indexed.refl").is_file()
    assert (tmp_path / "indexed.expt").is_file()
    experiments_list = load.experiment_list(
        tmp_path / "indexed.expt", check_format=False
    )
    assert len(experiments_list) == 2
    # now try to reindex with existing model
    result = procrunner.run(
        [
            "dials.index",
            tmp_path / "indexed.expt",
            os.path.join(data_dir, "lpe4-2-a15_strong.pickle"),
            "max_lattices=2",
        ],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    assert (tmp_path / "indexed.refl").is_file()
    assert (tmp_path / "indexed.expt").is_file()
    experiments_list = load.experiment_list(
        tmp_path / "indexed.expt", check_format=False
    )
    assert len(experiments_list) == 2
@pytest.mark.xfel
def test_stills_indexer_multi_lattice_bug_MosaicSauter2014(dials_regression, tmp_path):
    """Problem: In stills_indexer, before calling the refine function, the
    experiment list contains a list of dxtbx crystal models (that are not
    MosaicSauter2014 models). The conversion to MosaicSauter2014 is made
    during the refine step when functions from nave_parameters is called.
    If the experiment list contains more than 1 experiment, for eg.
    multiple lattices, only the first crystal gets assigned mosaicity. In
    actuality, all crystal models should be assigned mosaicity. This test
    only compares whether or not all crystal models have been assigned a
    MosaicSauter2014 model."""
    # Imports are local to keep the heavy dials/dxtbx machinery out of
    # module import time for the rest of the test file.
    import dxtbx.model
    from dxtbx.model import Crystal
    from dxtbx.model.experiment_list import (
        Experiment,
        ExperimentList,
        ExperimentListFactory,
    )
    from dials.algorithms.indexing.stills_indexer import StillsIndexer
    from dials.array_family import flex
    from dials.command_line.stills_process import (
        phil_scope as stills_process_phil_scope,
    )

    experiment_data = os.path.join(
        dials_regression,
        "refinement_test_data",
        "cspad_refinement",
        "cspad_refined_experiments_step6_level2_300.json",
    )
    reflection_data = os.path.join(
        dials_regression,
        "refinement_test_data",
        "cspad_refinement",
        "cspad_reflections_step7_300.pickle",
    )
    refl = flex.reflection_table.from_file(reflection_data)
    explist = ExperimentListFactory.from_json_file(experiment_data, check_format=False)[
        0:2
    ]
    reflist = refl.select(refl["id"] < 2)  # Only use the first 2 for convenience
    # Construct crystal models that don't have mosaicity. These A,B,C values are the same
    # as read in from the dials_regression folder
    # Crystal-0
    cs0 = Crystal(explist[0].crystal)
    exp0 = Experiment(
        imageset=explist[0].imageset,
        beam=explist[0].beam,
        detector=explist[0].detector,
        goniometer=None,
        scan=None,
        crystal=cs0,
    )
    # Crystal-1
    cs1 = Crystal(explist[1].crystal)
    exp1 = Experiment(
        imageset=explist[1].imageset,
        beam=explist[1].beam,
        detector=explist[1].detector,
        goniometer=None,
        scan=None,
        crystal=cs1,
    )
    # Construct a new experiment_list that will be passed on for refinement
    unrefined_explist = ExperimentList([exp0, exp1])
    # Get default params from stills_process and construct StillsIndexer, then run refinement
    params = stills_process_phil_scope.extract()
    SI = StillsIndexer(reflist, unrefined_explist, params=params)
    refined_explist, new_reflist = SI.refine(unrefined_explist, reflist)
    # Now check whether the models have mosaicity after stills_indexer refinement
    # Also check that mosaicity values are within expected limits
    for ii, crys in enumerate(refined_explist.crystals()):
        assert isinstance(crys, dxtbx.model.MosaicCrystalSauter2014)
        if ii == 0:
            assert crys.get_domain_size_ang() == pytest.approx(2242.0, rel=0.1)
        if ii == 1:
            assert crys.get_domain_size_ang() == pytest.approx(2689.0, rel=0.1)
@pytest.mark.parametrize(
    "indexer_type,fix_cell",
    (("sequences", False), pytest.param("stills", True, marks=pytest.mark.xfel)),
)
def test_index_ED_still_low_res_spot_match(
    dials_data, tmp_path, indexer_type, fix_cell
):
    """Index a single simulated lysozyme ED still with low_res_spot_match."""
    image_path = (
        dials_data("image_examples", pathlib=True) / "simtbx_FormatSMVJHSim_001.img"
    )
    result = procrunner.run(["dials.import", image_path], working_directory=tmp_path)
    assert not result.returncode and not result.stderr
    experiment = tmp_path / "imported.expt"
    assert experiment.is_file()
    result = procrunner.run(
        ["dials.find_spots", "nproc=1", experiment], working_directory=tmp_path
    )
    assert not result.returncode and not result.stderr
    extra_args = [
        "indexing.method=low_res_spot_match",
        "known_symmetry.space_group=P43212",
        "known_symmetry.unit_cell=78.84,78.84,38.29,90,90,90",
        "stills.indexer=" + indexer_type,
        "n_macro_cycles=2",
        "detector.fix_list=Dist",
    ]
    if fix_cell:
        extra_args.append("crystal.fix=cell")
    run_indexing(
        tmp_path / "strong.refl",
        experiment,
        tmp_path,
        extra_args,
        uctbx.unit_cell((78.84, 78.84, 38.29, 90, 90, 90)),
        (0.0065, 0.0065, 0.000),
        " P 4nw 2abw",
    )
@pytest.mark.parametrize(
    "cell_params",
    [
        (44.47, 52.85, 62.23, 115.14, 101.72, 90.01),
        (52.85, 62.23, 44.47, 101.72, 90.01, 115.14),
    ],
)
def test_unconventional_P1_cell(dials_data, tmp_path, cell_params):
    """
    Indexing in P1 should succeed even if the cell parameters are provided in
    a non-conventional setting
    """
    data_dir = dials_data("mpro_x0305_processed", pathlib=True)
    run_indexing(
        data_dir / "strong.refl",
        data_dir / "imported.expt",
        tmp_path,
        [
            "indexing.method=fft3d",
            "known_symmetry.space_group=P1",
            "known_symmetry.unit_cell=" + ",".join(str(x) for x in cell_params),
        ],
        uctbx.unit_cell(cell_params),
        (1, 1, 1),
        " P 1",
    )
def test_real_space_grid_search_no_unit_cell(dials_regression, tmp_path):
    """real_space_grid_search must fail cleanly when no target cell is given."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "i04_weak_data"
    result = procrunner.run(
        [
            "dials.index",
            data_dir / "experiments_import.json",
            data_dir / "full.pickle",
            "indexing.method=real_space_grid_search",
        ],
        working_directory=tmp_path,
    )
    assert result.stderr
    assert (
        result.stderr.strip()
        == b"Target unit cell must be provided for real_space_grid_search"
    )
def test_index_known_orientation(dials_data, tmp_path):
    """Index proteinase K data starting from known experimental models."""
    data_dir = dials_data("vmxi_proteinase_k_sweeps", pathlib=True)
    run_indexing(
        data_dir / "reflections_0.pickle",
        data_dir / "experiments_0.json",
        tmp_path,
        [],
        uctbx.unit_cell((68.395, 68.395, 104, 90, 90, 90)),
        (0.013, 0.012, 0.008),
        " P 4",
    )
def test_all_expt_ids_have_expts(dials_data, tmp_path):
    """Every reflection id in the output must refer to an existing experiment."""
    data_dir = dials_data("vmxi_thaumatin_grid_index", pathlib=True)
    result = procrunner.run(
        [
            "dials.index",
            data_dir / "split_07602.expt",
            data_dir / "split_07602.refl",
            "stills.indexer=sequences",
            "indexing.method=real_space_grid_search",
            "space_group=P4",
            "unit_cell=58,58,150,90,90,90",
            "max_lattices=8",
            "beam.fix=all",
            "detector.fix=all",
        ],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    expt_file = tmp_path / "indexed.expt"
    refl_file = tmp_path / "indexed.refl"
    assert expt_file.is_file()
    assert refl_file.is_file()
    refl = flex.reflection_table.from_file(refl_file)
    expt = ExperimentList.from_file(expt_file, check_format=False)
    assert flex.max(refl["id"]) + 1 == len(expt)
| {
"content_hash": "3ad34874261666227a35feaafe0dc262",
"timestamp": "",
"source": "github",
"line_count": 836,
"max_line_length": 93,
"avg_line_length": 33.53349282296651,
"alnum_prop": 0.6307341085824356,
"repo_name": "dials/dials",
"id": "3d38e512619df6c61f60fd748f3a421499c6b58b",
"size": "28034",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/algorithms/indexing/test_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "379"
},
{
"name": "C++",
"bytes": "1758129"
},
{
"name": "CMake",
"bytes": "34388"
},
{
"name": "Dockerfile",
"bytes": "329"
},
{
"name": "Gherkin",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "25033"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "6147100"
},
{
"name": "Shell",
"bytes": "6419"
}
],
"symlink_target": ""
} |
"""
Classes and utilities for addresses of various types.
"""
from __future__ import print_function
import struct
import socket
# Slightly tested attempt at Python 3 friendliness
import sys

# Python 3 removed the ``long`` type.  The original check peeked into
# sys.modules['__builtin__'], which raises KeyError on Python 3 (the module
# was renamed to ``builtins``), so the shim itself crashed on the interpreter
# it was meant to support.  Probing the name directly works on both versions.
try:
    long
except NameError:
    long = int
"""
# Unfinished oui name stuff formerly from packet library.
oui = int(a[0]) << 16 | int(a[1]) << 8 | int(a[2])
# check if globally unique
if resolve_name and not (a[0] & 0x2):
if _ethoui2name.has_key(oui):
return "(%s):%02x:%02x:%02x" %( _ethoui2name[oui], a[3],a[4],a[5])
"""
_eth_oui_to_name = {}
def _load_oui_names():
    """Populate _eth_oui_to_name from the oui.txt file next to this module.

    Parsing is deliberately best-effort: a missing or malformed oui.txt must
    never prevent the module from importing, so any failure is logged as a
    warning and otherwise ignored.
    """
    import inspect
    import os.path
    # Locate oui.txt relative to this source file.
    filename = os.path.join(os.path.dirname(inspect.stack()[0][1]), 'oui.txt')
    try:
        # 'with' guarantees the file is closed on every path (the original
        # only closed it after an exception); iterating the file object avoids
        # reading the whole file into memory via readlines().
        with open(filename) as f:
            for line in f:
                if len(line) < 1:
                    continue
                if line[0].isspace():
                    continue
                split = line.split(' ')
                # Only lines of the form XX-XX-XX (hex) carry OUI entries.
                if not '-' in split[0]:
                    continue
                # grab 3-byte OUI
                oui_str = split[0].replace('-', '')
                # strip off (hex) identifier and keep rest of name
                end = ' '.join(split[1:]).strip()
                end = end.split('\t')
                end.remove('(hex)')
                oui_name = ' '.join(end)
                # convert oui to int
                oui = int(oui_str, 16)
                _eth_oui_to_name[oui] = oui_name.strip()
    except Exception:
        # Narrowed from a bare except; logging.warning replaces the
        # deprecated Logger.warn alias.
        import logging
        logging.getLogger().warning("Could not load OUI list")

_load_oui_names()
class EthAddr (object):
"""
An Ethernet (MAC) address type.
"""
def __init__ (self, addr):
"""
Understands Ethernet address is various forms. Hex strings, raw byte
strings, etc.
"""
# Always stores as a 6 character string
if isinstance(addr, bytes) or isinstance(addr, basestring):
if len(addr) == 6:
# raw
pass
elif len(addr) == 17 or len(addr) == 12 or addr.count(':') == 5:
# hex
if len(addr) == 17:
if addr[2::3] != ':::::' and addr[2::3] != '-----':
raise RuntimeError("Bad format for ethernet address")
# Address of form xx:xx:xx:xx:xx:xx
# Pick out the hex digits only
addr = ''.join((addr[x*3:x*3+2] for x in xrange(0,6)))
elif len(addr) == 12:
pass
else:
# Assume it's hex digits but they may not all be in two-digit
# groupings (e.g., xx:x:x:xx:x:x). This actually comes up.
addr = ''.join(["%02x" % (int(x,16),) for x in addr.split(":")])
# We should now have 12 hex digits (xxxxxxxxxxxx).
# Convert to 6 raw bytes.
addr = b''.join((chr(int(addr[x*2:x*2+2], 16)) for x in range(0,6)))
else:
raise RuntimeError("Expected ethernet address string to be 6 raw "
"bytes or some hex")
self._value = addr
elif isinstance(addr, EthAddr):
self._value = addr.toRaw()
elif type(addr) == list or (hasattr(addr, '__len__') and len(addr) == 6
and hasattr(addr, '__iter__')):
self._value = b''.join( (chr(x) for x in addr) )
elif addr is None:
self._value = b'\x00' * 6
else:
raise RuntimeError("Expected ethernet address to be a string of 6 raw "
"bytes or some hex")
def isBridgeFiltered (self):
"""
Checks if address is an IEEE 802.1D MAC Bridge Filtered MAC Group Address
This range is 01-80-C2-00-00-00 to 01-80-C2-00-00-0F. MAC frames that
have a destination MAC address within this range are not relayed by
bridges conforming to IEEE 802.1D
"""
return ((ord(self._value[0]) == 0x01)
and (ord(self._value[1]) == 0x80)
and (ord(self._value[2]) == 0xC2)
and (ord(self._value[3]) == 0x00)
and (ord(self._value[4]) == 0x00)
and (ord(self._value[5]) <= 0x0F))
@property
def is_bridge_filtered (self):
return self.isBridgeFiltered()
def isGlobal (self):
"""
Returns True if this is a globally unique (OUI enforced) address.
"""
return not self.isLocal()
def isLocal (self):
"""
Returns True if this is a locally-administered (non-global) address.
"""
return True if (ord(self._value[0]) & 2) else False
@property
def is_local (self):
  # PEP 8-style property alias for isLocal().
  return self.isLocal()
@property
def is_global (self):
  # PEP 8-style property alias for isGlobal().
  return self.isGlobal()
def isMulticast (self):
"""
Returns True if this is a multicast address.
"""
return True if (ord(self._value[0]) & 1) else False
@property
def is_multicast (self):
  # PEP 8-style property alias for isMulticast().
  return self.isMulticast()
def toRaw (self):
  """
  Returns the address as a 6-long bytes object.

  This is the internal representation; callers should treat it as
  read-only (the object is immutable -- see __setattr__).
  """
  return self._value
def toTuple (self):
"""
Returns a 6-entry long tuple where each entry is the numeric value
of the corresponding byte of the address.
"""
return tuple((ord(x) for x in self._value))
def toStr (self, separator = ':', resolveNames = False):
"""
Returns the address as string consisting of 12 hex chars separated
by separator.
If resolveNames is True, it may return company names based on
the OUI. (Currently unimplemented)
"""
#TODO: show OUI info from packet lib ?
return separator.join(('%02x' % (ord(x),) for x in self._value))
def __str__ (self):
  # Human-readable colon-separated hex form; delegates to toStr().
  return self.toStr()
def __cmp__ (self, other):
try:
if type(other) == EthAddr:
other = other._value
elif type(other) == bytes:
pass
else:
other = EthAddr(other)._value
if self._value == other:
return 0
if self._value < other:
return -1
if self._value > other:
return -1
raise RuntimeError("Objects can not be compared?")
except:
return -other.__cmp__(self)
def __hash__ (self):
  # Hash of the raw 6-byte value; safe because instances are immutable
  # (see __setattr__), so the hash can never go stale.
  return self._value.__hash__()
def __repr__ (self):
return self.__class__.__name__ + "('" + self.toStr() + "')"
def __len__ (self):
  # An Ethernet address is always exactly six octets.
  return 6
def __setattr__ (self, a, v):
  # Enforce immutability: once _value has been assigned (at the end of
  # construction), any further attribute write is rejected.  Before that
  # point, writes pass through to the normal mechanism.
  if hasattr(self, '_value'):
    raise TypeError("This object is immutable")
  object.__setattr__(self, a, v)
class IPAddr (object):
  """
  Represents an IPv4 address.

  The address is stored internally (in _value) as a *signed* integer in
  network byte order; the to*() helpers below convert to unsigned,
  host-order, raw-bytes, and dotted-quad forms.

  NOTE(review): this class uses Python 2 names (basestring, long, cmp)
  and so is Python 2 code.
  """
  def __init__ (self, addr, networkOrder = False):
    """
    Initialize using several possible formats
    If addr is an int/long, then it is assumed to be in host byte order
    unless networkOrder = True
    Stored in network byte order as a signed int
    """
    # Always stores as a signed network-order int
    if isinstance(addr, basestring) or isinstance(addr, bytes):
      if len(addr) != 4:
        # dotted quad
        self._value = struct.unpack('i', socket.inet_aton(addr))[0]
      else:
        # Four raw bytes, already in network order.
        self._value = struct.unpack('i', addr)[0]
    elif isinstance(addr, IPAddr):
      # Copy constructor.
      self._value = addr._value
    elif isinstance(addr, int) or isinstance(addr, long):
      addr = addr & 0xffFFffFF # unsigned long
      # Repack as network-order signed int (byte-swapping only when the
      # input was given in host order).
      self._value = struct.unpack("!i",
          struct.pack(('!' if networkOrder else '') + "I", addr))[0]
    else:
      raise RuntimeError("Unexpected IP address format")
  def toSignedN (self):
    """ A shortcut for toSigned(networkOrder=True) """
    return self.toSigned(networkOrder = True)
  def toUnsignedN (self):
    """ A shortcut for toUnsigned(networkOrder=True) """
    return self.toUnsigned(networkOrder = True)
  def toSigned (self, networkOrder = False):
    """ Return the address as a signed int """
    if networkOrder:
      # Internal representation is already signed network order.
      return self._value
    # Swap to host order, then reinterpret the unsigned value as signed.
    v = socket.htonl(self._value & 0xffFFffFF)
    return struct.unpack("i", struct.pack("I", v))[0]
  def toRaw (self):
    """
    Returns the address as a four-character byte string.
    """
    return struct.pack("i", self._value)
  def toUnsigned (self, networkOrder = False):
    """
    Returns the address as an integer in either network or host (the
    default) byte order.
    """
    if not networkOrder:
      return socket.htonl(self._value & 0xffFFffFF)
    return self._value & 0xffFFffFF
  def toStr (self):
    """ Return dotted quad representation """
    return socket.inet_ntoa(self.toRaw())
  def in_network (self, *args, **kw):
    # PEP 8-style alias for inNetwork().
    return self.inNetwork(*args, **kw)
  def inNetwork (self, network, netmask = None):
    """
    Returns True if this network is in the specified network.
    network is a dotted quad (with or without a CIDR or normal style
    netmask, which can also be specified separately via the netmask
    parameter), or it can be a tuple of (address,network-bits) like that
    returned by parse_cidr().
    """
    if type(network) is not tuple:
      if netmask is not None:
        network = str(network)
        network += "/" + str(netmask)
      n,b = parse_cidr(network)
    else:
      n,b = network
      if type(n) is not IPAddr:
        n = IPAddr(n)
    # Mask off the host part and compare network prefixes.
    return (self.toUnsigned() & ~((1 << (32-b))-1)) == n.toUnsigned()
  @property
  def is_multicast (self):
    # Class D addresses: top four bits are 1110.
    return ((self.toSigned(networkOrder = False) >> 24) & 0xe0) == 0xe0
  @property
  def multicast_ethernet_address (self):
    """
    Returns corresponding multicast EthAddr
    Assumes this is, in fact, a multicast IP address!
    """
    if not self.is_multicast:
      raise RuntimeError("No multicast EthAddr for non-multicast IPAddr!")
    # Low 23 bits of the IP map into the 01:00:5e multicast OUI prefix.
    n = self.toUnsigned(networkOrder = False) & 0x7fffff
    return EthAddr("01005e" + ("%06x" % (n)))
  def __str__ (self):
    return self.toStr()
  def __cmp__ (self, other):
    # Python 2 three-way comparison; None always sorts first.
    if other is None: return 1
    try:
      if not isinstance(other, IPAddr):
        other = IPAddr(other)
      return cmp(self.toUnsigned(), other.toUnsigned())
    except:
      # Best effort: let the other side try the comparison, negated.
      return -other.__cmp__(self)
  def __hash__ (self):
    # Safe because instances are immutable (see __setattr__).
    return self._value.__hash__()
  def __repr__ (self):
    return self.__class__.__name__ + "('" + self.toStr() + "')"
  def __len__ (self):
    # An IPv4 address is always exactly four octets.
    return 4
  def __setattr__ (self, a, v):
    # Enforce immutability after _value has been assigned.
    if hasattr(self, '_value'):
      raise TypeError("This object is immutable")
    object.__setattr__(self, a, v)
def netmask_to_cidr (dq):
  """
  Takes a netmask as either an IPAddr or a string, and returns the number
  of network bits. e.g., 255.255.255.0 -> 24
  Raises RuntimeError if the subnet mask is not CIDR-compatible.
  """
  if isinstance(dq, basestring):
    dq = IPAddr(dq)
  remaining = dq.toUnsigned(networkOrder=False)
  bits = 0
  # Count the run of leading one-bits.
  while remaining & 0x80000000:
    bits += 1
    remaining = (remaining << 1) & 0xffFFffFF
  # Any bit still set means the mask had a hole and is not a prefix.
  if remaining != 0:
    raise RuntimeError("Netmask %s is not CIDR-compatible" % (dq,))
  return bits
def cidr_to_netmask (bits):
  """
  Takes a number of network bits, and returns the corresponding netmask
  as an IPAddr. e.g., 24 -> 255.255.255.0
  """
  # Build `bits` one-bits and shift them into the high end of the word.
  mask = ((1 << bits) - 1) << (32 - bits)
  return IPAddr(mask, networkOrder = False)
def parse_cidr (addr, infer=True, allow_host=False):
  """
  Takes a CIDR address or plain dotted-quad, and returns a tuple of address
  and count-of-network-bits.
  Can infer the network bits based on network classes if infer=True.
  Can also take a string in the form 'address/netmask', as long as the
  netmask is representable in CIDR.
  FIXME: This function is badly named.
  """
  def check (r0, r1):
    # Validate that the host part is zero (unless allow_host), then
    # convert the wildcard-bit count r1 into a network-bit count.
    a = r0.toUnsigned()
    b = r1
    if (not allow_host) and (a & ((1<<b)-1)):
      raise RuntimeError("Host part of CIDR address is not zero (%s)"
                         % (addr,))
    return (r0,32-r1)
  addr = addr.split('/', 2)
  if len(addr) == 1:
    # No slash: plain address, possibly with inferred classful mask.
    if infer is False:
      return check(IPAddr(addr[0]), 0)
    addr = IPAddr(addr[0])
    b = 32-infer_netmask(addr)
    m = (1<<b)-1
    if (addr.toUnsigned() & m) == 0:
      # All bits in wildcarded part are 0, so we'll use the wildcard
      return check(addr, b)
    else:
      # Some bits in the wildcarded part are set, so we'll assume it's a host
      return check(addr, 0)
  try:
    wild = 32-int(addr[1])
  except:
    # Maybe they passed a netmask instead of a bit count.
    m = IPAddr(addr[1]).toUnsigned()
    b = 0
    while m & (1<<31):
      b += 1
      m <<= 1
    if m & 0x7fffffff != 0:
      raise RuntimeError("Netmask " + str(addr[1]) + " is not CIDR-compatible")
    wild = 32-b
  # CLEANUP: the assert/return pair used to be duplicated both inside the
  # except block and here; a single copy is reached on both paths.
  assert wild >= 0 and wild <= 32
  return check(IPAddr(addr[0]), wild)
def infer_netmask (addr):
  """
  Uses classful addressing rules to guess the number of network bits.

  addr is an object with a toUnsigned() method (e.g. IPAddr).
  """
  u = addr.toUnsigned()
  if u == 0:
    # Special case -- default network: all bits wildcarded.
    return 0
  if (u & 0x80000000) == 0:
    return 8            # Class A (leading bit 0)
  if (u & 0xc0000000) == 0x80000000:
    return 16           # Class B (leading bits 10)
  if (u & 0xe0000000) == 0xc0000000:
    return 24           # Class C (leading bits 110)
  # Class D (multicast) and Class E (experimental): exact match.
  return 32
# Common well-known addresses.
IP_ANY       = IPAddr("0.0.0.0")          # wildcard / unspecified address
IP_BROADCAST = IPAddr("255.255.255.255")  # limited broadcast address
if __name__ == '__main__':
  # A couple sanity checks, printed alongside their expected values for
  # eyeball comparison, then an interactive shell.
  #TODO: move to tests
  import code
  a = IPAddr('255.0.0.1')
  for v in [('255.0.0.1',True), (0xff000001, True), (0x010000ff, False)]:
    print("== " + str(v) + " =======================")
    a = IPAddr(v[0],v[1])
    print(a._value,-16777215)
    #print(hex(a._value),'ff000001')
    print(str(a),'255.0.0.1')
    print(hex(a.toUnsigned()),'010000ff')
    print(hex(a.toUnsigned(networkOrder=True)),'ff000001')
    print(a.toSigned(),16777471)
    print(a.toSigned(networkOrder=True),-16777215)
    print("----")
  # All three forms below should parse to a /24.
  print([parse_cidr(x)[1]==24 for x in
         ["192.168.101.0","192.168.102.0/24","1.1.168.103/255.255.255.0"]])
  code.interact(local=locals())
| {
"content_hash": "1aeae40c75c3a499820a58cdf77bf972",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 79,
"avg_line_length": 29.13978494623656,
"alnum_prop": 0.5853874538745387,
"repo_name": "damomeen/pox-datapath",
"id": "bd31090f0b047880747e9c16959ff642e1c03e65",
"size": "14140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pox/lib/addresses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15247"
},
{
"name": "JavaScript",
"bytes": "9135"
},
{
"name": "Python",
"bytes": "1037929"
},
{
"name": "Shell",
"bytes": "373"
}
],
"symlink_target": ""
} |
import socket # Networking support
import signal # Signal support (server shutdown on signal receive)
import time # Current time
import os.path
import shutil
import re
import gvar
class websock:
    """ A minimal HTTP server handling GET/HEAD file serving and a small
    set of POST actions (directory/file management) for the sync webpage.

    NOTE(review): this is Python 2 code (print statements in the POST
    path) and handles one connection at a time, synchronously.
    """
    def __init__(self, port = 8080, host = ''):
        """ Constructor """
        self.host = host # <-- works on all available network interfaces
        self.port = port
        self.www_dir = 'webpage' # Directory where webpage files are stored
        #self.www_dir = ''
        # NOTE(review): activate_server() already ends by calling
        # _wait_for_connections() itself, so the second call below is only
        # reached if that loop ever returns -- confirm intent.
        self.activate_server()
        self._wait_for_connections()
    def activate_server(self):
        """ Attempts to acquire the socket and launch the server """
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try: # the port provided to __init__() may be unavailable
            print("Launching HTTP server on ", self.host, ":",self.port)
            self.socket.bind((self.host, self.port))
        except Exception as e:
            print ("Warning: Could not aquite port:",self.port,"\n")
            print ("I will try a higher port")
            # store the user-provided port locally for later (in case 8080 fails)
            user_port = self.port
            self.port = 8080
            try:
                print("Launching HTTP server on ", self.host, ":",self.port)
                self.socket.bind((self.host, self.port))
            except Exception as e:
                print("ERROR: Failed to acquire sockets for ports ", user_port, " and 8080. ")
                print("Try running the Server in a privileged user mode.")
                self.stop()
                import sys
                sys.exit(1)
        print ("Server successfully acquired the socket with port:", self.port)
        print ("Press Ctrl+C to shut down the server and exit.")
        self._wait_for_connections()
    def stop(self):
        """ Shut down the server """
        try:
            print("Shutting down the server")
            self.socket.shutdown(socket.SHUT_RDWR)
            # NOTE(review): neither self.finished nor _Thread__stop is
            # defined on this class; these lines raise AttributeError,
            # which the except below swallows. Confirm intent.
            self.finished.set()
            self._Thread__stop()
        except Exception as e:
            print("Warning: could not shut down the socket. Maybe it was already closed?",e)
    def _gen_headers(self, code):
        """ Generates HTTP response headers (everything but the body).

        code: 200 or 404; anything else yields headers with no status line.
        """
        # determine response code
        h = ''
        if (code == 200):
            h = 'HTTP/1.1 200 OK\n'
        elif(code == 404):
            h = 'HTTP/1.1 404 Not Found\n'
        # write further headers
        current_date = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
        h += 'Date: ' + current_date +'\n'
        h += 'Server: Simple-Python-HTTP-Server\n'
        h += 'Connection: close\n\n' # signal that the connection will be closed after completing the request
        return h
    def _wait_for_connections(self):
        """ Main loop awaiting connections """
        while True:
            print ("Awaiting New connection")
            self.socket.listen(3) # maximum number of queued connections
            conn, addr = self.socket.accept()
            # conn - socket to client
            # addr - clients address
            print("Got connection from:", addr)
            data = conn.recv(1024) #receive data from client
            string = bytes.decode(data) #decode it to string
            #determine request method (HEAD and GET are supported)
            request_method = string.split(' ')[0]
            print ("Method: ", request_method)
            print ("Request body: ", string)
            #if string[0:3] == 'GET':
            if (request_method == 'GET') | (request_method == 'HEAD'):
                #file_requested = string[4:]
                # split on space "GET /file.html" -into-> ('GET','file.html',...)
                file_requested = string.split(' ')
                file_requested = file_requested[1] # get 2nd element
                #Check for URL arguments. Disregard them
                file_requested = file_requested.split('?')[0] # disregard anything after '?'
                if (file_requested == '/'): # in case no file is specified by the browser
                    file_requested = '/welcome.html' # load index.html by default
                file_requested = file_requested.replace('%20', ' ')
                file_requested = self.www_dir + file_requested
                print ("Serving web page [",file_requested,"]")
                ## Load file content
                try:
                    file_handler = open('%s' % file_requested,'rb')
                    if (request_method == 'GET'): #only read the file when GET
                        response_content = file_handler.read() # read file content
                    file_handler.close()
                    response_headers = self._gen_headers( 200)
                except Exception as e: #in case file was not found, generate 404 page
                    print ("Warning, file not found. Serving response code 404\n", e)
                    response_headers = self._gen_headers( 404)
                    if (request_method == 'GET'):
                        response_content = b"<html><body><p>Error 404: File not found</p><p>Python HTTP server</p></body></html>"
                server_response = response_headers.encode() # return headers for GET and HEAD
                if (request_method == 'GET'):
                    server_response += response_content # return additional content for GET only
                conn.send(server_response)
                print ("Closing connection with client")
                conn.close()
            # NOTE(review): this is an independent `if` (not elif), so after
            # a GET/HEAD is served, control also reaches the `else` below and
            # logs "Unknown HTTP request method" -- confirm intent.
            if(request_method == 'POST'):
                print "Solicitacao de post"
                file_requested = string.split(' ')
                file_requested = file_requested[1]
                # The form payload is the last whitespace-separated token;
                # its last line holds "name=value" pairs.
                last_line = string.split(" ")[-1:]
                parameter = last_line[-1].split('\n')
                print parameter[-1]
                some_parameters = parameter[-1].split('=')
                tipo = some_parameters[0]   # action type selected by form field name
                print tipo
                if tipo == "login":
                    login = some_parameters[1]
                if tipo == "dir":
                    # Create a new directory under the chosen parent.
                    new_dir = some_parameters[2]
                    #print new_dir
                    father_dir = some_parameters[1].split('&')
                    father_dir = father_dir[0]
                    father_dir = father_dir.replace("%2F", "/");
                    father_dir = father_dir.replace("%5C", "/");
                    #print father_dir
                    # NOTE(review): re.sub returns a new string; this call
                    # discards its result, so new_dir is not sanitized.
                    re.sub(r'\W+', '', new_dir)
                    #father_dir = filter(str.isalnum, father_dir)
                    #new_dir = filter(str.isalnum, new_dir)
                    p = os.path.dirname( father_dir + '/' + new_dir+ '/')
                    print father_dir
                    print new_dir
                    if not os.path.exists(p):
                        os.makedirs(p)
                        #print "Erro"
                    #message = 'Novo diretorio cadastrado com sucesso'
                if tipo == "delete_dir":
                    # Recursively delete the selected directory.
                    delete_dir = some_parameters[1]
                    delete_dir = delete_dir.replace("%2F", "/")
                    delete_dir = delete_dir.replace("%5C", "/")
                    #print delete_dir
                    shutil.rmtree(delete_dir)
                if tipo == "fileDir":
                    # Copy an existing file into the selected directory.
                    new_file = some_parameters[2]
                    file_dir = some_parameters[1].split('&')
                    file_dir = file_dir[0]
                    file_dir = file_dir.replace("%2F", "/")
                    file_dir = file_dir.replace("%5C", "/")
                    new_file = new_file.replace("%2F", "/")
                    new_file = new_file.replace("%5C", "/")
                    new_file = new_file.replace("%3A", ":")
                    print file_dir
                    print new_file
                    if os.path.isfile(new_file):
                        shutil.copy2(new_file,file_dir)
                if tipo == 'file_delete':
                    # Delete the selected file.
                    file_address = some_parameters[1]
                    file_address = file_address.replace("%2F", "/")
                    file_address = file_address.replace("%5C", "/")
                    file_address = file_address.replace("%3A", ":")
                    if os.path.isfile(file_address):
                        os.remove(file_address)
                    print file_address
                file_requested = "Mainpage"
                file_requested = self.www_dir + file_requested
                print ("Serving web page [",file_requested,"]")
                # NOTE(review): `login` is only bound when tipo == "login";
                # other POST actions hit a NameError here -- confirm intent.
                response_content = self.mainpage(login)
                response_headers = self._gen_headers( 200)
                server_response = response_headers.encode()
                server_response += response_content
                conn.send(server_response)
                print ("Closing connection with client")
                conn.close()
            else:
                print("Unknown HTTP request method:", request_method)
    def mainpage(self, str ):
        """ Render the main HTML page: local/remote file listings plus
        directory/file management forms.

        NOTE(review): the parameter name 'str' shadows the builtin; kept
        as-is since this is a documentation-only change.
        """
        header = """<!DOCTYPE html> <html> <head>Bem vindo, %s!</head>
<head>
<link href="http://fonts.googleapis.com/css?family=Didact+Gothic" rel="stylesheet" />
<link href="/css/default.css" rel="stylesheet" type="text/css" media="all" />
<link href="/css/fonts.css" rel="stylesheet" type="text/css" media="all" />
</head>
<body>
<div id="header" class="container">
<img src="/images/header-bg.jpg"></img>
</div>
<div class="container">
""" %(str)
        tree = '<ul>'
        # Partition the shared file table into local vs. remote entries
        # based on whether our MAC appears in each entry's owner list.
        file_dict = gvar.file_dict
        local_file_dict = {}
        remote_file_dict = {}
        for files in file_dict:
            if gvar.mac in file_dict[files][1]:
                local_file_dict.update({files:file_dict[files]})
            else:
                remote_file_dict.update({files:file_dict[files]})
        server_address_string = 'http://%s:%s/' % (gvar.ip, self.port)
        tree += '<p>Arquivos locais:</p><div><ul>'
        for files in local_file_dict:
            tree += '<li><a href="http://%s:8080%s" target="_blank">%s</a></li>' % (gvar.ip, local_file_dict[files][0], local_file_dict[files][0])
        tree += '</ul></div>Arquivos remotos<div><ul>'
        for files in remote_file_dict:
            for remote_mac in remote_file_dict[files][1]:
                remote_ip = gvar.peer_dict[remote_mac]
                tree += '<li><a href="http://%s:8080%s" target="_blank">%s</a></li>' % (remote_ip[0] ,remote_file_dict[files][0], remote_file_dict[files][0])
        tree += '</ul></div>'
        # (Commented-out directory-walk rendering kept from the original.)
        #for path, dirs, files in os.walk('./webpage/syncedFiles'):
        # if os.sep == '\\':
        # lining = path.count('\\')
        # else:
        # lining = path.count ('/')
        # for x in range (0,lining-1):
        # tree += '<ul>'
        # tree +='<li>'+ os.path.basename(path)
        # tree +='<ul>'
        #for f in files:
        #tree += '<li><a href=http://localhost:8080/'+os.path.basename(path)+'/'+f+' target="_blank">'+f+'</a></li>'
        #tree += '<li><a href='+server_address_string+os.path.basename(path)+'/'+f+' target="_blank">'+f+'</a></li>'
        # tree += '</li>'
        # for x in range (0,lining):
        # tree+= '</ul>'
        #tree+= '</ul>'
        # "Create new directory" form: parent selector populated from disk.
        body = """ <div> <p>Criar novo diretorio</p>
<form method="POST" ">
<p>Nome do pai do novo diretorio:<select name="dir">"""
        for path, dirs, files in os.walk('./webpage/syncedFiles'):
            body +='<option value='+ path + '>' + os.path.basename(path) + '</option>'
        body += """\
</select></p>
<p>Nome do novo diretorio:<input type="text"name="dirName"></p>
<input type="submit" value="Submit">
</form>
</div>
<div>
<p>Deletar diretorio</p>
<form method="POST" ">
<p>Diretorio a ser deletado(e todos os arquivos):<select name="delete_dir">"""
        # "Delete directory" form: every directory except the root.
        for path, dirs, files in os.walk('./webpage/syncedFiles'):
            if path != './webpage/syncedFiles':
                body +='<option value='+ path + '>' + os.path.basename(path) + '</option>'
        body += """\
</select></p>
<input type="submit" value="Submit">
</form>
</div> <div>
<p>Adicionar arquivo</p>
<form " method="post" >
<p>Diretorio onde o arquivo vai ser alocado:</p>
<select name="fileDir">
"""
        # "Add file" form: target directory selector.
        for path, dirs, files in os.walk('./webpage/syncedFiles'):
            body +='<option value='+ path + '>' + os.path.basename(path) + '</option>'
        body += """\
</select>
</p>
<input type="text" name="upfile" />
<input type="submit" value="Send" />
</form>
</div>
<div> <p>Deletar arquivo</p>
<form method="POST" ">
<p>Selecione o arquivo a ser deletado:<select name="file_delete">"""
        # "Delete file" form: absolute paths of every synced file.
        for path, dirs, files in os.walk('./webpage/syncedFiles'):
            for f in files:
                body +='<option value='+ os.path.abspath(path)+'/'+f+ '>' + os.path.abspath(path)+'/'+f + '</option>'
        body += """\
</select></p>
<input type="submit" value="Submit">
</form>
</div>
<div>
<p>Deletar diretorio</p>
<form method="POST" ">
<p>Diretorio a ser deletado(e todos os arquivos):<select name="delete_dir">"""
        # Second "delete directory" form (duplicated in the original page).
        for path, dirs, files in os.walk('./webpage/syncedFiles'):
            if path != './webpage/syncedFiles':
                body +='<option value='+ path + '>' + os.path.basename(path) + '</option>'
        body += """\
</select></p>
<input type="submit" value="Submit">
</form>
</div>
</div>
</body>
</html>
"""
        # <form enctype="multipart/form-data" " method="post" >
        return header+tree+body
| {
"content_hash": "5d9bd55845abccdb1d9dc1cfc7634d38",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 154,
"avg_line_length": 40.91316526610644,
"alnum_prop": 0.48623853211009177,
"repo_name": "Gabrielcarvfer/TR22015FS",
"id": "699df18ac11e8bec6f2149b7a71c07b596ce9bca",
"size": "14625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SyncPython/websock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69595"
},
{
"name": "HTML",
"bytes": "27999"
},
{
"name": "Java",
"bytes": "156733"
},
{
"name": "JavaScript",
"bytes": "11574"
},
{
"name": "PLpgSQL",
"bytes": "727"
},
{
"name": "Python",
"bytes": "81805"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest
import httmock
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import clever
from clever import importer
json = importer.import_json()
import requests
from httmock import response, HTTMock
def functional_test(auth):
    """Build a FunctionalTests subclass bound to the given credentials.

    auth is a dict with either a "token" or an "api_key" key; the
    generated TestCase authenticates accordingly in setUp().
    """
    class FunctionalTests(CleverTestCase):
        def setUp(self):
            super(FunctionalTests, self).setUp()
            clever.api_base = os.environ.get('CLEVER_API_BASE', 'https://api.clever.com')
            # Token auth takes precedence over API-key auth.
            if auth.get("token", None):
                clever.set_token(auth["token"])
            elif auth.get("api_key", None):
                clever.set_api_key(auth["api_key"])
        def test_dns_failure(self):
            # Point at an unresolvable host and restore api_base afterwards.
            api_base = clever.api_base
            try:
                clever.api_base = 'https://my-invalid-domain.ireallywontresolve/v1'
                self.assertRaises(clever.APIConnectionError, clever.District.all)
            finally:
                clever.api_base = api_base
        def test_list_accessors(self):
            # Attribute access and item access should agree.
            district = clever.District.all()[0]
            self.assertEqual(district['name'], district.name)
        def test_unicode(self):
            # Make sure unicode requests can be sent
            self.assertRaises(clever.InvalidRequestError, clever.District.retrieve, id=u'☃')
        def test_none_values(self):
            # Explicit None parameters should be tolerated.
            district = clever.District.all(sort=None)[0]
            self.assertTrue(district.id)
        def test_missing_id(self):
            # Refreshing an object with no id must fail loudly.
            district = clever.District()
            self.assertRaises(clever.InvalidRequestError, district.refresh)
    return FunctionalTests
class CleverTestCase(unittest.TestCase):
    """Base case: points the client at the (demo) API with a demo key."""
    def setUp(self):
        super(CleverTestCase, self).setUp()
        clever.api_base = os.environ.get('CLEVER_API_BASE', 'https://api.clever.com')
        clever.set_api_key('DEMO_KEY')
class FunctionalTests(CleverTestCase):
    """End-to-end tests run against the live demo API with DEMO_KEY."""
    def test_dns_failure(self):
        # Point at an unresolvable host and restore api_base afterwards.
        api_base = clever.api_base
        try:
            clever.api_base = 'https://my-invalid-domain.ireallywontresolve/v1'
            self.assertRaises(clever.APIConnectionError, clever.District.all)
        finally:
            clever.api_base = api_base
    def test_list_accessors(self):
        # Attribute access and item access should agree.
        district = clever.District.all()[0]
        self.assertEqual(district['name'], district.name)
    def test_iter(self):
        for district in clever.District.iter():
            self.assertTrue(district.id)
    def test_iter_count(self):
        # Compare the API's own count against what iteration yields, and
        # make sure iteration actually spans multiple pages.
        r = requests.get('https://api.clever.com/v1.1/students?count=true',
                         headers={'Authorization': 'Bearer DEMO_TOKEN'})
        req_count = json.loads(r.text)["count"]
        iter_count = len([x for x in clever.Student.iter()])
        self.assertTrue(req_count > clever.Student.ITER_LIMIT)
        self.assertEqual(req_count, iter_count)
    def test_unsupported_params(self):
        # Pagination parameters are reserved for iter() and must be rejected.
        self.assertRaises(clever.CleverError, lambda: clever.District.all(page=2))
        self.assertRaises(clever.CleverError, lambda: clever.District.all(limit=10))
        self.assertRaises(clever.CleverError, lambda: clever.District.all(
            starting_after='4fd43cc56d11340000000005'))
        self.assertRaises(clever.CleverError, lambda: clever.District.all(
            ending_before='4fd43cc56d11340000000005'))
        self.assertRaises(clever.CleverError, lambda: clever.District.all(page=2, limit=10))
    def test_unicode(self):
        # Make sure unicode requests can be sent
        self.assertRaises(clever.APIError, clever.District.retrieve, id=u'☃')
    def test_none_values(self):
        # Explicit None parameters should be tolerated.
        district = clever.District.all(sort=None)[0]
        self.assertTrue(district.id)
    def test_missing_id(self):
        # Refreshing an object with no id must fail loudly.
        district = clever.District()
        self.assertRaises(clever.InvalidRequestError, district.refresh)
    def test_keys_and_values_methods(self):
        # A fresh object exposes empty key/value sets.
        clever_object = clever.CleverObject()
        self.assertEqual(clever_object.keys(), set())
        self.assertEqual(clever_object.values(), set())
class AuthenticationErrorTest(CleverTestCase):
    """Verify a 401 is surfaced as AuthenticationError with body details."""
    def test_invalid_credentials(self):
        # Save the real key and restore it no matter what happens.
        key = clever.get_api_key()
        try:
            clever.set_api_key('invalid')
            clever.District.all()
        except clever.AuthenticationError, e:  # Python 2 except syntax
            self.assertEqual(401, e.http_status)
            self.assertTrue(isinstance(e.http_body, str))
            self.assertTrue(isinstance(e.json_body, dict))
        finally:
            clever.set_api_key(key)
class InvalidRequestErrorTest(CleverTestCase):
    """Verify a 404 is surfaced as APIError with the expected body shape."""
    def test_nonexistent_object(self):
        try:
            clever.District.retrieve('invalid')
        except clever.APIError, e:  # Python 2 except syntax
            self.assertEqual(404, e.http_status)
            self.assertFalse(isinstance(e.json_body, dict)) # 404 does not have a body
            self.assertTrue(isinstance(e.http_body, str))
# httmock handler used by TooManyRequestsErrorTest: always answers 429.
def too_many_requests_content(url, request):
    """Return a canned HTTP 429 response carrying rate-limit headers."""
    rate_limit_headers = {
        'X-Ratelimit-Remaining' : '0, 0',
        'X-Ratelimit-Reset' : '135136, 31634',
        'X-Ratelimit-Limit' : '200, 1200',
        'X-Ratelimit-Bucket': 'all, none',
    }
    return response(429, "", rate_limit_headers, None, 5, None)
class TooManyRequestsErrorTest(CleverTestCase):
    """Verify a mocked 429 response raises TooManyRequestsError."""
    def test_rate_limiter(self):
        with HTTMock(too_many_requests_content):
            r = requests.get('https://test.rate.limiting')
            res = {'body': r.content, 'headers': r.headers, 'code': 429}
            APIRequestor = clever.APIRequestor()
            self.assertRaises(clever.TooManyRequestsError, lambda : APIRequestor.interpret_response(res))
if __name__ == '__main__':
    # Build one suite covering the functional tests under both auth modes
    # (API key and bearer token) plus the error-handling cases.
    suite = unittest.TestSuite()
    for TestClass in [
            functional_test({"api_key": "DEMO_KEY"}),
            functional_test({"token": "7f76343d50b9e956138169e8cbb4630bb887b18"}),
            AuthenticationErrorTest,
            InvalidRequestErrorTest,
            TooManyRequestsErrorTest]:
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestClass))
    unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "8db5ce8bd78ead621d979ed00b360dea",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 99,
"avg_line_length": 33.642857142857146,
"alnum_prop": 0.6955060155697098,
"repo_name": "kvigen/test-drone-hook",
"id": "699badad3caab31067e86df484c4fa3bbba7cae0",
"size": "5680",
"binary": false,
"copies": "1",
"ref": "refs/heads/test-drone-hook",
"path": "test/test_clever.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40974"
},
{
"name": "Shell",
"bytes": "576"
}
],
"symlink_target": ""
} |
import os
import commands
import sys
import shutil
import time
import glob
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
BUILD_TMP = "/tmp"  # scratch area where the Cordova project is generated
def buildHelloMap(key):
    """Generate, patch, and build the HelloMap Cordova Android sample app.

    key: Google Maps API key passed to the cordova-plugin-googlemaps
    install.  The resulting debug APK(s) are copied next to this script.
    """
    if not os.path.exists(BUILD_TMP):
        os.mkdir(BUILD_TMP)
    os.chdir(BUILD_TMP)
    build_src = BUILD_TMP +"/HelloMap"
    # Start from a clean project tree.
    if os.path.exists(build_src):
        shutil.rmtree(build_src)
    os.system('cordova create HelloMap com.example.hellomap HelloMap')
    os.chdir(build_src)
    os.system('cordova platform add android')
    os.system('cordova plugin add %s/../../tools/cordova-plugin-crosswalk-webview' % SCRIPT_DIR)
    os.system('cordova plugin add cordova-plugin-googlemaps --variable API_KEY_FOR_ANDROID="%s"' % key)
    shutil.copyfile(SCRIPT_DIR + '/index.html', build_src + '/www/index.html')
    # Update android:theme="@android:style/Theme.Black.NoTitleBar" to android:theme="@android:style/Theme.Translucent.NoTitleBar" in AndroidManifest.xml
    os.system('sed -i "s/%s/%s/g" %s' % ("@android:style\/Theme.Black.NoTitleBar", "@android:style\/Theme.Translucent.NoTitleBar", build_src + "/platforms/android/AndroidManifest.xml"))
    # Set zOrderOnTop in config.xml (inserted just before the closing tag).
    lines = open(build_src + '/config.xml', 'r').readlines()
    lines.insert(-1, ' <preference name="xwalkZOrderOnTop" value="true" />\n')
    f = open(build_src + '/config.xml', 'w')
    f.writelines(lines)
    f.close()
    # Workaround for zOrderOnTop: patch GoogleMaps.java so the Crosswalk
    # view's z-order is toggled around the map dialog open/close events.
    googlemapjava = build_src + "/platforms/android/src/plugin/google/maps/GoogleMaps.java"
    if os.path.exists(googlemapjava):
        file = open(googlemapjava, 'r')
        lines = open(googlemapjava, 'r').readlines()
        # Locate the line numbers where the patch snippets must be added.
        import_pos = 0
        showdialog_pos = 0
        resizemap_pos = len(lines)
        insert1_pos = 0
        insert2_pos = 0
        for (num, value) in enumerate(file):
            if value.find("import com.google.android.gms.maps.model.VisibleRegion;") != -1:
                import_pos = num
            elif value.find("private void showDialog") != -1:
                showdialog_pos = num
            elif value.find("private void resizeMap") != -1:
                resizemap_pos = num
            # Workaround code should be added behind GoogleMaps.this.onMapEvent("map_close") in showDialog()
            elif value.find("GoogleMaps.this.onMapEvent(\"map_close\");") != -1 and num > showdialog_pos and num < resizemap_pos:
                insert1_pos = num
            # Workaround code should be added behind callbackContext.success(); in showDialog()
            elif value.find("callbackContext.success();") != -1 and num > showdialog_pos and num < resizemap_pos:
                insert2_pos = num
        # Insert in descending position order so earlier insertions do not
        # shift the later target positions.
        lines.insert(insert2_pos + 1, "\n XWalkCordovaView view = (XWalkCordovaView) webView.getView();\n")
        lines.insert(insert2_pos + 2, " view.setZOrderOnTop(false);\n")
        lines.insert(insert1_pos + 1, "\n XWalkCordovaView view = (XWalkCordovaView) webView.getView();\n")
        lines.insert(insert1_pos + 2, " view.setZOrderOnTop(true);\n")
        lines.insert(import_pos + 1, "import org.crosswalk.engine.XWalkCordovaView;\n")
        file = open(googlemapjava, 'w')
        file.writelines(lines)
        file.close()
    os.system('cordova build android')
    time.sleep(5)
    # Collect the debug APK(s) produced by the build.
    files = glob.glob(os.path.join(build_src + "/platforms/android/build/outputs/apk", "*-debug.apk"))
    if len(files) == 0:
        print("No apk build in %s/platforms/android/build/outputs/apk" % build_src)
        return
    for apk in files:
        shutil.copy2(apk, SCRIPT_DIR)
def main():
    """Parse the -k/--key command-line option and kick off the build."""
    try:
        usage = "Usage: ./google-maps-plugin.py -k <key>"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-k",
            "--key",
            dest="key",
            help="Google Maps API key")
        global BUILD_PARAMETERS
        (BUILD_PARAMETERS, args) = opts_parser.parse_args()
        if not BUILD_PARAMETERS.key:
            print("Google Maps API key is missing.")
            sys.exit(1)
        buildHelloMap(BUILD_PARAMETERS.key)
    except Exception as e:
        # Python 2 print statement (this script is Python 2 code).
        print "Got wrong options: %s, exit ..." % e
        sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "0e28733b2572596ed72218c8ea52b753",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 185,
"avg_line_length": 43.40594059405941,
"alnum_prop": 0.625,
"repo_name": "wanghongjuan/crosswalk-test-suite",
"id": "45cae2dc84e5a9df5e967c225c5fe6fb3be4af29",
"size": "5930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cordova/cordova-sampleapp-android-tests/sampleapp/res/GoogleMapsPlugin/google-maps-plugin.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "28136"
},
{
"name": "C#",
"bytes": "1709"
},
{
"name": "CSS",
"bytes": "72640"
},
{
"name": "Cucumber",
"bytes": "139208"
},
{
"name": "GLSL",
"bytes": "6990"
},
{
"name": "Groff",
"bytes": "16"
},
{
"name": "HTML",
"bytes": "29712226"
},
{
"name": "Java",
"bytes": "1351160"
},
{
"name": "JavaScript",
"bytes": "2732532"
},
{
"name": "Logos",
"bytes": "16"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "PHP",
"bytes": "37474"
},
{
"name": "Python",
"bytes": "1931641"
},
{
"name": "Shell",
"bytes": "593838"
}
],
"symlink_target": ""
} |
import json
import urllib2
KUBELET_DEFAULT_PORT = 10255        # kubelet read-only HTTP API port
KUBELET_DEFAULT_HOST = "localhost"  # collector runs on the node itself
class Kubelet(object):
    """Minimal client for the local kubelet's read-only HTTP API."""

    def list_pods(self):
        """Fetch the pod list from the local kubelet.

        Returns a (result, error) pair where exactly one element is None:
        (parsed JSON, None) on success, or (None, exception) when the
        request or the JSON decoding fails.
        """
        url = "http://{}:{}/pods".format(KUBELET_DEFAULT_HOST, KUBELET_DEFAULT_PORT)
        try:
            response = urllib2.urlopen(url)
        except urllib2.URLError as err:
            # BUG FIX: previously only HTTPError was caught, so plain
            # connection failures (refused, DNS) escaped as exceptions
            # instead of honoring the (None, error) contract.  URLError
            # is the superclass of HTTPError, so both are covered.
            return None, err
        try:
            return json.load(response), None
        except (TypeError, ValueError) as e:
            return None, e
| {
"content_hash": "2aa3fb134609ff17e7494d3f324f6c4b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 84,
"avg_line_length": 24.3,
"alnum_prop": 0.6069958847736625,
"repo_name": "Yelp/fullerite",
"id": "723b0c5e266a2e0237074174c88ec8ad91840350",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/diamond/collectors/jolokia/kubernetes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "524992"
},
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "5519"
},
{
"name": "Python",
"bytes": "1298809"
},
{
"name": "Roff",
"bytes": "19699"
},
{
"name": "Shell",
"bytes": "11159"
}
],
"symlink_target": ""
} |
import pylab as PL
import ttk
from Tkinter import *
from ttk import Notebook
class GUI:
## GUI variables
titleText = 'PyCX Simulator' # window title
timeInterval = 0 # refresh time in milliseconds
running = False
modelFigure = None
stepSize = 1
currentStep = 0
def __init__(self,title='PyCX Simulator',interval=0,stepSize=1,parameterSetters=[]):
    """Store the run settings and build the Tk interface.

    title: window title; interval: refresh time in milliseconds;
    stepSize: simulation steps per GUI update; parameterSetters: list of
    setter callables exposed on the Parameters tab.

    NOTE(review): parameterSetters=[] is a mutable default argument and
    is stored directly on the instance -- confirm callers always pass
    their own list before relying on it.
    """
    self.titleText = title
    self.timeInterval = interval
    self.stepSize = stepSize
    self.parameterSetters = parameterSetters
    self.varEntries = {}   # presumably maps setters to entry widgets; populated elsewhere -- verify
    self.statusStr = ""
    self.initGUI()
def initGUI(self):
#create root window
self.rootWindow = Tk()
self.statusText = StringVar(value=self.statusStr)
self.setStatusStr("Simulation not yet started")
self.rootWindow.wm_title(self.titleText)
self.rootWindow.protocol('WM_DELETE_WINDOW',self.quitGUI)
self.rootWindow.geometry('550x400')
self.rootWindow.columnconfigure(0, weight=1)
self.rootWindow.rowconfigure(0, weight=1)
self.notebook = Notebook(self.rootWindow)
self.notebook.grid(row=0,column=0,padx=2,pady=2,sticky='nswe')
self.frameRun = Frame()
self.frameSettings = Frame()
self.frameParameters = Frame()
self.frameInformation = Frame()
self.notebook.add(self.frameRun,text="Run")
self.notebook.add(self.frameSettings,text="Settings")
self.notebook.add(self.frameParameters,text="Parameters")
self.notebook.add(self.frameInformation,text="Info")
self.notebook.pack(expand=YES, fill=BOTH, padx=5, pady=5 ,side=TOP)
self.status = Label(self.rootWindow, width=40,height=3, relief=SUNKEN, bd=1,textvariable=self.statusText)
self.status.grid(row=1,column=0,padx=2,pady=2,sticky='nswe')
self.status.pack(side=TOP, fill=X, padx=1, pady=1, expand=NO)
self.runPauseString = StringVar()
self.runPauseString.set("Run")
self.buttonRun = Button(self.frameRun,width=30,height=2,textvariable=self.runPauseString,command=self.runEvent)
self.buttonRun.pack(side=TOP, padx=5, pady=5)
self.showHelp(self.buttonRun,"Runs the simulation (or pauses the running simulation)")
self.buttonStep = Button(self.frameRun,width=30,height=2,text='Step Once',command=self.stepOnce)
self.buttonStep.pack(side=TOP, padx=5, pady=5)
self.showHelp(self.buttonStep,"Steps the simulation only once")
self.buttonReset = Button(self.frameRun,width=30,height=2,text='Reset',command=self.resetModel)
self.buttonReset.pack(side=TOP, padx=5, pady=5)
self.showHelp(self.buttonReset,"Resets the simulation")
can = Canvas(self.frameSettings)
lab = Label(can, width=25,height=1,text="Step size ", justify=LEFT, anchor=W,takefocus=0)
lab.pack(side='left')
self.stepScale = Scale(can,from_=1, to=50, resolution=1,command=self.changeStepSize,orient=HORIZONTAL, width=25,length=150)
self.stepScale.set(self.stepSize)
self.showHelp(self.stepScale,"Skips model redraw during every [n] simulation steps\nResults in a faster model run.")
self.stepScale.pack(side='left')
can.pack(side='top')
can = Canvas(self.frameSettings)
lab = Label(can, width=25,height=1,text="Step visualization delay in ms ", justify=LEFT, anchor=W,takefocus=0)
lab.pack(side='left')
self.stepDelay = Scale(can,from_=0, to=max(2000,self.timeInterval), resolution=10,command=self.changeStepDelay,orient=HORIZONTAL, width=25,length=150)
self.stepDelay.set(self.timeInterval)
self.showHelp(self.stepDelay,"The visualization of each step is delays by the given number of milliseconds.")
self.stepDelay.pack(side='left')
can.pack(side='top')
scrollInfo = Scrollbar(self.frameInformation)
self.textInformation = Text(self.frameInformation, width=45,height=13,bg='lightgray',wrap=WORD,font=("Courier",10))
scrollInfo.pack(side=RIGHT, fill=Y)
self.textInformation.pack(side=LEFT,fill=BOTH,expand=YES)
scrollInfo.config(command=self.textInformation.yview)
self.textInformation.config(yscrollcommand=scrollInfo.set)
for variableSetter in self.parameterSetters:
can = Canvas(self.frameParameters)
lab = Label(can, width=25,height=1,text=variableSetter.__name__+" ",anchor=W,takefocus=0)
lab.pack(side='left')
ent = Entry(can, width=11)
ent.insert(0, str(variableSetter()))
if variableSetter.__doc__ != None and len(variableSetter.__doc__) > 0:
self.showHelp(ent,variableSetter.__doc__.strip())
ent.pack(side='left')
can.pack(side='top')
self.varEntries[variableSetter]=ent
if len(self.parameterSetters) > 0:
self.buttonSaveParameters = Button(self.frameParameters,width=50,height=1,command=self.saveParametersCmd,text="Save parameters to the running model",state=DISABLED)
self.showHelp(self.buttonSaveParameters,"Saves the parameter values.\nNot all values may take effect on a running model\nA model reset might be required.")
self.buttonSaveParameters.pack(side='top',padx=5,pady=5)
self.buttonSaveParametersAndReset = Button(self.frameParameters,width=50,height=1,command=self.saveParametersAndResetCmd,text="Save parameters to the model and reset the model")
self.showHelp(self.buttonSaveParametersAndReset,"Saves the given parameter values and resets the model")
self.buttonSaveParametersAndReset.pack(side='top',padx=5,pady=5)
def setStatusStr(self,newStatus):
self.statusStr = newStatus
self.statusText.set(self.statusStr)
#model control functions
def changeStepSize(self,val):
self.stepSize = int(val)
def changeStepDelay(self,val):
self.timeInterval= int(val)
def saveParametersCmd(self):
for variableSetter in self.parameterSetters:
variableSetter(float(self.varEntries[variableSetter].get()))
self.setStatusStr("New parameter values have been set")
def saveParametersAndResetCmd(self):
self.saveParametersCmd()
self.resetModel()
def runEvent(self):
self.running = not self.running
if self.running:
self.rootWindow.after(self.timeInterval,self.stepModel)
self.runPauseString.set("Pause")
self.buttonStep.configure(state=DISABLED)
self.buttonReset.configure(state=DISABLED)
if len(self.parameterSetters) > 0:
self.buttonSaveParameters.configure(state=NORMAL)
self.buttonSaveParametersAndReset.configure(state=DISABLED)
else:
self.runPauseString.set("Continue Run")
self.buttonStep.configure(state=NORMAL)
self.buttonReset.configure(state=NORMAL)
if len(self.parameterSetters) > 0:
self.buttonSaveParameters.configure(state=NORMAL)
self.buttonSaveParametersAndReset.configure(state=NORMAL)
def stepModel(self):
if self.running:
self.modelStepFunc()
self.currentStep += 1
self.setStatusStr("Step "+str(self.currentStep))
self.status.configure(foreground='black')
if (self.currentStep) % self.stepSize == 0:
self.drawModel()
self.rootWindow.after(int(self.timeInterval*1.0/self.stepSize),self.stepModel)
def stepOnce(self):
self.running = False
self.runPauseString.set("Continue Run")
self.modelStepFunc()
self.currentStep += 1
self.setStatusStr("Step "+str(self.currentStep))
self.drawModel()
if len(self.parameterSetters) > 0:
self.buttonSaveParameters.configure(state=NORMAL)
def resetModel(self):
self.running = False
self.runPauseString.set("Run")
self.modelInitFunc()
self.currentStep = 0;
self.setStatusStr("Model has been reset")
self.drawModel()
def drawModel(self):
if self.modelFigure == None or self.modelFigure.canvas.manager.window == None:
self.modelFigure = PL.figure()
PL.ion()
self.modelDrawFunc()
self.modelFigure.canvas.manager.window.update()
def start(self,func=[]):
if len(func)==3:
self.modelInitFunc = func[0]
self.modelDrawFunc = func[1]
self.modelStepFunc = func[2]
if (self.modelStepFunc.__doc__ != None and len(self.modelStepFunc.__doc__)>0):
self.showHelp(self.buttonStep,self.modelStepFunc.__doc__.strip())
if (self.modelInitFunc.__doc__ != None and len(self.modelInitFunc.__doc__)>0):
self.textInformation.config(state=NORMAL)
self.textInformation.delete(1.0, END)
self.textInformation.insert(END, self.modelInitFunc.__doc__.strip())
self.textInformation.config(state=DISABLED)
self.modelInitFunc()
self.drawModel()
self.rootWindow.mainloop()
def quitGUI(self):
PL.close('all')
self.rootWindow.quit()
self.rootWindow.destroy()
def showHelp(self, widget,text):
def setText(self):
self.statusText.set(text)
self.status.configure(foreground='blue')
def showHelpLeave(self):
self.statusText.set(self.statusStr)
self.status.configure(foreground='black')
widget.bind("<Enter>", lambda e : setText(self))
widget.bind("<Leave>", lambda e : showHelpLeave(self))
| {
"content_hash": "86b139a7cbf413b81da3c00f922c0aef",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 189,
"avg_line_length": 45.620535714285715,
"alnum_prop": 0.6234465211860261,
"repo_name": "nalyd88/modeling-and-simulation",
"id": "489cd9aba3797310f6d67e5d2fb143ec3580e6fc",
"size": "10732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "practice/pycxsimulator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55462"
},
{
"name": "TeX",
"bytes": "56000"
}
],
"symlink_target": ""
} |
from tempest.lib.services.compute import assisted_volume_snapshots_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestVolumesClient(base.BaseServiceTest):
    """Unit tests for the compute AssistedVolumeSnapshotsClient."""

    FAKE_SNAPSHOT = {
        "id": "bf7b810c-70df-4c64-88a7-8588f7a6739c",
        "volumeId": "59f17c4f-66d4-4271-be40-f200523423a9"
    }

    def setUp(self):
        super(TestVolumesClient, self).setUp()
        auth_provider = fake_auth_provider.FakeAuthProvider()
        self.client = assisted_volume_snapshots_client.\
            AssistedVolumeSnapshotsClient(auth_provider, 'compute', 'regionOne')

    def _test_create_assisted_volume_snapshot(self, bytes_body=False):
        # create parameters forwarded as **kwargs to the client call
        create_params = {"type": "qcow2", "new_file": "fake_name"}
        self.check_service_client_function(
            self.client.create_assisted_volume_snapshot,
            'tempest.lib.common.rest_client.RestClient.post',
            {"snapshot": self.FAKE_SNAPSHOT},
            bytes_body,
            status=200,
            volume_id=self.FAKE_SNAPSHOT['volumeId'],
            snapshot_id=self.FAKE_SNAPSHOT['id'],
            **create_params)

    def test_create_assisted_volume_snapshot_with_str_body(self):
        self._test_create_assisted_volume_snapshot()

    def test_create_assisted_volume_snapshot_with_byte_body(self):
        self._test_create_assisted_volume_snapshot(bytes_body=True)

    def test_delete_assisted_volume_snapshot(self):
        self.check_service_client_function(
            self.client.delete_assisted_volume_snapshot,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            status=204,
            volume_id=self.FAKE_SNAPSHOT['volumeId'],
            snapshot_id=self.FAKE_SNAPSHOT['id'])
| {
"content_hash": "833eae7c29c9863c55202dba913ce72d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 43.38461538461539,
"alnum_prop": 0.6778959810874704,
"repo_name": "openstack/tempest",
"id": "79855ea25cf91f2832ddf371138b6ae348a0cc56",
"size": "2290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/tests/lib/services/compute/test_assisted_volume_snapshots_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5364077"
},
{
"name": "Shell",
"bytes": "8684"
}
],
"symlink_target": ""
} |
import github
from github_accounts import accounts
# Authenticated client (higher rate limit); per_page=100 minimizes the
# number of pagination round-trips.
token = accounts["social"]
client = github.Github(token, per_page=100)
screen_name = "torvalds"
repository_name = "linux"
user = client.get_user(screen_name)
repo = user.get_repo(repository_name)
# Print the display names of the first 10 stargazers of the repository.
user_count = 0
for user in repo.get_stargazers():
    print(user_count, user.name)
    user_count += 1
    if user_count == 10:
break | {
"content_hash": "63589cebc04d4658f044745c777d3133",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 43,
"avg_line_length": 18,
"alnum_prop": 0.6944444444444444,
"repo_name": "bmtgoncalves/TorinoCourse",
"id": "096c4d90ad2d1f0082be385ff4c8305b7af79f6c",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lecture III/github_stargazers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52819"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from django.contrib.auth.models import Group
from django.test.utils import get_runner
from django.utils import six
from django_performance_testing import test_runner as djpt_test_runner_module
from django_performance_testing.serializer import Reader
from freezegun import freeze_time
import pytest
from testapp.test_helpers import (override_current_context,
run_testcases_with_django_runner)
import unittest
import re
def to_dotted_name(cls):
    """Return the fully qualified ``module.ClassName`` path for *cls*."""
    return "{0}.{1}".format(cls.__module__, cls.__name__)
class MyTestSuite(object):
    """Minimal stand-in test suite: accepts tests and discards them."""
    def addTest(self, test):
        pass
class MyTestRunner(object):
    """Bare stand-in for a unittest-style test runner class."""
    pass
class MyDjangoTestRunner(object):
    """Fake Django test runner exposing the two class attributes the
    django-performance-testing mixins wrap."""
    test_runner = MyTestRunner
    test_suite = MyTestSuite
@pytest.mark.parametrize('runner_cls_name,test_runner_cls,test_suite_cls', [
    ('django.test.runner.DiscoverRunner',
        unittest.TextTestRunner, unittest.TestSuite),
    (to_dotted_name(MyDjangoTestRunner),
        MyTestRunner, MyTestSuite),
], ids=['vanilla runner', 'custom runner'])
def test_runner_keeps_default_classes_in_inheritance_chain(
        settings, runner_cls_name, test_runner_cls, test_suite_cls):
    """The resolved runner class must be the djpt mixin wrapped directly
    around the configured runner, for both the vanilla Django runner and a
    custom one."""
    settings.TEST_RUNNER = runner_cls_name
    django_runner_cls = get_runner(settings)
    def assert_is_djpt_mixin(cls, base_cls, mixin_base_name):
        # cls must be the module's <mixin_base_name>Mixin applied first in
        # the MRO, with the original base class directly after it.
        fullname = 'django_performance_testing.test_runner.{}'.format(
            mixin_base_name)
        mixin_cls_name = '{}Mixin'.format(mixin_base_name)
        mixin_cls = getattr(djpt_test_runner_module, mixin_cls_name)
        assert fullname == to_dotted_name(cls)
        assert issubclass(cls, mixin_cls)
        assert cls.__mro__[1] == mixin_cls
        # base_cls may be given as a dotted-path string or as a class object
        if any(isinstance(base_cls, str_tp) for str_tp in six.string_types):
            assert base_cls == to_dotted_name(cls.__mro__[2])
        elif isinstance(base_cls, type):
            assert issubclass(cls, base_cls)
            assert cls.__mro__[2] == base_cls
        else:
            raise NotImplementedError(
                'Cannot handle type {}'.format(type(base_cls)))
    assert_is_djpt_mixin(
        cls=django_runner_cls, base_cls=runner_cls_name,
        mixin_base_name='DjptDjangoTestRunner')
    assert_is_djpt_mixin(
        cls=django_runner_cls.test_runner, base_cls=test_runner_cls,
        mixin_base_name='DjptTestRunner')
    assert django_runner_cls.test_suite == test_suite_cls
def test_runner_sets_executing_test_method_as_context():
    """While a test runs, the runner must expose it under the 'test name'
    key of the current context."""
    class SomeTestCase(unittest.TestCase):
        def test_foo(self):
            # these assertions execute *inside* the django-run test, so the
            # context is inspected mid-run
            assert 'test name' in ctx.data, ctx.data.keys()
            tests = ctx.data['test name']
            assert len(tests) == 1
            assert [str(self)] == tests
    with override_current_context() as ctx:
        run_testcases_with_django_runner(SomeTestCase, nr_of_tests=1)
def test_collected_results_serialized_to_settings_based_filename(
        settings, tmpfilepath):
    """Collected results must be written to DJPT_DATAFILE_PATH and be
    readable back through the serializer's Reader."""
    class SomeTestCase(unittest.TestCase):
        def test_foo(self):
            assert 'test name' in ctx.data, ctx.data.keys()
            tests = ctx.data['test name']
            assert len(tests) == 1
            assert [str(self)] == tests
    settings.DJPT_DATAFILE_PATH = tmpfilepath
    with override_current_context() as ctx:
        run_testcases_with_django_runner(SomeTestCase, nr_of_tests=1)
    # the datafile written during the run must contain at least one record
    reader = Reader(settings.DJPT_DATAFILE_PATH)
    assert [] != reader.read_all()
class FailsDbLimit(object):
    """Context manager + payload that violates a zero-query DB limit."""
    # declarative description of the limit this helper is meant to break
    limit_type = 'queries'
    limit_value = 0
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        return None
    def code_that_fails(self):
        # evaluating the queryset issues a real DB query, exceeding limit 0
        assert len(Group.objects.all()) == 0
class FailsTimeLimit(object):
    """Context manager + payload that violates a 4-second time limit by
    ticking frozen time forward 5 seconds."""
    limit_type = 'time'
    limit_value = 4
    def __enter__(self):
        # freeze wall-clock time so elapsed duration is fully controlled
        self.freeze_ctx_mgr = freeze_time('2016-09-29 18:18:01')
        self.frozen_time = self.freeze_ctx_mgr.__enter__()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self.freeze_ctx_mgr.__exit__(exc_type, exc_val, exc_tb)
    def code_that_fails(self):
        # jump past the 4s limit in a single tick
        self.frozen_time.tick(timedelta(seconds=5))
@pytest.mark.parametrize(
    'ran_test_delta,limit_name,method_name,limit_failer_cls,is_cls_fn', [
        (1, 'test method', 'test_foo', FailsDbLimit, False),
        (1, 'test method', 'test_foo', FailsTimeLimit, False),
        (0, 'test setUp', 'setUp', FailsDbLimit, False),
        (0, 'test setUp', 'setUp', FailsTimeLimit, False),
        (0, 'test tearDown', 'tearDown', FailsDbLimit, False),
        (0, 'test tearDown', 'tearDown', FailsTimeLimit, False),
        (-1, 'test setUpClass', 'setUpClass', FailsDbLimit, True),
        (-1, 'test setUpClass', 'setUpClass', FailsTimeLimit, True),
        (0, 'test tearDownClass', 'tearDownClass', FailsDbLimit, True),
        (0, 'test tearDownClass', 'tearDownClass', FailsTimeLimit, True),
    ])
def test_limits_can_be_set_on_testcase_methods(db, settings, limit_name,
                                               ran_test_delta, method_name,
                                               limit_failer_cls, is_cls_fn):
    """Limits configured per lifecycle method (test body, setUp/tearDown,
    setUpClass/tearDownClass) must trigger a LimitViolationError whose
    message names the offending method and test case."""
    failer = limit_failer_cls()
    # configure a limit only for the lifecycle phase under test
    settings.PERFORMANCE_LIMITS = {
        limit_name: {
            failer.limit_type: {
                'total': failer.limit_value
            }
        }
    }
    with failer:
        class ATestCase(unittest.TestCase):
            def test_default(self):
                pass
            called_do_stuff = False
        # installed under method_name below; exceeds the configured limit
        def do_stuff(*a, **kw):
            ATestCase.called_do_stuff = True
            failer.code_that_fails()
        if is_cls_fn:
            setattr(ATestCase, method_name, classmethod(do_stuff))
        else:
            setattr(ATestCase, method_name, do_stuff)
        # ran_test_delta accounts for phases that abort/add ran tests
        nr_of_tests = 1 + ran_test_delta
        test_run = run_testcases_with_django_runner(
            ATestCase, nr_of_tests=nr_of_tests,
            all_should_pass=False)
    assert ATestCase.called_do_stuff, test_run['output']
    # exactly one violation must appear in the runner output
    parts = test_run['output'].split('LimitViolationError: ')
    assert len(parts) == 2, 'has LimitViolationError in the output'
    lve_msg = parts[-1].split('FAILED (')[0].split(' File "')[0]
    lve_msg_oneline = ''.join(lve_msg.split('\n'))
    lve_msg = re.sub(r"'+\s+'", '', lve_msg_oneline)
    # e.g.: Too many (1) total queries (for test method) (limit: 0) {'test name': ['test_foo (testapp.tests.test_integrates_with_django_testrunner.ATestCase)']} # noqa: E501
    reported_method = lve_msg.split('[')[-1].split(']')[0][1:-1]
    assert reported_method.startswith(method_name), lve_msg
    assert ATestCase.__name__ in reported_method, lve_msg
| {
"content_hash": "00d6e75daa1aa146501fbf256819f17c",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 174,
"avg_line_length": 36.03243243243243,
"alnum_prop": 0.6231623162316232,
"repo_name": "PaesslerAG/django-performance-testing",
"id": "fccc5af1b343d9014abf5f13c55fe4698e2d77b6",
"size": "6666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/tests/test_integrates_with_django_testrunner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2146"
},
{
"name": "Python",
"bytes": "120833"
}
],
"symlink_target": ""
} |
import requests
class JsonParser(object):
    """Parses a Dice job-search JSON payload into a mapping of job id ->
    job details, plus the pagination URL of the next result page."""

    def __init__(self, json_cont=None, url=None):
        """Initialize from an already-parsed dict or by fetching a URL.

        Arguments:
            json_cont (dict, optional): pre-parsed result payload.
            url (str, optional): result-page URL to fetch and parse; takes
                precedence over ``json_cont`` when given.
        """
        if url is not None:
            self.url = url
            # Bug fix: the fetched payload used to be unconditionally
            # overwritten by json_cont (typically None) right after download.
            self.json_cont_dictionary = requests.get(url).json()
        else:
            self.json_cont_dictionary = json_cont

    def parse(self, json_cont_dict=None):
        """Extract job records from ``json_cont_dict`` (or the instance's
        payload when omitted).

        Returns:
            tuple: (dict mapping job id -> detail dict with 'company',
            'date', 'jobTitle', 'location', 'detailUrl' keys, next-page URL
            or None when this is the last page).
        """
        if json_cont_dict is None:
            json_cont_dictionary = self.json_cont_dictionary
        else:
            json_cont_dictionary = json_cont_dict
        res_data = {}
        # 'nextUrl' is present only when another result page exists
        nextUrl = json_cont_dictionary.get('nextUrl')
        resultItemList = json_cont_dictionary['resultItemList']
        for item in resultItemList:
            # jid is the job's unique id: 7th path segment of the detail
            # URL, with any query string stripped
            jid = item['detailUrl'].split('/')[6].split('?')[0].encode('utf-8')
            res_data[jid] = {}
            res_data[jid]['company'] = item['company'].encode('utf-8')
            res_data[jid]['date'] = item['date'].encode('utf-8')
            res_data[jid]['jobTitle'] = item['jobTitle'].encode('utf-8')
            res_data[jid]['location'] = item['location'].encode('utf-8')
            res_data[jid]['detailUrl'] = item['detailUrl'].encode('utf-8')
        return res_data, nextUrl
| {
"content_hash": "8c7bf8dc13e8f45033986fdfff38eb5c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 34.717948717948715,
"alnum_prop": 0.5576070901033974,
"repo_name": "guanxin0206/dice_crawler",
"id": "b8c3589e5d5a17b0b6c28f5c647225c912a1f2b0",
"size": "1403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dice_spider_2/spider/json_parser.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "65"
},
{
"name": "Python",
"bytes": "59127"
}
],
"symlink_target": ""
} |
from . import base
from . import mixins
from datetime import date
from .. import cleaver
class TransformedRecord(mixins.GenericCompensationMixin,
        mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
        mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
        mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
        mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
    """Maps one row of the Bryan ISD salary spreadsheet onto the standard
    transformed-record interface consumed by the salary ETL pipeline."""
    # spreadsheet column header for each generic field used by the mixins
    MAP = {
        'last_name': 'Last Name',
        'first_name': 'First Name',
        'department': 'Department Title',
        'job_title': 'Assignment Title',
        'employee_type': 'Full Part Time',
        'hire_date': 'Hire Date',
        'compensation': 'Annual Salary',
        'gender': 'Sex',
        'white': 'White',
        'black': 'Black or African American',
        'asian': 'Asian',
        'native': 'American Indian or Alaskan Native',
        'hawaiian': 'Hawaiian Pacific Islander',
        'hispanic': 'Of Hispanic or Latino Descent',
    }
    # spreadsheet marks ethnicity as 'Yes' or blank
    hispanic_map = {'Yes': 'Hispanic', '': 'Non-Hispanic'}
    NAME_FIELDS = ('first_name', 'last_name', )
    ORGANIZATION_NAME = 'Bryan ISD'
    ORGANIZATION_CLASSIFICATION = 'School District'
    # date the district provided this data file
    DATE_PROVIDED = date(2016, 6, 9)
    URL = ('http://raw.texastribune.org.s3.amazonaws.com/'
           'bryan_isd/salaries/2016-06/bryan-isd.xlsx')
    @property
    def is_valid(self):
        # Adjust to return False on invalid fields. For example:
        return self.last_name.strip() != ''
    @property
    def compensation_type(self):
        """'FT' or 'PT' based on the Full Part Time column (default 'FT')."""
        employee_type = self.get_mapped_value('employee_type')
        if employee_type == 'F':
            return "FT"
        if employee_type == 'P':
            return "PT"
        return "FT"
    @property
    def description(self):
        """Human-readable salary description matching compensation_type."""
        employee_type = self.get_mapped_value('employee_type')
        if employee_type == 'F':
            return "Annual Salary"
        if employee_type == 'P':
            return "Part-time annual salary"
        return "Annual Salary"
    @property
    def race(self):
        """Combine the one-column-per-race 'X' flags and the ethnicity flag
        into a single display name (e.g. 'White, Non-Hispanic')."""
        races = [self.get_mapped_value('white'),self.get_mapped_value('black'),
                 self.get_mapped_value('asian'),self.get_mapped_value('native'),
                 self.get_mapped_value('hawaiian')]
        # parallel to `races`: raceNames[i] labels races[i]
        raceNames = ['White','Black or African American','Asian',
                     'American Indian or Alaskan Native','Hawaiian Pacific Islander']
        ethnicity = self.hispanic_map[self.hispanic.strip()]
        i = 0
        raceList = []
        for indivRace in races:
            if indivRace == 'X':
                raceList.append(raceNames[i])
            i += 1
        if len(raceList) > 1:
            return {
                'name': 'Two or more races, ' + ethnicity
            }
        elif len(raceList) == 0:
            return {
                'name': 'Not given'
            }
        else:
            return {
                'name': raceList[0] + ', ' + ethnicity
            }
    @property
    def person(self):
        """Name/gender payload; NOTE: uses unicode(), so Python 2 only."""
        name = self.get_name()
        r = {
            'family_name': name.last,
            'given_name': name.first,
            'additional_name': name.middle,
            'name': unicode(name),
            'gender': self.gender.strip()
        }
        return r
    def calculate_tenure(self):
        """Years of service from hire date to DATE_PROVIDED, clamped at 0.
        NOTE(review): relies on Python 2 where map() returns an indexable
        list, and uses a 360-day year convention."""
        hire_date_data = map(int, self.hire_date.split('-'))
        hire_date = date(hire_date_data[0], hire_date_data[1],
                         hire_date_data[2])
        tenure = float((self.DATE_PROVIDED - hire_date).days) / float(360)
        if tenure < 0:
            return 0
        return tenure
# Module-level entry point used by the ETL loader.
transform = base.transform_factory(TransformedRecord)
| {
"content_hash": "d7bca887019fb42e6bec6564643264ea",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 28.921259842519685,
"alnum_prop": 0.5624829839368364,
"repo_name": "texastribune/tx_salaries",
"id": "b8d2a260d7f2808612ec0fc80b0fbc97beffa89e",
"size": "3673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tx_salaries/utils/transformers/bryan_isd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "309280"
},
{
"name": "Ruby",
"bytes": "191"
}
],
"symlink_target": ""
} |
import importlib
import logging
import os
import pkgutil
import sys
import appdirs
from neon.util.compat import pickle, pickle_load
# Module-level logger, standard per-module convention.
logger = logging.getLogger(__name__)
def get_cache_dir(subdir=None):
    """
    Resolve (and create, if needed) the neon cache directory, used for
    reusable files like kernels or autotuning scratch space.

    Honors the ``NEON_CACHE_DIR`` environment variable, falling back to the
    platform-specific user cache dir. ``subdir`` may be a single path
    component or a list of components appended below the base directory.
    """
    base = os.environ.get("NEON_CACHE_DIR")
    if base is None:
        base = appdirs.user_cache_dir("neon", "neon")
    if subdir:
        parts = [subdir] if not isinstance(subdir, list) else subdir
        base = os.path.join(base, *parts)
    if not os.path.exists(base):
        os.makedirs(base)
    return base
def get_data_cache_dir(data_dir, subdir=None):
    """
    Resolve (and create, if needed) the directory for data cache files.

    Data caches hold large files, so their location is controlled
    independently from the system cache via ``NEON_DATA_CACHE_DIR``; when
    that variable is unset, ``data_dir`` is used as the base.
    """
    base = os.environ.get("NEON_DATA_CACHE_DIR")
    if base is None:
        base = data_dir
    if subdir:
        parts = [subdir] if not isinstance(subdir, list) else subdir
        base = os.path.join(base, *parts)
    if not os.path.exists(base):
        os.makedirs(base)
    return base
def ensure_dirs_exist(path):
    """
    Create any missing parent directories of ``path`` prior to use.

    Arguments:
        path (str): the path (may be to a file or directory); intermediate
                    directories are created as needed.

    Returns:
        str: the unmodified path value.
    """
    parent = os.path.dirname(path)
    if parent and not os.path.isdir(parent):
        os.makedirs(parent)
    return path
def save_obj(obj, save_path):
    """
    Dumps a python data structure to a saved on-disk representation. We
    currently support writing to the following file formats (expected
    filename extension in brackets):

        * python pickle (.pkl)

    Arguments:
        obj (object): the python object to be saved.
        save_path (str): Where to write the serialized object (full path and
                         file name). Empty or None is a silent no-op.

    See Also:
        :py:func:`~neon.models.model.Model.serialize`
    """
    if save_path is None or len(save_path) == 0:
        return
    save_path = os.path.expandvars(os.path.expanduser(save_path))
    logger.debug("serializing object to: %s", save_path)
    ensure_dirs_exist(save_path)
    # Use a context manager so the handle is flushed and closed even when
    # pickling raises (the previous implementation leaked the open file).
    with open(save_path, 'wb') as fileobj:
        pickle.dump(obj, fileobj, 2)
def load_obj(load_path):
    """
    Loads a saved on-disk representation to a python data structure. We
    currently support the following file formats:

        * python pickle (.pkl)
        * gzip compressed pickle (.gz)

    Arguments:
        load_path (str or file): where to the load the serialized object
            (full path and file name), or an already-open binary file object.

    Raises:
        AttributeError: when the pickled object's interface appears to have
            changed since it was serialized.
    """
    # Track whether we opened the handle so caller-supplied file objects are
    # left open; handles we open are closed (previously they leaked).
    opened_here = False
    if isinstance(load_path, str):
        load_path = os.path.expandvars(os.path.expanduser(load_path))
        if load_path.endswith('.gz'):
            import gzip
            load_path = gzip.open(load_path, 'rb')
        else:
            load_path = open(load_path, 'rb')
        opened_here = True
    fname = load_path.name
    logger.debug("deserializing object from: %s", fname)
    try:
        return pickle_load(load_path)
    except AttributeError:
        msg = ("Problems deserializing: %s. Its possible the interface "
               "for this object has changed since being serialized. You "
               "may need to remove and recreate it." % load_path)
        logger.error(msg)
        raise AttributeError(msg)
    finally:
        if opened_here:
            load_path.close()
def load_class(ctype):
    """
    Resolve a dotted path like ``'neon.layers.layer.Linear'`` to the class
    object it names.

    Arguments:
        ctype (str): dotted module + class name; a bare class name triggers
                     a search through every module in the neon package.

    Returns:
        class
    """
    dotted = ctype.split('.')
    module_name = '.'.join(dotted[:-1])
    try:
        # __import__ returns the top-level package; walk the remaining
        # components (submodules and finally the class) via getattr.
        obj = __import__(module_name)
        for component in dotted[1:]:
            obj = getattr(obj, component)
        return obj
    except (ValueError, ImportError) as err:
        if not module_name:
            # bare class name: scan every module directly under neon
            pkg = sys.modules['neon']
            prefix = pkg.__name__ + '.'
            for _, mod_name, _ in pkgutil.iter_modules(pkg.__path__,
                                                       prefix=prefix):
                candidate = importlib.import_module(mod_name)
                if hasattr(candidate, ctype):
                    return getattr(candidate, ctype)
        raise err
def serialize(model, callbacks=None, datasets=None, dump_weights=True, keep_states=True):
    """
    Collect the serialized state of a model plus optional callbacks and
    datasets into a single dict.

    Arguments:
        model (Model): Model object
        callbacks (Callbacks, optional): Callbacks
        datasets (iterable, optional): Datasets
        dump_weights (bool, optional): Ignored; kept for API compatibility
        keep_states (bool, optional): Whether to save optimizer states too.

    Returns:
        dict: Model data, with 'callbacks' / 'datasets' keys added when the
        corresponding arguments are provided.
    """
    state = model.serialize(fn=None, keep_states=keep_states)
    # fold in the optional components only when they were supplied
    for key, component in (('callbacks', callbacks), ('datasets', datasets)):
        if component is not None:
            state[key] = component.serialize()
    return state
| {
"content_hash": "fb9ecadf7c22922deb6eb132dfa9c996",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 93,
"avg_line_length": 29.583783783783783,
"alnum_prop": 0.6230586515622145,
"repo_name": "Jokeren/neon",
"id": "9bc3ed655be711b6569c4f884b235716676eec7e",
"size": "6219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neon/util/persist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6736"
},
{
"name": "C++",
"bytes": "135410"
},
{
"name": "CSS",
"bytes": "1472272"
},
{
"name": "Cuda",
"bytes": "14937"
},
{
"name": "Makefile",
"bytes": "12228"
},
{
"name": "Perl",
"bytes": "130963"
},
{
"name": "Python",
"bytes": "1943953"
}
],
"symlink_target": ""
} |
"""
.. module: lemur.pending_certificates.models
Copyright (c) 2018 and onwards Netflix, Inc. All rights reserved.
.. moduleauthor:: James Chuong <jchuong@instartlogic.com>
"""
from datetime import datetime as dt
from sqlalchemy import (
Integer,
ForeignKey,
String,
DefaultClause,
func,
Column,
Text,
Boolean,
)
from sqlalchemy.orm import relationship
from sqlalchemy_utils import JSONType
from sqlalchemy_utils.types.arrow import ArrowType
from lemur.certificates.models import get_sequence
from lemur.common import defaults, utils
from lemur.database import db
from lemur.models import (
pending_cert_source_associations,
pending_cert_destination_associations,
pending_cert_notification_associations,
pending_cert_replacement_associations,
pending_cert_role_associations,
)
from lemur.utils import Vault
def get_or_increase_name(name, serial):
    """Return ``name`` if no pending certificate already uses it; otherwise
    derive a unique variant by appending the hex serial, then an
    incrementing counter."""
    matches = PendingCertificate.query.filter(
        PendingCertificate.name.ilike("{0}%".format(name))
    ).all()
    if not matches:
        return name

    # first fallback: suffix the (uppercased hex) serial number
    suffixed = "{0}-{1}".format(name, hex(int(serial))[2:].upper())
    matches = PendingCertificate.query.filter(
        PendingCertificate.name.ilike("{0}%".format(suffixed))
    ).all()
    if not matches:
        return suffixed

    # second fallback: bump a numeric counter past every existing suffix
    counters = [0]
    root, counter = get_sequence(suffixed)
    for existing in matches:
        root, counter = get_sequence(existing.name)
        if counter:
            counters.append(counter)
    return "{0}-{1}".format(root, max(counters) + 1)
class PendingCertificate(db.Model):
    """Certificate request awaiting asynchronous issuance; tracks retry
    state and is linked to a concrete Certificate once resolved."""
    __tablename__ = "pending_certs"
    id = Column(Integer, primary_key=True)
    external_id = Column(String(128))  # identifier assigned by the external CA, if any
    owner = Column(String(128), nullable=False)
    name = Column(String(256), unique=True)
    description = Column(String(1024))
    notify = Column(Boolean, default=True)
    number_attempts = Column(Integer)  # fetch attempts made so far (see __init__)
    rename = Column(Boolean, default=True)  # True when the name was auto-generated
    resolved = Column(Boolean, default=False)
    resolved_cert_id = Column(Integer, nullable=True)  # id of the issued Certificate
    cn = Column(String(128))
    csr = Column(Text(), nullable=False)
    chain = Column(Text())
    private_key = Column(Vault, nullable=True)  # encrypted at rest by the Vault type
    date_created = Column(ArrowType, DefaultClause(func.now()), nullable=False)
    dns_provider_id = Column(
        Integer, ForeignKey("dns_providers.id", ondelete="CASCADE")
    )
    status = Column(Text(), nullable=True)
    last_updated = Column(
        ArrowType, DefaultClause(func.now()), onupdate=func.now(), nullable=False
    )
    rotation = Column(Boolean, default=False)
    user_id = Column(Integer, ForeignKey("users.id"))
    authority_id = Column(Integer, ForeignKey("authorities.id", ondelete="CASCADE"))
    root_authority_id = Column(
        Integer, ForeignKey("authorities.id", ondelete="CASCADE")
    )
    rotation_policy_id = Column(Integer, ForeignKey("rotation_policies.id"))
    # Many-to-many associations through the pending_cert_* association tables.
    notifications = relationship(
        "Notification",
        secondary=pending_cert_notification_associations,
        backref="pending_cert",
        passive_deletes=True,
    )
    destinations = relationship(
        "Destination",
        secondary=pending_cert_destination_associations,
        backref="pending_cert",
        passive_deletes=True,
    )
    sources = relationship(
        "Source",
        secondary=pending_cert_source_associations,
        backref="pending_cert",
        passive_deletes=True,
    )
    roles = relationship(
        "Role",
        secondary=pending_cert_role_associations,
        backref="pending_cert",
        passive_deletes=True,
    )
    replaces = relationship(
        "Certificate",
        secondary=pending_cert_replacement_associations,
        backref="pending_cert",
        passive_deletes=True,
    )
    options = Column(JSONType)
    rotation_policy = relationship("RotationPolicy")
    # fields excluded from serialized output
    sensitive_fields = ("private_key",)
    def __init__(self, **kwargs):
        """Build a pending certificate from API/plugin kwargs, generating a
        unique name when none is supplied."""
        self.csr = kwargs.get("csr")
        self.private_key = kwargs.get("private_key", "")
        if self.private_key:
            # If the request does not send private key, the key exists but the value is None
            self.private_key = self.private_key.strip()
        self.external_id = kwargs.get("external_id")
        # when destinations are appended they require a valid name.
        if kwargs.get("name"):
            # NOTE(review): serial 0 is passed here because no serial exists
            # yet for a user-supplied name — confirm against get_or_increase_name.
            self.name = get_or_increase_name(defaults.text_to_slug(kwargs["name"]), 0)
            self.rename = False
        else:
            # TODO: Fix auto-generated name, it should be renamed on creation
            self.name = get_or_increase_name(
                defaults.certificate_name(
                    kwargs["common_name"],
                    kwargs["authority"].name,
                    dt.now(),
                    dt.now(),
                    False,
                ),
                self.external_id,
            )
            self.rename = True
        self.cn = defaults.common_name(utils.parse_csr(self.csr))
        self.owner = kwargs["owner"]
        self.number_attempts = 0
        if kwargs.get("chain"):
            self.chain = kwargs["chain"].strip()
        self.notify = kwargs.get("notify", True)
        self.destinations = kwargs.get("destinations", [])
        self.notifications = kwargs.get("notifications", [])
        self.description = kwargs.get("description")
        self.roles = list(set(kwargs.get("roles", [])))
        self.replaces = kwargs.get("replaces", [])
        self.rotation = kwargs.get("rotation")
        self.rotation_policy = kwargs.get("rotation_policy")
        try:
            self.dns_provider_id = kwargs.get("dns_provider").id
        # NOTE(review): listing Exception here makes the narrower types
        # redundant — this silently tolerates any failure to read the id.
        except (AttributeError, KeyError, TypeError, Exception):
            pass
| {
"content_hash": "8a0834b8ba1da4e678ef3ddc01c71d3d",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 92,
"avg_line_length": 32.6045197740113,
"alnum_prop": 0.6302200658464737,
"repo_name": "Netflix/lemur",
"id": "ee3e5e97763a7e9942747645352cdec87ede239f",
"size": "5771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lemur/pending_certificates/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2728"
},
{
"name": "Dockerfile",
"bytes": "2597"
},
{
"name": "HTML",
"bytes": "314713"
},
{
"name": "JavaScript",
"bytes": "15496"
},
{
"name": "Makefile",
"bytes": "3791"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1530505"
},
{
"name": "Shell",
"bytes": "2339"
}
],
"symlink_target": ""
} |
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
Associate the ImageNet 2012 Challenge validation data set with labels.
The raw ImageNet validation data set is expected to reside in JPEG files
located in the following directory structure.
data_dir/ILSVRC2012_val_00000001.JPEG
data_dir/ILSVRC2012_val_00000002.JPEG
...
data_dir/ILSVRC2012_val_00050000.JPEG
This script moves the files into a directory structure like such:
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
This directory reorganization requires a mapping from validation image
number (i.e. suffix of the original file) to the associated label. This
is provided in the ImageNet development kit via a Matlab file.
In order to make life easier and divorce ourselves from Matlab, we instead
supply a custom text file that provides this mapping for us.
Sample usage:
./preprocess_imagenet_validation_data.py ILSVRC2012_img_val \
imagenet_2012_validation_synset_labels.txt
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('Invalid usage\n'
              'usage: preprocess_imagenet_validation_data.py '
              '<validation data dir> <validation labels file>')
        sys.exit(-1)
    data_dir = sys.argv[1]
    validation_labels_file = sys.argv[2]

    # Read in the 50000 synsets associated with the validation data set.
    with open(validation_labels_file) as f:
        labels = [line.strip() for line in f]
    unique_labels = set(labels)

    # Make all sub-directories in the validation data dir.
    for label in unique_labels:
        labeled_data_dir = os.path.join(data_dir, label)
        # Tolerate re-runs: the original unconditional makedirs crashed
        # with OSError if a synset directory already existed.
        if not os.path.isdir(labeled_data_dir):
            os.makedirs(labeled_data_dir)

    # Move all of the images to the appropriate sub-directory.
    for i, label in enumerate(labels):
        basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
        original_filename = os.path.join(data_dir, basename)
        if not os.path.exists(original_filename):
            # BUG FIX: the original used "'Failed to find: ' % name" with no
            # %s placeholder, which raised TypeError instead of reporting.
            print('Failed to find: %s' % original_filename)
            sys.exit(-1)
        new_filename = os.path.join(data_dir, label, basename)
        os.rename(original_filename, new_filename)
| {
"content_hash": "27c2495580c1c3e634a75b4b03d64010",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 34.14705882352941,
"alnum_prop": 0.7390180878552972,
"repo_name": "mlperf/training_results_v0.6",
"id": "54270f0886d70947d45396ee9c1550912bd6d537",
"size": "3017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/nvidia-examples/imagenet_preparation/preprocess_imagenet_validation_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
from awsdaleks import chaser, warrior, dalek
import boto3
def exterminate(target):
    """Detach an AWS IoT policy from every principal, then delete it.

    :param target: dict with "region" and "names" (policy name at index 0).
    """
    client = boto3.client('iot', region_name=target["region"])
    policy_name = target["names"][0]
    # A policy cannot be deleted while it is still attached to any target.
    attached = client.list_targets_for_policy(policyName=policy_name)["targets"]
    for principal in attached:
        client.detach_policy(policyName=policy_name, target=principal)
    client.delete_policy(policyName=policy_name)
warrior("iot_policy", lambda r: exterminate(r))
| {
"content_hash": "dadabda33563104ac79e5bc7d5541eb0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 29.75,
"alnum_prop": 0.6911764705882353,
"repo_name": "jfaerman/aws-daleks",
"id": "228d3e2657184e3b5dc882abc66ed1a72a39630c",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awsdaleks/iot_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13365"
},
{
"name": "Scala",
"bytes": "74790"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
} |
import unittest
from BlockServer.config.ioc import IOC
class TestContainersSequence(unittest.TestCase):
    """Tests for serialising IOC container objects."""

    def test_ioc_to_dict(self):
        # Arrange: an IOC carrying two macros with differing value types.
        ioc = IOC("SIMPLE1")
        ioc.macros = {"macro1": {'value': 123}, "macro2": {'value': "Hello"}}

        # Act
        serialised = ioc.to_dict()

        # Assert: top-level keys exist and each macro appears in
        # name/value form inside the "macros" list.
        self.assertIn("name", serialised)
        self.assertIn("macros", serialised)
        self.assertIn({"name": "macro1", "value": 123}, serialised["macros"])
        self.assertIn({"name": "macro2", "value": "Hello"}, serialised["macros"])
| {
"content_hash": "a35ea60586244cda8aa1faed805a354d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.5938566552901023,
"repo_name": "ISISComputingGroup/EPICS-inst_servers",
"id": "c4fa32ed3c36820e5860c311fa396da6acb79045",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BlockServer/test_modules/test_container.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6451"
},
{
"name": "Python",
"bytes": "1060148"
},
{
"name": "Shell",
"bytes": "4460"
}
],
"symlink_target": ""
} |
from sys import argv
# NOTE: Python 2 script (print statements, raw_input) — an exercise from
# "Learn Python the Hard Way"; do not run under Python 3.
# argv[0] is the script path; argv[1] is the required user name argument.
script, user_name = argv
prompt = '>'
print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)
print "Where do you live %s?" % user_name
lives = raw_input(prompt)
print "What kind of computer do you have?"
computer = raw_input(prompt)
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer) | {
"content_hash": "6053d5dd3479f8ce1724b266b341432d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 55,
"avg_line_length": 25,
"alnum_prop": 0.68,
"repo_name": "michsien/Learning_python",
"id": "d6c6fc374e95184d7677a71dac354251e3819f9c",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises _1-44/ex14.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44824"
}
],
"symlink_target": ""
} |
import bpy
import bmesh
import numpy as np
from typing import List
import sys
from pathlib import Path
UTILS_PATH = Path.home() / "Documents/python_workspace/data-science-learning"
sys.path.append(str(UTILS_PATH))
import importlib
import ds_utils.blender_utils
importlib.reload(ds_utils.blender_utils)
from ds_utils.blender_utils import create_object
def get_particles():
    """Return the particles of the first particle system on 'ref_obj'.

    Fetches the system through the evaluated dependency graph so the
    particle data is actually populated.
    """
    ref_obj = bpy.context.scene.objects['ref_obj']
    # If not called, there are no particles
    depsgraph = bpy.context.evaluated_depsgraph_get()
    # Equivalent?? -- two ways of reaching the evaluated particle system;
    # only the second assignment takes effect.
    par_system = ref_obj.evaluated_get(depsgraph).particle_systems[0]
    par_system = depsgraph.objects.get(ref_obj.name, None).particle_systems[0]
    # Reset memory.
    # Bumping the seed up and back down forces Blender to refresh the
    # particle cache without changing the simulation outcome.
    par_system.seed += 1
    par_system.seed -= 1
    particles = par_system.particles
    return particles
def shape_key_anim(objs_verts: List):
    """Build a shape-key (morph) animation from per-frame vertex lists.

    NOTE: consumes ``objs_verts`` (pops entries); frames are keyed from the
    last list entry backwards, 10 timeline frames apart, using absolute
    shape keys driven by ``eval_time``.
    """
    # The base mesh uses the final frame's vertices.
    obj = create_object(objs_verts[-1], edges=[], faces=[], obj_name="frame_{}".format(len(objs_verts)))
    objs_verts.pop()
    sk_basis = obj.shape_key_add(name='Basis')
    sk_basis.interpolation = 'KEY_LINEAR'
    # Absolute shape keys: animate via eval_time rather than per-key values.
    obj.data.shape_keys.use_relative = False
    count = 1
    while True:
        if not objs_verts:
            break
        points = objs_verts.pop()
        # Create new shape-key block
        block = obj.shape_key_add(name=str(count), from_mix=False)  # returns a key_blocks member
        block.interpolation = 'KEY_LINEAR'
        block.value = 0
        # Update vertices position
        for (vert, co) in zip(block.data, points):
            vert.co = co
        # Any leftover vertices (mesh larger than this frame's point list)
        # collapse onto the last point of the frame.
        for vert in block.data[len(points):]:
            vert.co = points[-1 ]
        # Keyframe evaluation time
        #bpy.context.object.active_shape_key_index = count+1
        obj.data.shape_keys.eval_time = count * 10
        obj.data.shape_keys.keyframe_insert(data_path='eval_time', frame=count*10)
        count += 1
def anim_objs(objs_verts: List):
    """Create one object per vertex list and key each into the reference
    object's keyed particle system, so particles interpolate between them.
    """
    # Convert points to objects
    objs = [create_object(verts, edges=[], faces=[], obj_name="obj{}".format(i), collection='keyed_objs')
            for i, verts in enumerate(objs_verts)]
    # Base particle settings to use for the animation
    base_particle_settings = bpy.data.particles['base_particle_settings']
    # Reference object which holds the keyed particle system
    ref_obj = bpy.context.scene.objects['ref_obj']
    keyed_particle_system = ref_obj.particle_systems[0]
    for obj in objs:
        # create a copy of the base particle system
        obj.modifiers.new(obj.name, type='PARTICLE_SYSTEM')
        obj.particle_systems[0].settings = base_particle_settings
        # keyed the object into our reference (operator needs an explicit
        # context override pointing at the keyed particle system)
        bpy.ops.particle.new_target({'particle_system': keyed_particle_system})
        target = keyed_particle_system.active_particle_target
        target.object = obj
def anim_particles(objs_verts, nb_frames: int):
    """Scatter particles onto per-frame vertex sets as they are born.

    :param objs_verts: one list of (x, y, z) vertices per frame.
    :param nb_frames: number of animation frames (and entries consumed).
    """
    bpy.context.scene.frame_start = 0
    bpy.context.scene.frame_end = nb_frames
    particles = get_particles()
    par_len = particles.data.settings.count
    # Collect each particle's birth frame (rounded up).
    par_birth = np.array([0.0] * par_len, dtype='float')
    particles.foreach_get('birth_time', par_birth)
    par_birth = np.ceil(par_birth)
    # run_type toggles between two placement strategies; 0 (the second
    # branch) is the one currently in use.
    run_type = 0
    if run_type:
        # Strategy A: every frame, re-place alive particles on a random
        # earlier frame's vertices and unborn ones on the current frame's.
        for frame in range(0, nb_frames):
            bpy.context.scene.frame_set(frame)
            alive_p = [p for p in particles if p.alive_state == 'ALIVE']
            for p in alive_p:
                rand_obj = np.random.randint(frame+1)
                rand_loc = objs_verts[rand_obj][np.random.randint(len(objs_verts[rand_obj]))]
                p.location = rand_loc
            not_alive_p = [p for p in particles if p.alive_state == 'UNBORN']
            for p in not_alive_p:
                rand_loc = objs_verts[frame][np.random.randint(len(objs_verts[frame]))]
                p.location = rand_loc
    else:
        # Strategy B: place only the particles born on this frame, at
        # random vertices of this frame's target object.
        for frame in range(0, nb_frames):
            print("Updating growth anim for frame {}".format(frame))
            bpy.context.scene.frame_set(frame)
            target_obj = objs_verts[frame]
            par_index = np.where(par_birth == frame+1)
            target_obj_verts_rand_idxs = np.random.randint(0, len(target_obj), len(par_index[0]))
            for i, p_idx in enumerate(par_index[0]):
                particles[p_idx].location = target_obj[target_obj_verts_rand_idxs[i]]
    # Rewind the timeline when done.
    bpy.context.scene.frame_current = 0
def main():
    """Demo entry point: animate particles over five stacked triangles."""
    frame_count = 5
    triangles = [[(1, 1, z), (1, -1, z), (-1, -1, z)]
                 for z in range(frame_count)]
    anim_particles(triangles, frame_count)
#test()
#main() | {
"content_hash": "d75703609f85923b3811df023c57257a",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 105,
"avg_line_length": 32.24285714285714,
"alnum_prop": 0.6315906070004431,
"repo_name": "5agado/data-science-learning",
"id": "3247903855f2267e122cfb4ee9018fa4abbea276",
"size": "4546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/blender/growth_anim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "565"
},
{
"name": "Jupyter Notebook",
"bytes": "2011939"
},
{
"name": "Python",
"bytes": "550056"
}
],
"symlink_target": ""
} |
"""
Utility methods for working with WSGI servers
"""
import errno
import json
import logging
import os
import signal
import sys
import time
import eventlet
from eventlet.green import socket
from eventlet.green import ssl
import eventlet.greenio
import eventlet.wsgi
from oslo.config import cfg
from oslo.utils import importutils
from paste import deploy
import routes
import routes.middleware
import six
import webob.dec
import webob.exc
from heat.api.aws import exception as aws_exception
from heat.common import exception
from heat.common import serializers
from heat.openstack.common import gettextutils
# Hard upper bound on request-URL length, passed to eventlet.wsgi below.
URL_LENGTH_LIMIT = 50000

# Listener options for the native heat-api (OpenStack ReST) service.
api_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_('Address to bind the server. Useful when '
                      'selecting a particular network interface.'),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('bind_port', default=8004,
               help=_('The port on which the server will listen.'),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('backlog', default=4096,
               help=_("Number of backlog requests "
                      "to configure the socket with."),
               deprecated_group='DEFAULT'),
    cfg.StrOpt('cert_file',
               help=_("Location of the SSL certificate file "
                      "to use for SSL mode."),
               deprecated_group='DEFAULT'),
    cfg.StrOpt('key_file',
               help=_("Location of the SSL key file to use "
                      "for enabling SSL mode."),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('workers', default=0,
               help=_("Number of workers for Heat service."),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('max_header_line', default=16384,
               help=_('Maximum line size of message headers to be accepted. '
                      'max_header_line may need to be increased when using '
                      'large tokens (typically those generated by the '
                      'Keystone v3 API with big service catalogs).')),
]
api_group = cfg.OptGroup('heat_api')
cfg.CONF.register_group(api_group)
cfg.CONF.register_opts(api_opts,
                       group=api_group)

# Listener options for the AWS CloudFormation-compatible heat-api-cfn
# service (same knobs as heat_api, different default port).
api_cfn_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_('Address to bind the server. Useful when '
                      'selecting a particular network interface.'),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('bind_port', default=8000,
               help=_('The port on which the server will listen.'),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('backlog', default=4096,
               help=_("Number of backlog requests "
                      "to configure the socket with."),
               deprecated_group='DEFAULT'),
    cfg.StrOpt('cert_file',
               help=_("Location of the SSL certificate file "
                      "to use for SSL mode."),
               deprecated_group='DEFAULT'),
    cfg.StrOpt('key_file',
               help=_("Location of the SSL key file to use "
                      "for enabling SSL mode."),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('workers', default=0,
               help=_("Number of workers for Heat service."),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('max_header_line', default=16384,
               help=_('Maximum line size of message headers to be accepted. '
                      'max_header_line may need to be increased when using '
                      'large tokens (typically those generated by the '
                      'Keystone v3 API with big service catalogs).')),
]
api_cfn_group = cfg.OptGroup('heat_api_cfn')
cfg.CONF.register_group(api_cfn_group)
cfg.CONF.register_opts(api_cfn_opts,
                       group=api_cfn_group)
# Listener options for the heat-api-cloudwatch service (same knobs as
# heat_api, different default port).
api_cw_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_('Address to bind the server. Useful when '
                      'selecting a particular network interface.'),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('bind_port', default=8003,
               help=_('The port on which the server will listen.'),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('backlog', default=4096,
               help=_("Number of backlog requests "
                      "to configure the socket with."),
               deprecated_group='DEFAULT'),
    cfg.StrOpt('cert_file',
               help=_("Location of the SSL certificate file "
                      "to use for SSL mode."),
               deprecated_group='DEFAULT'),
    cfg.StrOpt('key_file',
               help=_("Location of the SSL key file to use "
                      "for enabling SSL mode."),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('workers', default=0,
               help=_("Number of workers for Heat service."),
               deprecated_group='DEFAULT'),
    cfg.IntOpt('max_header_line', default=16384,
               help=_('Maximum line size of message headers to be accepted. '
                      'max_header_line may need to be increased when using '
                      'large tokens (typically those generated by the '
                      # FIX: misplaced parenthesis ('catalogs.)') in the help
                      # text; now matches the heat_api/heat_api_cfn wording.
                      'Keystone v3 API with big service catalogs).')),
]
api_cw_group = cfg.OptGroup('heat_api_cloudwatch')
cfg.CONF.register_group(api_cw_group)
cfg.CONF.register_opts(api_cw_opts,
                       group=api_cw_group)
# Pull in the logging flags defined by the shared openstack.common module.
cfg.CONF.import_opt('debug', 'heat.openstack.common.log')
cfg.CONF.import_opt('verbose', 'heat.openstack.common.log')

# Cap on raw JSON request bodies; must exceed max_template_size so a
# maximal template still fits inside its JSON envelope.
json_size_opt = cfg.IntOpt('max_json_body_size',
                           default=1048576,
                           help='Maximum raw byte size of JSON request body.'
                                ' Should be larger than max_template_size.')
cfg.CONF.register_opt(json_size_opt)
def list_opts():
    """Yield (group_name, opts) pairs for oslo.config option discovery."""
    option_groups = (
        (None, [json_size_opt]),
        ('heat_api', api_opts),
        ('heat_api_cfn', api_cfn_opts),
        ('heat_api_cloudwatch', api_cw_opts),
    )
    for group_name, opts in option_groups:
        yield group_name, opts
class WritableLogger(object):
    """File-like adapter: forwards `write` calls to a logger at one level."""

    def __init__(self, LOG, level=logging.DEBUG):
        self.LOG = LOG
        self.level = level

    def write(self, message):
        # Strip the newline eventlet.wsgi appends to each log message.
        self.LOG.log(self.level, message.strip("\n"))
def get_bind_addr(conf, default_port=None):
    """Return the (host, port) pair to bind to, falling back to
    ``default_port`` when the configured port is unset/zero."""
    port = conf.bind_port if conf.bind_port else default_port
    return (conf.bind_host, port)
def get_socket(conf, default_port):
    """
    Bind socket to bind ip:port in conf

    note: Mostly comes from Swift with a few small changes...

    :param conf: a cfg.ConfigOpts object
    :param default_port: port to bind to if none is specified in conf
    :returns : a socket object as returned from socket.listen or
               ssl.wrap_socket if conf specifies cert_file
    :raises RuntimeError: if only one of cert_file/key_file is set, or if
               the address cannot be bound within 30 seconds
    """
    bind_addr = get_bind_addr(conf, default_port)

    # TODO(jaypipes): eventlet's greened socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    # Pick the first resolved INET/INET6 family for the bind host.
    address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
                      bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
                      if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]

    # SSL is enabled when either file is configured, but both are required.
    cert_file = conf.cert_file
    key_file = conf.key_file
    use_ssl = cert_file or key_file
    if use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    # Retry for up to 30 seconds in case a previous process still holds
    # the address (EADDRINUSE); any other socket error is fatal.
    sock = None
    retry_until = time.time() + 30
    while not sock and time.time() < retry_until:
        try:
            sock = eventlet.listen(bind_addr, backlog=conf.backlog,
                                   family=address_family)
            if use_ssl:
                sock = ssl.wrap_socket(sock, certfile=cert_file,
                                       keyfile=key_file)
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            eventlet.sleep(0.1)
    if not sock:
        raise RuntimeError(_("Could not bind to %(bind_addr)s"
                             "after trying for 30 seconds")
                           % {'bind_addr': bind_addr})
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    # This option isn't available in the OS X version of eventlet
    if hasattr(socket, 'TCP_KEEPIDLE'):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)

    return sock
class Server(object):
    """Server class to manage multiple WSGI sockets and applications.

    With workers == 0 the app runs in a green-thread pool in this process;
    otherwise the process forks `workers` children that each serve the
    shared listening socket.
    """

    def __init__(self, threads=1000):
        # threads: green-thread pool size used by each serving process.
        self.threads = threads
        self.children = []
        self.running = True

    def start(self, application, conf, default_port):
        """
        Run a WSGI server with the given application.

        :param application: The application to run in the WSGI server
        :param conf: a cfg.ConfigOpts object
        :param default_port: Port to bind to if none is specified in conf
        """
        def kill_children(*args):
            """Kills the entire process group."""
            self.LOG.error(_('SIGTERM received'))
            # Ignore further SIGTERMs so we don't re-enter while tearing down.
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            self.running = False
            os.killpg(0, signal.SIGTERM)

        def hup(*args):
            """
            Shuts down the server(s), but allows running requests to complete
            """
            self.LOG.error(_('SIGHUP received'))
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
            os.killpg(0, signal.SIGHUP)

        # NOTE(review): SIGHUP is registered here and again below for the
        # multi-worker path.
        signal.signal(signal.SIGHUP, hup)
        eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line
        self.application = application
        self.sock = get_socket(conf, default_port)

        self.LOG = logging.getLogger('eventlet.wsgi.server')

        if conf.workers == 0:
            # Useful for profiling, test, debug etc.
            self.pool = eventlet.GreenPool(size=self.threads)
            self.pool.spawn_n(self._single_run, application, self.sock)
            return

        self.LOG.info(_("Starting %d workers") % conf.workers)
        signal.signal(signal.SIGTERM, kill_children)
        signal.signal(signal.SIGHUP, hup)
        # Fork until the requested number of worker children exists.
        while len(self.children) < conf.workers:
            self.run_child()

    def wait_on_children(self):
        # Parent loop: reap dead children and replace them until shutdown.
        while self.running:
            try:
                pid, status = os.wait()
                if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                    self.LOG.error(_('Removing dead child %s') % pid)
                    self.children.remove(pid)
                    self.run_child()
            except OSError as err:
                # EINTR/ECHILD are expected around signals / no children.
                if err.errno not in (errno.EINTR, errno.ECHILD):
                    raise
            except KeyboardInterrupt:
                self.LOG.info(_('Caught keyboard interrupt. Exiting.'))
                os.killpg(0, signal.SIGTERM)
                break
        # Close the shared listening socket once the loop exits.
        eventlet.greenio.shutdown_safe(self.sock)
        self.sock.close()
        self.LOG.debug('Exited')

    def wait(self):
        """Wait until all servers have completed running."""
        try:
            if self.children:
                self.wait_on_children()
            else:
                self.pool.waitall()
        except KeyboardInterrupt:
            pass

    def run_child(self):
        # Fork one worker; the child serves requests, the parent records it.
        pid = os.fork()
        if pid == 0:
            # Child: restore default signal handling before serving.
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            self.run_server()
            self.LOG.info(_('Child %d exiting normally') % os.getpid())
            return
        else:
            self.LOG.info(_('Started child %s') % pid)
            self.children.append(pid)

    def run_server(self):
        """Run a WSGI server."""
        eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        eventlet.hubs.use_hub('poll')
        # Green the socket module only, in the child process.
        eventlet.patcher.monkey_patch(all=False, socket=True)
        self.pool = eventlet.GreenPool(size=self.threads)
        try:
            eventlet.wsgi.server(self.sock,
                                 self.application,
                                 custom_pool=self.pool,
                                 url_length_limit=URL_LENGTH_LIMIT,
                                 log=WritableLogger(self.LOG),
                                 debug=cfg.CONF.debug)
        except socket.error as err:
            # Ignore EINVAL, raised when the socket is shut down under us.
            if err[0] != errno.EINVAL:
                raise
        self.pool.waitall()

    def _single_run(self, application, sock):
        """Start a WSGI server in a new green thread."""
        self.LOG.info(_("Starting single process server"))
        eventlet.wsgi.server(sock, application,
                             custom_pool=self.pool,
                             url_length_limit=URL_LENGTH_LIMIT,
                             log=WritableLogger(self.LOG),
                             debug=cfg.CONF.debug)
class Middleware(object):
    """Base WSGI middleware wrapper.

    Subclasses wrap an application and may intercept the incoming request
    (``process_request``) and/or the outgoing response
    (``process_response``). By default requests pass straight through.
    """

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Hook invoked for each request.

        Return None to continue down the stack, or a response object to
        short-circuit processing and return it immediately.
        """
        return None

    def process_response(self, response):
        """Hook to adjust the response on its way out."""
        return response

    @webob.dec.wsgify
    def __call__(self, req):
        short_circuit = self.process_request(req)
        if short_circuit:
            return short_circuit
        return self.process_response(req.get_response(self.application))
class Debug(Middleware):
    """
    Helper middleware that dumps the request environ, response headers
    and response body to stdout for debugging.
    """
    @webob.dec.wsgify
    def __call__(self, req):
        print(("*" * 40) + " REQUEST ENVIRON")
        for key, value in req.environ.items():
            print(key, "=", value)
        # BUG FIX: a bare ``print`` is a no-op expression under Python 3
        # (the file uses print() calls everywhere else); call it so the
        # intended blank separator line is actually emitted.
        print()
        resp = req.get_response(self.application)

        print(("*" * 40) + " RESPONSE HEADERS")
        for (key, value) in six.iteritems(resp.headers):
            print(key, "=", value)
        print()

        resp.app_iter = self.print_generator(resp.app_iter)
        return resp

    @staticmethod
    def print_generator(app_iter):
        """
        Wrap a response body iterator, echoing each chunk to stdout as it
        is consumed.
        """
        print(("*" * 40) + " BODY")
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
def debug_filter(app, conf, **local_conf):
    """Paste filter factory: wrap ``app`` in the Debug middleware."""
    return Debug(app)
class Router(object):
    """
    WSGI middleware that maps incoming requests to WSGI apps.
    """

    def __init__(self, mapper):
        """
        Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be a wsgi.Controller, who will route
        the request to the action method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, "/svrlist", controller=sc, action="list")

          # Actions are all implicitly defined
          mapper.resource("server", "servers", controller=sc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware matches the URL against the map and stores the
        # result in the WSGI environ before invoking _dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify
    def __call__(self, req):
        """
        Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        # Returning the RoutesMiddleware as the response delegates the
        # request to it (wsgify treats a WSGI app as a response).
        return self._router

    @staticmethod
    @webob.dec.wsgify
    def _dispatch(req):
        """
        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ.  Either returns 404
        or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        app = match['controller']
        return app
class Request(webob.Request):
    """Add some OpenStack API-specific logic to the base webob.Request."""

    def best_match_content_type(self):
        """Determine the requested response content-type."""
        supported = ('application/json',)
        best = self.accept.best_match(supported)
        return best if best else 'application/json'

    def get_content_type(self, allowed_content_types):
        """Determine content type of the request body.

        :raises exception.InvalidContentType: when the header is absent or
            the declared type is not in ``allowed_content_types``.
        """
        if "Content-Type" not in self.headers:
            raise exception.InvalidContentType(content_type=None)

        content_type = self.content_type
        if content_type in allowed_content_types:
            return content_type
        raise exception.InvalidContentType(content_type=content_type)

    def best_match_language(self):
        """Determine the best available locale from Accept-Language.

        :returns: the best language match or None if the 'Accept-Language'
                  header was not available in the request.
        """
        if not self.accept_language:
            return None
        available = gettextutils.get_available_languages('heat')
        return self.accept_language.best_match(available)
def is_json_content_type(request):
    """Return True when the request body should be parsed as JSON.

    For GET requests an AWS-style "ContentType" query parameter, when
    present, overrides the Content-Type header; missing or text/plain
    content types are treated as JSON for backwards compatibility.
    """
    if request.method == 'GET':
        try:
            aws_content_type = request.params.get("ContentType")
        except Exception:
            # Best-effort: any failure reading the query string just means
            # no AWS override.
            aws_content_type = None
        #respect aws_content_type when both available
        content_type = aws_content_type or request.content_type
    else:
        content_type = request.content_type
    #bug #1887882
    #for back compatible for null or plain content type
    if not content_type or content_type.startswith('text/plain'):
        content_type = 'application/json'
    # Only bodies that actually look like a JSON object count.
    if content_type in ('JSON', 'application/json')\
            and request.body.startswith('{'):
        return True
    return False
class JSONRequestDeserializer(object):
    """Deserialize JSON request bodies, enforcing the configured size cap."""

    def has_body(self, request):
        """
        Return whether a Webob.Request object will possess an entity body.

        :param request: Webob.Request object
        """
        return bool(request.content_length > 0 and
                    is_json_content_type(request))

    def from_json(self, datastring):
        """Parse a JSON string, rejecting payloads over the size limit.

        :raises exception.RequestLimitExceeded: body exceeds the limit.
        :raises webob.exc.HTTPBadRequest: body is not valid JSON.
        """
        body_len = len(datastring)
        try:
            if body_len > cfg.CONF.max_json_body_size:
                msg = _('JSON body size (%(len)s bytes) exceeds maximum '
                        'allowed size (%(limit)s bytes).') % \
                    {'len': body_len,
                     'limit': cfg.CONF.max_json_body_size}
                raise exception.RequestLimitExceeded(message=msg)
            return json.loads(datastring)
        except ValueError as ex:
            raise webob.exc.HTTPBadRequest(six.text_type(ex))

    def default(self, request):
        """Return {'body': parsed_json} when a body is present, else {}."""
        if not self.has_body(request):
            return {}
        return {'body': self.from_json(request.body)}
class Resource(object):
    """
    WSGI app that handles (de)serialization and controller dispatch.
    Reads routing information supplied by RoutesMiddleware and calls
    the requested action method upon its deserializer, controller,
    and serializer. Those three objects may implement any of the basic
    controller action methods (create, update, show, index, delete)
    along with any that may be specified in the api router. A 'default'
    method may also be implemented to be used in place of any
    non-implemented actions. Deserializer methods must accept a request
    argument and return a dictionary. Controller methods must accept a
    request argument. Additionally, they must also accept keyword
    arguments that represent the keys returned by the Deserializer. They
    may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.
    """
    def __init__(self, controller, deserializer, serializer=None):
        """
        :param controller: object that implements methods created by routes
            lib
        :param deserializer: object that supports webob request
            deserialization through controller-like actions
        :param serializer: object that supports webob response serialization
            through controller-like actions; when None, a serializer is
            chosen per-request from the 'ContentType' query parameter
        """
        self.controller = controller
        self.deserializer = deserializer
        self.serializer = serializer
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        # From reading the boto code, and observation of real AWS api responses
        # it seems that the AWS api ignores the content-type in the html header
        # Instead it looks at a "ContentType" GET query parameter
        # This doesn't seem to be documented in the AWS cfn API spec, but it
        # would appear that the default response serialization is XML, as
        # described in the API docs, but passing a query parameter of
        # ContentType=JSON results in a JSON serialized response...
        content_type = request.params.get("ContentType")
        try:
            # Deserializer output becomes keyword arguments for the
            # controller action.
            deserialized_request = self.dispatch(self.deserializer,
                                                 action, request)
            action_args.update(deserialized_request)
            logging.debug(
                _('Calling %(controller)s : %(action)s'),
                {'controller': self.controller, 'action': action})
            action_result = self.dispatch(self.controller, action,
                                          request, **action_args)
        except TypeError as err:
            # A TypeError from dispatch usually means the request arguments
            # did not match the controller signature, i.e. a malformed
            # request rather than a server bug.
            logging.error(_('Exception handling resource: %s') % err)
            msg = _('The server could not comply with the request since '
                    'it is either malformed or otherwise incorrect.')
            err = webob.exc.HTTPBadRequest(msg)
            http_exc = translate_exception(err, request.best_match_language())
            # NOTE(luisg): We disguise HTTP exceptions, otherwise they will be
            # treated by wsgi as responses ready to be sent back and they
            # won't make it into the pipeline app that serializes errors
            raise exception.HTTPExceptionDisguise(http_exc)
        except webob.exc.HTTPException as err:
            if isinstance(err, aws_exception.HeatAPIException):
                # The AWS compatible API's don't use faultwrap, so
                # we want to detect the HeatAPIException subclasses
                # and raise rather than wrapping in HTTPExceptionDisguise
                raise
            if not isinstance(err, webob.exc.HTTPError):
                # Some HTTPException are actually not errors, they are
                # responses ready to be sent back to the users, so we don't
                # error log, disguise or translate those
                raise
            if isinstance(err, webob.exc.HTTPServerError):
                logging.error(
                    _("Returning %(code)s to user: %(explanation)s"),
                    {'code': err.code, 'explanation': err.explanation})
            http_exc = translate_exception(err, request.best_match_language())
            raise exception.HTTPExceptionDisguise(http_exc)
        except exception.HeatException as err:
            raise translate_exception(err, request.best_match_language())
        except Exception as err:
            log_exception(err, sys.exc_info())
            raise translate_exception(err, request.best_match_language())
        # Here we support either passing in a serializer or detecting it
        # based on the content type.
        try:
            serializer = self.serializer
            if serializer is None:
                if content_type == "JSON":
                    serializer = serializers.JSONResponseSerializer()
                else:
                    serializer = serializers.XMLResponseSerializer()
            response = webob.Response(request=request)
            self.dispatch(serializer, action, response, action_result)
            return response
        # return unserializable result (typically an exception)
        except Exception:
            # Here we should get API exceptions derived from HeatAPIException
            # these implement get_unserialized_body(), which allow us to get
            # a dict containing the unserialized error response.
            # We only need to serialize for JSON content_type, as the
            # exception body is pre-serialized to the default XML in the
            # HeatAPIException constructor
            # If we get something else here (e.g a webob.exc exception),
            # this will fail, and we just return it without serializing,
            # which will not conform to the expected AWS error response format
            if content_type == "JSON":
                try:
                    err_body = action_result.get_unserialized_body()
                    serializer.default(action_result, err_body)
                except Exception:
                    logging.warning(_("Unable to serialize exception "
                                      "response"))
            return action_result
    def dispatch(self, obj, action, *args, **kwargs):
        """Find action-specific method on self and call it.

        Falls back to the object's 'default' method when it does not
        implement the requested action.
        """
        try:
            method = getattr(obj, action)
        except AttributeError:
            method = getattr(obj, 'default')
        return method(*args, **kwargs)
    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library.

        Strips the 'controller' and 'format' routing keys so that only the
        action's own keyword arguments remain.
        """
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}
        try:
            del args['controller']
        except KeyError:
            pass
        try:
            del args['format']
        except KeyError:
            pass
        return args
def log_exception(err, exc_info):
    """Log an unexpected API error, attaching the traceback when the
    service runs in verbose or debug mode."""
    kwargs = {}
    if cfg.CONF.verbose or cfg.CONF.debug:
        kwargs['exc_info'] = exc_info
    logging.error(_("Unexpected error occurred serving API: %s") % err,
                  **kwargs)
def translate_exception(exc, locale):
    """Translates all translatable elements of the given exception.

    :param exc: the exception to translate in place
    :param locale: target locale, as returned by
        request.best_match_language()
    :returns: the same exception object, mutated
    """
    if isinstance(exc, exception.HeatException):
        exc.message = gettextutils.translate(exc.message, locale)
    else:
        # Non-Heat exceptions carry no Message object; translate their
        # string representation instead.
        exc.message = gettextutils.translate(six.text_type(exc), locale)
    if isinstance(exc, webob.exc.HTTPError):
        # If the explanation is not a Message, that means that the
        # explanation is the default, generic and not translatable explanation
        # from webob.exc. Since the explanation is the error shown when the
        # exception is converted to a response, let's actually swap it with
        # message, since message is what gets passed in at construction time
        # in the API
        if not isinstance(exc.explanation, gettextutils.Message):
            exc.explanation = six.text_type(exc)
            exc.detail = ''
        else:
            exc.explanation = \
                gettextutils.translate(exc.explanation, locale)
            exc.detail = gettextutils.translate(exc.detail, locale)
    return exc
class BasePasteFactory(object):
    """A base class for paste app and filter factories.
    Sub-classes must override the KEY class attribute and provide
    a __call__ method.
    """
    # PasteDeploy local-conf key naming the class to import; set by subclass.
    KEY = None
    def __init__(self, conf):
        # ConfigOpts object handed to every constructed app/filter.
        self.conf = conf
    def __call__(self, global_conf, **local_conf):
        raise NotImplementedError
    def _import_factory(self, local_conf):
        """Import an app/filter class.
        Lookup the KEY from the PasteDeploy local conf and import the
        class named there. This class can then be used as an app or
        filter factory.
        Note we support the <module>:<class> format.
        Note also that if you do e.g.
        key =
        value
        then ConfigParser returns a value with a leading newline, so
        we strip() the value before using it.
        """
        class_name = local_conf[self.KEY].replace(':', '.').strip()
        return importutils.import_class(class_name)
class AppFactory(BasePasteFactory):
    """A Generic paste.deploy app factory.
    This requires heat.app_factory to be set to a callable which returns a
    WSGI app when invoked. The format of the name is <module>:<callable> e.g.
    [app:apiv1app]
    paste.app_factory = heat.common.wsgi:app_factory
    heat.app_factory = heat.api.cfn.v1:API
    The WSGI app constructor must accept a ConfigOpts object and a local config
    dict as its two arguments.
    """
    KEY = 'heat.app_factory'
    def __call__(self, global_conf, **local_conf):
        """The actual paste.app_factory protocol method."""
        # Import the configured app class and construct it with the shared
        # ConfigOpts plus the PasteDeploy local configuration.
        factory = self._import_factory(local_conf)
        return factory(self.conf, **local_conf)
class FilterFactory(AppFactory):
    """A Generic paste.deploy filter factory.
    This requires heat.filter_factory to be set to a callable which returns a
    WSGI filter when invoked. The format is <module>:<callable> e.g.
    [filter:cache]
    paste.filter_factory = heat.common.wsgi:filter_factory
    heat.filter_factory = heat.api.middleware.cache:CacheFilter
    The WSGI filter constructor must accept a WSGI app, a ConfigOpts object and
    a local config dict as its three arguments.
    """
    KEY = 'heat.filter_factory'
    def __call__(self, global_conf, **local_conf):
        """The actual paste.filter_factory protocol method."""
        factory = self._import_factory(local_conf)
        # Per the paste.filter_factory protocol, return a callable that
        # wraps the given app with the imported filter.
        def filter(app):
            return factory(app, self.conf, **local_conf)
        return filter
def setup_paste_factories(conf):
    """Set up the generic paste app and filter factories.
    Set things up so that:
    paste.app_factory = heat.common.wsgi:app_factory
    and
    paste.filter_factory = heat.common.wsgi:filter_factory
    work correctly while loading PasteDeploy configuration.
    The app factories are constructed at runtime to allow us to pass a
    ConfigOpts object to the WSGI classes.
    :param conf: a ConfigOpts object
    """
    # Bind the factories as module-level names so PasteDeploy can resolve
    # the <module>:<callable> references above; undone by
    # teardown_paste_factories().
    global app_factory, filter_factory
    app_factory = AppFactory(conf)
    filter_factory = FilterFactory(conf)
def teardown_paste_factories():
    """Reverse the effect of setup_paste_factories()."""
    # Drop the module-level factory bindings created during setup.
    global app_factory, filter_factory
    del app_factory, filter_factory
def paste_deploy_app(paste_config_file, app_name, conf):
    """Load a WSGI app from a PasteDeploy configuration.
    Use deploy.loadapp() to load the app from the PasteDeploy configuration,
    ensuring that the supplied ConfigOpts object is passed to the app and
    filter constructors.
    :param paste_config_file: a PasteDeploy config file
    :param app_name: the name of the app/pipeline to load from the file
    :param conf: a ConfigOpts object to supply to the app and its filters
    :returns: the WSGI app
    """
    setup_paste_factories(conf)
    try:
        return deploy.loadapp("config:%s" % paste_config_file, name=app_name)
    finally:
        # Always unbind the module-level factories, even if loadapp raises.
        teardown_paste_factories()
| {
"content_hash": "5f6b43c53a942a33fa6c86cac79692aa",
"timestamp": "",
"source": "github",
"line_count": 874,
"max_line_length": 79,
"avg_line_length": 37.31464530892448,
"alnum_prop": 0.6029190813479287,
"repo_name": "redhat-openstack/heat",
"id": "b8dc36c0150b9f45ef8a74708a8985559b9c9c22",
"size": "33374",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "heat/common/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4827027"
},
{
"name": "Shell",
"bytes": "26720"
}
],
"symlink_target": ""
} |
from google.cloud.bigquery import datapolicies_v1beta1
async def sample_list_data_policies():
    """List data policies with the async client and print each response.

    The async client's ``list_data_policies`` is a coroutine that resolves
    to an async pager; it must be awaited before iterating with
    ``async for`` (iterating the un-awaited coroutine raises a TypeError).
    """
    # Create a client
    client = datapolicies_v1beta1.DataPolicyServiceAsyncClient()
    # Initialize request argument(s)
    request = datapolicies_v1beta1.ListDataPoliciesRequest(
        parent="parent_value",
    )
    # Make the request
    page_result = await client.list_data_policies(request=request)
    # Handle the response
    async for response in page_result:
        print(response)
# [END bigquerydatapolicy_v1beta1_generated_DataPolicyService_ListDataPolicies_async]
| {
"content_hash": "5124f228606cd2cfe0acfec60714662c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 85,
"avg_line_length": 29,
"alnum_prop": 0.7396551724137931,
"repo_name": "googleapis/python-bigquery-datapolicies",
"id": "94a0fa2a4a2ecf7630d283f53a745b711bcd9ac8",
"size": "2006",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/bigquerydatapolicy_v1beta1_generated_data_policy_service_list_data_policies_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "336915"
},
{
"name": "Shell",
"bytes": "30705"
}
],
"symlink_target": ""
} |
import pickle
import pytest
from pyrsistent import CheckedPMap, InvariantException, PMap, CheckedType, CheckedPSet, CheckedPVector, \
CheckedKeyTypeError, CheckedValueTypeError
class FloatToIntMap(CheckedPMap):
    """Checked map fixture: float keys, int values, int(key) must equal value."""
    __key_type__ = float
    __value_type__ = int
    __invariant__ = lambda key, value: (int(key) == value, 'Invalid mapping')
def test_instantiate():
    """Construction from a dict keeps the content and the expected types."""
    source = {1.25: 1, 2.5: 2}
    fmap = FloatToIntMap(source)
    assert dict(fmap.items()) == source
    for expected_type in (FloatToIntMap, PMap, CheckedType):
        assert isinstance(fmap, expected_type)
def test_instantiate_empty():
    """Argument-less construction yields an empty checked map."""
    empty = FloatToIntMap()
    assert dict(empty.items()) == {}
    assert isinstance(empty, FloatToIntMap)
def test_set():
    """set() returns a new checked map containing the added entry."""
    original = FloatToIntMap()
    updated = original.set(1.0, 1)
    assert updated[1.0] == 1
    assert isinstance(updated, FloatToIntMap)
def test_invalid_key_type():
    """A key that is not a float must raise CheckedKeyTypeError."""
    with pytest.raises(CheckedKeyTypeError):
        FloatToIntMap({1: 1})
def test_invalid_value_type():
    """A value that is not an int must raise CheckedValueTypeError."""
    with pytest.raises(CheckedValueTypeError):
        FloatToIntMap({1.0: 1.0})
def test_breaking_invariant():
    """A mapping violating __invariant__ raises with the invariant message."""
    # pytest.raises is the idiomatic replacement for the
    # try/assert-False/except pattern and still exposes the exception
    # for inspection via excinfo.value.
    with pytest.raises(InvariantException) as excinfo:
        FloatToIntMap({1.5: 2})
    assert excinfo.value.invariant_errors == ('Invalid mapping',)
def test_repr():
    """The repr shows the class name wrapping the underlying mapping."""
    assert str(FloatToIntMap({1.25: 1})) == 'FloatToIntMap({1.25: 1})'
def test_default_serialization():
    """Without a custom serializer, serialize() returns the plain dict."""
    source = {1.25: 1, 2.5: 2}
    assert FloatToIntMap(source).serialize() == source
class StringFloatToIntMap(FloatToIntMap):
    """FloatToIntMap variant whose serializer formats keys and values."""
    @staticmethod
    def __serializer__(format, key, value):
        # 'format' is the serialize() argument, used as a str.format template.
        return format.format(key), format.format(value)
def test_custom_serialization():
    """__serializer__ is applied to every key and value pair."""
    serialized = StringFloatToIntMap({1.25: 1, 2.5: 2}).serialize("{0}")
    assert serialized == {"1.25": "1", "2.5": "2"}
class FloatSet(CheckedPSet):
    """Checked set fixture restricted to float members."""
    __type__ = float
class IntToFloatSetMap(CheckedPMap):
    """Checked map fixture: int keys mapped to FloatSet values (nested types)."""
    __key_type__ = int
    __value_type__ = FloatSet
def test_multi_level_serialization():
    """Nested checked types serialize recursively into plain builtins."""
    x = IntToFloatSetMap.create({1: [1.25, 1.50], 2: [2.5, 2.75]})
    # NOTE(review): this repr assertion depends on the PSet iteration order
    # observed for these particular floats — confirm if it ever flakes.
    assert str(x) == "IntToFloatSetMap({1: FloatSet([1.5, 1.25]), 2: FloatSet([2.75, 2.5])})"
    sx = x.serialize()
    assert sx == {1: set([1.5, 1.25]), 2: set([2.75, 2.5])}
    # Values come back as builtin sets, not FloatSet.
    assert isinstance(sx[1], set)
def test_create_non_checked_types():
    """create() on plain builtins is equivalent to direct construction."""
    assert FloatToIntMap.create({1.25: 1, 2.5: 2}) == FloatToIntMap({1.25: 1, 2.5: 2})
def test_create_checked_types():
    """create() coerces plain containers into the declared checked types."""
    class IntSet(CheckedPSet):
        __type__ = int
    class FloatVector(CheckedPVector):
        __type__ = float
    class IntSetToFloatVectorMap(CheckedPMap):
        __key_type__ = IntSet
        __value_type__ = FloatVector
    # A frozenset key and a list value are converted to IntSet/FloatVector.
    x = IntSetToFloatVectorMap.create({frozenset([1, 2]): [1.25, 2.5]})
    assert str(x) == "IntSetToFloatVectorMap({IntSet([1, 2]): FloatVector([1.25, 2.5])})"
def test_evolver_returns_same_instance_when_no_updates():
    """persistent() on an untouched evolver is an identity operation."""
    original = FloatToIntMap({1.25: 1, 2.25: 2})
    assert original.evolver().persistent() is original
def test_map_with_no_types_or_invariants():
    """A CheckedPMap subclass without restrictions behaves like a plain PMap."""
    class UnrestrictedMap(CheckedPMap):
        pass
    unchecked = UnrestrictedMap({1: 2, 3: 4})
    assert unchecked[1] == 2
    assert unchecked[3] == 4
def test_pickling():
    """A checked map round-trips through pickle keeping type and content."""
    original = FloatToIntMap({1.25: 1, 2.5: 2})
    restored = pickle.loads(pickle.dumps(original, -1))
    assert restored == original
    assert isinstance(restored, FloatToIntMap)
class FloatVector(CheckedPVector):
    """Checked vector fixture restricted to float elements."""
    __type__ = float
class VectorToSetMap(CheckedPMap):
    """Checked map whose key/value types are given as dotted string names,
    resolved lazily by pyrsistent (exercises string type specification)."""
    __key_type__ = 'checked_map_test.FloatVector'
    __value_type__ = 'checked_map_test.FloatSet'
def test_type_check_with_string_specification():
    """Types referenced by dotted string names are resolved for checking."""
    values = [1.5, 2.0]
    key_vector = FloatVector(values)
    value_set = FloatSet(values)
    assert VectorToSetMap({key_vector: value_set})[key_vector] == value_set
def test_type_creation_with_string_specification():
    """create() coerces plain containers using string-specified types."""
    values = (1.5, 2.0)
    created = VectorToSetMap.create({values: values})
    assert created[FloatVector(values)] == set(values)
def test_supports_weakref():
    """Checked maps must be weak-referenceable (no missing __weakref__ slot)."""
    import weakref
    weakref.ref(VectorToSetMap({}))
| {
"content_hash": "99733523c9247896e66f29289967de2b",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 105,
"avg_line_length": 25.901315789473685,
"alnum_prop": 0.6332232664465329,
"repo_name": "tobgu/pyrsistent",
"id": "aae07ed470dfa83bf814b70972d1fef528228343",
"size": "3937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/checked_map_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49792"
},
{
"name": "Python",
"bytes": "299428"
},
{
"name": "Shell",
"bytes": "269"
}
],
"symlink_target": ""
} |
'''
Linux accelerometer
---------------------
'''
from plyer_lach.facades import Accelerometer
import os
import glob
import re
class LinuxAccelerometer(Accelerometer):
    """Accelerometer provider reading from the Linux sysfs position file."""
    def _enable(self):
        # sysfs needs no explicit activation.
        pass
    def _disable(self):
        pass
    def _get_acceleration(self):
        """Return [x, y, z] acceleration read from sysfs.

        :raises Exception: when no */position file exists under
            /sys/devices/platform (no accelerometer available)
        """
        try:
            pos = glob.glob("/sys/devices/platform/*/position")[0]
        except IndexError:
            raise Exception('Could not enable accelerometer!')
        with open(pos, "r") as p:
            t = p.read()
            # Extract the (possibly negative, possibly fractional) numbers
            # from the raw sysfs text, e.g. "(-12,34,1000)".
            coords = re.findall(r"[-]?\d+\.?\d*", t)
            # Apparently the acceleration on sysfs goes from -1000 to 1000.
            # I divide it by 100 to make it equivalent to Android.
            # The negative is because the coordinates are inverted on Linux
            return [float(i) / -100 for i in coords]
def instance():
    """Provider factory: return a new LinuxAccelerometer instance."""
    return LinuxAccelerometer()
| {
"content_hash": "0b56844d00848906833fabc6ce80a4be",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.5864406779661017,
"repo_name": "locksmith47/turing-sim-kivy",
"id": "804d92993beac318748b9d5f40319b1f4e9dfdc2",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/plyer_lach/platforms/linux/accelerometer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Python",
"bytes": "297059"
},
{
"name": "Tcl",
"bytes": "1448"
}
],
"symlink_target": ""
} |
import logging
import platform
import subprocess
import sys
import warnings
from pathlib import Path
from pytest import raises, mark
from twisted import version as twisted_version
from twisted.internet import defer
from twisted.python.versions import Version
from twisted.trial import unittest
import scrapy
from scrapy.crawler import Crawler, CrawlerRunner, CrawlerProcess
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.settings import Settings, default_settings
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging, get_scrapy_root_handler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.misc import load_object
from scrapy.utils.test import get_crawler
from scrapy.extensions.throttle import AutoThrottle
from scrapy.extensions import telnet
from scrapy.utils.test import get_testenv
from pkg_resources import parse_version
from w3lib import __version__ as w3lib_version
from tests.mockserver import MockServer
class BaseCrawlerTest(unittest.TestCase):
    """Shared assertions for crawler-related test cases."""
    def assertOptionIsDefault(self, settings, key):
        """Assert *settings* is a Settings whose *key* equals the default."""
        self.assertIsInstance(settings, Settings)
        self.assertEqual(settings[key], getattr(default_settings, key))
class CrawlerTestCase(BaseCrawlerTest):
    """Tests for Crawler construction and settings population."""
    def test_populate_spidercls_settings(self):
        # Spider custom_settings must override project-level settings,
        # and building a Crawler must freeze its own settings copy only.
        spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
        project_settings = {'TEST1': 'project', 'TEST3': 'project'}
        class CustomSettingsSpider(DefaultSpider):
            custom_settings = spider_settings
        settings = Settings()
        settings.setdict(project_settings, priority='project')
        with warnings.catch_warnings():
            # Crawler(spidercls, settings) emits a deprecation warning here.
            warnings.simplefilter("ignore", ScrapyDeprecationWarning)
            crawler = Crawler(CustomSettingsSpider, settings)
        self.assertEqual(crawler.settings.get('TEST1'), 'spider')
        self.assertEqual(crawler.settings.get('TEST2'), 'spider')
        self.assertEqual(crawler.settings.get('TEST3'), 'project')
        self.assertFalse(settings.frozen)
        self.assertTrue(crawler.settings.frozen)
    def test_crawler_accepts_dict(self):
        crawler = get_crawler(DefaultSpider, {'foo': 'bar'})
        self.assertEqual(crawler.settings['foo'], 'bar')
        self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
    def test_crawler_accepts_None(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ScrapyDeprecationWarning)
            crawler = Crawler(DefaultSpider)
        self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
    def test_crawler_rejects_spider_objects(self):
        # Crawler expects a spider *class*; an instance must be rejected.
        with raises(ValueError):
            Crawler(DefaultSpider())
class SpiderSettingsTestCase(unittest.TestCase):
    """Tests that spider custom_settings reach the extension machinery."""
    def test_spider_custom_settings(self):
        class MySpider(scrapy.Spider):
            name = 'spider'
            custom_settings = {
                'AUTOTHROTTLE_ENABLED': True
            }
        crawler = get_crawler(MySpider)
        # AutoThrottle is only loaded when the setting above is honored.
        enabled_exts = [e.__class__ for e in crawler.extensions.middlewares]
        self.assertIn(AutoThrottle, enabled_exts)
class CrawlerLoggingTestCase(unittest.TestCase):
    """Tests for the interaction of crawler creation with scrapy logging."""
    def test_no_root_handler_installed(self):
        # Creating a crawler must not install a root handler when none exists.
        handler = get_scrapy_root_handler()
        if handler is not None:
            logging.root.removeHandler(handler)
        class MySpider(scrapy.Spider):
            name = 'spider'
        get_crawler(MySpider)
        assert get_scrapy_root_handler() is None
    def test_spider_custom_settings_log_level(self):
        """LOG_LEVEL from custom_settings must reconfigure the root handler."""
        log_file = Path(self.mktemp())
        log_file.write_text('previous message\n', encoding='utf-8')
        class MySpider(scrapy.Spider):
            name = 'spider'
            custom_settings = {
                'LOG_LEVEL': 'INFO',
                'LOG_FILE': str(log_file),
                # settings to avoid extra warnings
                'REQUEST_FINGERPRINTER_IMPLEMENTATION': '2.7',
                'TELNETCONSOLE_ENABLED': telnet.TWISTED_CONCH_AVAILABLE,
            }
        configure_logging()
        self.assertEqual(get_scrapy_root_handler().level, logging.DEBUG)
        crawler = get_crawler(MySpider)
        self.assertEqual(get_scrapy_root_handler().level, logging.INFO)
        info_count = crawler.stats.get_value('log_count/INFO')
        logging.debug('debug message')
        logging.info('info message')
        logging.warning('warning message')
        logging.error('error message')
        logged = log_file.read_text(encoding='utf-8')
        # LOG_FILE_APPEND defaults to True, so the prior content survives.
        self.assertIn('previous message', logged)
        self.assertNotIn('debug message', logged)
        self.assertIn('info message', logged)
        self.assertIn('warning message', logged)
        self.assertIn('error message', logged)
        self.assertEqual(crawler.stats.get_value('log_count/ERROR'), 1)
        self.assertEqual(crawler.stats.get_value('log_count/WARNING'), 1)
        self.assertEqual(
            crawler.stats.get_value('log_count/INFO') - info_count, 1)
        self.assertEqual(crawler.stats.get_value('log_count/DEBUG', 0), 0)
    def test_spider_custom_settings_log_append(self):
        """LOG_FILE_APPEND=False must truncate the existing log file."""
        log_file = Path(self.mktemp())
        log_file.write_text('previous message\n', encoding='utf-8')
        class MySpider(scrapy.Spider):
            name = 'spider'
            custom_settings = {
                'LOG_FILE': str(log_file),
                'LOG_FILE_APPEND': False,
                # disable telnet if not available to avoid an extra warning
                'TELNETCONSOLE_ENABLED': telnet.TWISTED_CONCH_AVAILABLE,
            }
        configure_logging()
        get_crawler(MySpider)
        logging.debug('debug message')
        logged = log_file.read_text(encoding='utf-8')
        self.assertNotIn('previous message', logged)
        self.assertIn('debug message', logged)
class SpiderLoaderWithWrongInterface:
    """Fixture: a loader that does NOT implement ISpiderLoader."""
    def unneeded_method(self):
        pass
class CustomSpiderLoader(SpiderLoader):
    """Fixture: a trivially-subclassed but interface-conforming loader."""
    pass
class CrawlerRunnerTestCase(BaseCrawlerTest):
    """Tests for CrawlerRunner construction and deprecations."""
    def test_spider_manager_verify_interface(self):
        # A SPIDER_LOADER_CLASS not implementing ISpiderLoader must fail
        # with a warning that names the setting and the interface.
        settings = Settings({
            'SPIDER_LOADER_CLASS': SpiderLoaderWithWrongInterface,
        })
        with warnings.catch_warnings(record=True) as w:
            self.assertRaises(AttributeError, CrawlerRunner, settings)
            self.assertEqual(len(w), 1)
            self.assertIn("SPIDER_LOADER_CLASS", str(w[0].message))
            self.assertIn("scrapy.interfaces.ISpiderLoader", str(w[0].message))
    def test_crawler_runner_accepts_dict(self):
        runner = CrawlerRunner({'foo': 'bar'})
        self.assertEqual(runner.settings['foo'], 'bar')
        self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
    def test_crawler_runner_accepts_None(self):
        runner = CrawlerRunner()
        self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
    def test_deprecated_attribute_spiders(self):
        # CrawlerRunner.spiders is a deprecated alias of .spider_loader.
        with warnings.catch_warnings(record=True) as w:
            runner = CrawlerRunner(Settings())
            spiders = runner.spiders
            self.assertEqual(len(w), 1)
            self.assertIn("CrawlerRunner.spiders", str(w[0].message))
            self.assertIn("CrawlerRunner.spider_loader", str(w[0].message))
            sl_cls = load_object(runner.settings['SPIDER_LOADER_CLASS'])
            self.assertIsInstance(spiders, sl_cls)
class CrawlerProcessTest(BaseCrawlerTest):
    """Tests that CrawlerProcess accepts the same settings forms as Crawler."""
    def test_crawler_process_accepts_dict(self):
        runner = CrawlerProcess({'foo': 'bar'})
        self.assertEqual(runner.settings['foo'], 'bar')
        self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
    def test_crawler_process_accepts_None(self):
        runner = CrawlerProcess()
        self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
class ExceptionSpider(scrapy.Spider):
    """Fixture spider whose construction always fails (bootstrap error path)."""
    name = 'exception'
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        raise ValueError('Exception in from_crawler method')
class NoRequestsSpider(scrapy.Spider):
    """Fixture spider that crawls nothing and finishes immediately."""
    name = 'no_request'
    def start_requests(self):
        return []
@mark.usefixtures('reactor_pytest')
class CrawlerRunnerHasSpider(unittest.TestCase):
    """Tests for CrawlerRunner.bootstrap_failed and reactor enforcement."""
    def _runner(self):
        # Shared runner factory; the fingerprinter setting avoids a warning.
        return CrawlerRunner({'REQUEST_FINGERPRINTER_IMPLEMENTATION': '2.7'})
    @defer.inlineCallbacks
    def test_crawler_runner_bootstrap_successful(self):
        runner = self._runner()
        yield runner.crawl(NoRequestsSpider)
        self.assertEqual(runner.bootstrap_failed, False)
    @defer.inlineCallbacks
    def test_crawler_runner_bootstrap_successful_for_several(self):
        runner = self._runner()
        yield runner.crawl(NoRequestsSpider)
        yield runner.crawl(NoRequestsSpider)
        self.assertEqual(runner.bootstrap_failed, False)
    @defer.inlineCallbacks
    def test_crawler_runner_bootstrap_failed(self):
        runner = self._runner()
        try:
            yield runner.crawl(ExceptionSpider)
        except ValueError:
            pass
        else:
            self.fail('Exception should be raised from spider')
        self.assertEqual(runner.bootstrap_failed, True)
    @defer.inlineCallbacks
    def test_crawler_runner_bootstrap_failed_for_several(self):
        # bootstrap_failed must stay True even after a later successful crawl.
        runner = self._runner()
        try:
            yield runner.crawl(ExceptionSpider)
        except ValueError:
            pass
        else:
            self.fail('Exception should be raised from spider')
        yield runner.crawl(NoRequestsSpider)
        self.assertEqual(runner.bootstrap_failed, True)
    @defer.inlineCallbacks
    def test_crawler_runner_asyncio_enabled_true(self):
        # Requesting the asyncio reactor only works if it is installed;
        # otherwise the runner must raise a mismatch error.
        if self.reactor_pytest == 'asyncio':
            CrawlerRunner(settings={
                "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
                "REQUEST_FINGERPRINTER_IMPLEMENTATION": "2.7",
            })
        else:
            msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
            with self.assertRaisesRegex(Exception, msg):
                runner = CrawlerRunner(settings={
                    "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
                    "REQUEST_FINGERPRINTER_IMPLEMENTATION": "2.7",
                })
                yield runner.crawl(NoRequestsSpider)
class ScriptRunnerMixin:
    """Mixin for test cases that run helper scripts in a child interpreter.

    Subclasses must set ``script_dir`` to the directory holding the scripts.
    """
    script_dir: Path
    def run_script(self, script_name: str, *script_args):
        """Run ``script_name`` from ``script_dir`` and return decoded stderr.

        :param script_name: file name of the script inside ``script_dir``
        :param script_args: extra command-line arguments for the script
        :returns: the child process's stderr as a UTF-8 string
        """
        script_path = self.script_dir / script_name
        args = [sys.executable, str(script_path)] + list(script_args)
        # subprocess.run replaces the Popen/communicate pair: it waits for
        # the child and captures both streams (stdout is discarded, matching
        # the original behavior of only returning stderr).
        completed = subprocess.run(args, env=get_testenv(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        return completed.stderr.decode('utf-8')
class CrawlerProcessSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = Path(__file__).parent.resolve() / 'CrawlerProcess'
def test_simple(self):
log = self.run_script('simple.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_multi(self):
log = self.run_script('multi.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertNotIn("ReactorAlreadyInstalledError", log)
def test_reactor_default(self):
log = self.run_script('reactor_default.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertNotIn("ReactorAlreadyInstalledError", log)
def test_reactor_default_twisted_reactor_select(self):
log = self.run_script('reactor_default_twisted_reactor_select.py')
if platform.system() in ['Windows', 'Darwin']:
# The goal of this test function is to test that, when a reactor is
# installed (the default one here) and a different reactor is
# configured (select here), an error raises.
#
# In Windows the default reactor is the select reactor, so that
# error does not raise.
#
# If that ever becomes the case on more platforms (i.e. if Linux
# also starts using the select reactor by default in a future
# version of Twisted), then we will need to rethink this test.
self.assertIn('Spider closed (finished)', log)
else:
self.assertNotIn('Spider closed (finished)', log)
self.assertIn(
(
"does not match the requested one "
"(twisted.internet.selectreactor.SelectReactor)"
),
log,
)
def test_reactor_select(self):
log = self.run_script('reactor_select.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("ReactorAlreadyInstalledError", log)
def test_reactor_select_twisted_reactor_select(self):
log = self.run_script('reactor_select_twisted_reactor_select.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("ReactorAlreadyInstalledError", log)
def test_reactor_select_subclass_twisted_reactor_select(self):
log = self.run_script('reactor_select_subclass_twisted_reactor_select.py')
self.assertNotIn('Spider closed (finished)', log)
self.assertIn(
(
"does not match the requested one "
"(twisted.internet.selectreactor.SelectReactor)"
),
log,
)
def test_asyncio_enabled_no_reactor(self):
log = self.run_script('asyncio_enabled_no_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_asyncio_enabled_reactor(self):
log = self.run_script('asyncio_enabled_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
@mark.skipif(parse_version(w3lib_version) >= parse_version("2.0.0"),
reason='w3lib 2.0.0 and later do not allow invalid domains.')
def test_ipv6_default_name_resolver(self):
log = self.run_script('default_name_resolver.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("'downloader/exception_type_count/twisted.internet.error.DNSLookupError': 1,", log)
self.assertIn(
"twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: ::1.",
log)
def test_caching_hostname_resolver_ipv6(self):
log = self.run_script("caching_hostname_resolver_ipv6.py")
self.assertIn("Spider closed (finished)", log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_caching_hostname_resolver_finite_execution(self):
with MockServer() as mock_server:
http_address = mock_server.http_address.replace("0.0.0.0", "127.0.0.1")
log = self.run_script("caching_hostname_resolver.py", http_address)
self.assertIn("Spider closed (finished)", log)
self.assertNotIn("ERROR: Error downloading", log)
self.assertNotIn("TimeoutError", log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_twisted_reactor_select(self):
log = self.run_script("twisted_reactor_select.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.selectreactor.SelectReactor", log)
@mark.skipif(platform.system() == 'Windows', reason="PollReactor is not supported on Windows")
def test_twisted_reactor_poll(self):
log = self.run_script("twisted_reactor_poll.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.pollreactor.PollReactor", log)
def test_twisted_reactor_asyncio(self):
log = self.run_script("twisted_reactor_asyncio.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_twisted_reactor_asyncio_custom_settings(self):
log = self.run_script("twisted_reactor_custom_settings.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_twisted_reactor_asyncio_custom_settings_same(self):
log = self.run_script("twisted_reactor_custom_settings_same.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_twisted_reactor_asyncio_custom_settings_conflict(self):
log = self.run_script("twisted_reactor_custom_settings_conflict.py")
self.assertIn("Using reactor: twisted.internet.selectreactor.SelectReactor", log)
self.assertIn("(twisted.internet.selectreactor.SelectReactor) does not match the requested one", log)
@mark.skipif(sys.implementation.name == 'pypy', reason='uvloop does not support pypy properly')
@mark.skipif(platform.system() == 'Windows', reason='uvloop does not support Windows')
@mark.skipif(twisted_version == Version('twisted', 21, 2, 0), reason='https://twistedmatrix.com/trac/ticket/10106')
def test_custom_loop_asyncio(self):
log = self.run_script("asyncio_custom_loop.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertIn("Using asyncio event loop: uvloop.Loop", log)
@mark.skipif(sys.implementation.name == "pypy", reason="uvloop does not support pypy properly")
@mark.skipif(platform.system() == "Windows", reason="uvloop does not support Windows")
@mark.skipif(twisted_version == Version('twisted', 21, 2, 0), reason='https://twistedmatrix.com/trac/ticket/10106')
def test_custom_loop_asyncio_deferred_signal(self):
log = self.run_script("asyncio_deferred_signal.py", "uvloop.Loop")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertIn("Using asyncio event loop: uvloop.Loop", log)
self.assertIn("async pipeline opened!", log)
@mark.skipif(sys.implementation.name == 'pypy', reason='uvloop does not support pypy properly')
@mark.skipif(platform.system() == 'Windows', reason='uvloop does not support Windows')
@mark.skipif(twisted_version == Version('twisted', 21, 2, 0), reason='https://twistedmatrix.com/trac/ticket/10106')
def test_asyncio_enabled_reactor_same_loop(self):
    """Pre-installing the reactor with the same loop class works."""
    output = self.run_script("asyncio_enabled_reactor_same_loop.py")
    for expected in (
        "Spider closed (finished)",
        "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "Using asyncio event loop: uvloop.Loop",
    ):
        self.assertIn(expected, output)
@mark.skipif(sys.implementation.name == 'pypy', reason='uvloop does not support pypy properly')
@mark.skipif(platform.system() == 'Windows', reason='uvloop does not support Windows')
@mark.skipif(twisted_version == Version('twisted', 21, 2, 0), reason='https://twistedmatrix.com/trac/ticket/10106')
def test_asyncio_enabled_reactor_different_loop(self):
    """A loop different from ASYNCIO_EVENT_LOOP aborts the crawl."""
    output = self.run_script("asyncio_enabled_reactor_different_loop.py")
    self.assertNotIn("Spider closed (finished)", output)
    expected_error = (
        "does not match the one specified in the ASYNCIO_EVENT_LOOP "
        "setting (uvloop.Loop)"
    )
    self.assertIn(expected_error, output)
def test_default_loop_asyncio_deferred_signal(self):
    """Without an explicit loop argument the default loop is used, not uvloop."""
    output = self.run_script("asyncio_deferred_signal.py")
    self.assertIn("Spider closed (finished)", output)
    self.assertIn(
        "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        output,
    )
    self.assertNotIn("Using asyncio event loop: uvloop.Loop", output)
    self.assertIn("async pipeline opened!", output)
class CrawlerRunnerSubprocess(ScriptRunnerMixin, unittest.TestCase):
    """Runs the scripts from the ``CrawlerRunner`` directory in a subprocess."""

    script_dir = Path(__file__).parent.resolve() / 'CrawlerRunner'

    def test_response_ip_address(self):
        """The response IP address is exposed as an ``ipaddress`` object."""
        output = self.run_script("ip_address.py")
        for expected in (
            "INFO: Spider closed (finished)",
            "INFO: Host: not.a.real.domain",
            "INFO: Type: <class 'ipaddress.IPv4Address'>",
            "INFO: IP address: 127.0.0.1",
        ):
            self.assertIn(expected, output)
| {
"content_hash": "65a99f27af376e07a48d48914cebe05e",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 119,
"avg_line_length": 42.417004048582996,
"alnum_prop": 0.6645509210651904,
"repo_name": "scrapy/scrapy",
"id": "c09f1a6f2c0218e86d4e6c76fbd732fbd10bbcb8",
"size": "20954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_crawler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "2021119"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
'''
Created on Nov 17, 2013

@author: leoreyes
'''
from google import search
from Crawler import findContactPage

if __name__ == '__main__':
    # For each keyword line in the input file: run a Google search, record
    # the top result, try to find its contact page and scrape an e-mail
    # address from it, appending results to the output file.
    inputFileName = raw_input("Enter name of input file: ")
    writeFileName = raw_input("Enter name of output file: ")
    writeFile = open(writeFileName, "a")
    with open(inputFileName) as file:
        for line in file:
            searchKeyWord = line
            numTemp=1
            stopTemp=1
            # NOTE(review): search() may yield URLs (generator) rather than a
            # plain string -- confirm writeFile.write(url) receives a str.
            url = search(searchKeyWord,num=numTemp,stop=stopTemp,pause=5.0)
            writeFile.write(url)
            #writeFile.write("\n")
            contactStr = findContactPage(url)
            print contactStr
            if(len(contactStr) > 0):
                # NOTE(review): only `search` is imported from the google
                # package, so `google.get_page` raises NameError; likewise
                # BeautifulSoup and re are used but never imported.
                contactPage = google.get_page(contactStr[0].get("href"))
                print contactStr[0].get("href")#.find_parents("a")
                soup = BeautifulSoup(contactPage)
                emailStr = soup.find_all(text=re.compile("[\w\.-]+@[\w\.-]+"))
                if(len(emailStr) > 0) :
                    # NOTE(review): find_all returns a list; "\t" + emailStr
                    # raises TypeError -- probably needs "".join(emailStr).
                    writeFile.write("\t" + emailStr + "\n")
                    print emailStr
                else:
                    print "could not find email"
            else:
                writeFile.write("\n")
                print "could not find contacts page"
    # NOTE(review): redundant -- the with-statement already closed the file.
    file.close()
    pass
"content_hash": "7ef90b24f1270cbfc365b5f502a5f19c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 33.23684210526316,
"alnum_prop": 0.5534441805225653,
"repo_name": "LeoYReyes/GoogleSearchAutomator",
"id": "797bfecbcf9f82e5b47b8720501faf59111d52a7",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoSearch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "288735"
}
],
"symlink_target": ""
} |
import unittest
from pyobjus import autoclass, dereference, ObjcInt
from pyobjus.dylib_manager import load_dylib
from pyobjus.objc_py_types import NSRange
Car = car = None
class DereferenceTest(unittest.TestCase):
    """Tests for dereferencing pointers returned by Objective-C code."""

    def setUp(self):
        # Load the test dylib and create a fresh Car instance for every test.
        global Car, car
        load_dylib('testlib.dylib', usr_path=False)
        Car = autoclass('Car')
        car = Car.alloc().init()

    def test_dereference_basic(self):
        # An NSRange pointer dereferences into a struct with its fields intact.
        result = dereference(car.makeRangePtr())
        self.assertEqual(result.location, 567)
        self.assertEqual(result.length, 123)

    def test_dereference_with_type(self):
        # A void pointer requires an explicit of_type to be interpreted.
        value = dereference(car.makeIntVoidPtr(), of_type=ObjcInt)
        self.assertEqual(value, 12345)
| {
"content_hash": "67d833b3bb4f6d9283e62a78d3cc3a89",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 55,
"avg_line_length": 30.16,
"alnum_prop": 0.6671087533156499,
"repo_name": "kivy/pyobjus",
"id": "4fedfb50f3c979c365706b9095ddb0f7ade9417a",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dereference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2396"
},
{
"name": "Cython",
"bytes": "104929"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Objective-C",
"bytes": "33680"
},
{
"name": "Python",
"bytes": "210401"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
} |
import logging
from google.appengine.api import users
from vendors import models
class RequestUserMiddleware(object):
    """Middleware that attaches the current user, account and admin flag to
    each request.

    The cached ``UserAccount.current_account`` class variable is refreshed
    as well.
    """

    def process_request(self, request):
        current_user = users.get_current_user()
        request.user = current_user
        if current_user:
            user_account = models.UserAccount.get_account_for_user(current_user)
            models.UserAccount.current_account = user_account
            request.user_account = user_account
        request.is_admin = users.is_current_user_admin()
| {
"content_hash": "d2fe1ccedb23183b34e425262ff746e8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 29.19047619047619,
"alnum_prop": 0.6769983686786297,
"repo_name": "tectronics/engayged",
"id": "0088088976a0e75379806bb1815fbf49521ff68f",
"size": "1227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendors/middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1260"
},
{
"name": "HTML",
"bytes": "31186"
},
{
"name": "JavaScript",
"bytes": "2589"
},
{
"name": "Python",
"bytes": "61245"
}
],
"symlink_target": ""
} |
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from mimetypes import guess_type
from email.encoders import encode_base64
from smtplib import SMTP
class Email:
"""
Classe utilitaire permettant de construire la forme du courriel (From, To, Message , Attachement, etc.)
"""
def __init__(self, from_, to, subject, message, message_type='plain', attachments=None, message_encoding='utf-8'):
self.email = MIMEMultipart()
self.email['From'] = from_
self.email['To'] = to
self.email['Subject'] = subject
# Text brute
text = MIMEText(message, message_type, message_encoding)
self.email.attach(text)
# Attachements (image par exemple)
if attachments is not None:
self.attach_files(attachments)
"""
Permet d'attacher des fichiers (images par exemple) dans un courriel
"""
def attach_files(self, attachments):
for file_name in attachments:
# Récupérer le type de fichier
mimetype, encoding = guess_type(file_name)
mimetype = mimetype.split('/', 1)
# Lire le fichier
fp = open(file_name, 'rb')
# Ajouter l'attachement
attachment = MIMEBase(mimetype[0], mimetype[1])
attachment.set_payload(fp.read())
fp.close()
encode_base64(attachment)
attachment.add_header('Content-Disposition', 'attachment', filename=os.path.basename(file_name))
self.email.attach(attachment)
def __str__(self):
return self.email.as_string()
class EmailConnection:
    """Manage the connection to an SMTP server and send e-mail messages."""

    def __init__(self, smtp, port, username, password):
        self.smtp = smtp
        self.port = port
        self.username = username
        self.password = password
        # Connect eagerly so a misconfiguration fails fast.
        self.connect()

    def connect(self):
        """Connect to the SMTP server over STARTTLS and authenticate."""
        self.connection = SMTP(self.smtp, self.port)
        self.connection.ehlo()
        self.connection.starttls()
        # A second EHLO is required after STARTTLS upgrades the channel.
        self.connection.ehlo()
        self.connection.login(self.username, self.password)

    def close(self):
        """Close the connection to the server."""
        self.connection.close()

    def send(self, message, from_=None, to=None):
        """Send *message* (an ``Email`` instance or a raw string).

        For a raw string, *from_* and *to* are mandatory; for an ``Email``
        they are read from its headers.

        Raises:
            Exception: if a raw string is given without from_/to.
        """
        # isinstance (not type() ==) also accepts str subclasses.
        if isinstance(message, str):
            if from_ is None or to is None:
                raise Exception("Vous devez spécifier un champ 'from' et un champ 'to' pour envoyer votre courriel.")
        else:
            from_ = message.email['From']
            to = message.email['To']
            message = str(message)
        return self.connection.sendmail(from_, to, message)
| {
"content_hash": "1bc763fe12715b93d5f6fe07f6a03926",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 118,
"avg_line_length": 32.077777777777776,
"alnum_prop": 0.6127467959819882,
"repo_name": "ClubReflets/ImageMailer",
"id": "74c53f75129f7237e8faad1e5ed3d24eccb8a329",
"size": "2892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "email_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36230"
},
{
"name": "Python",
"bytes": "10560"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from laserchicken import keys
from laserchicken.feature_extractor.feature_extraction import compute_features
from laserchicken.test_tools import create_point_cloud, create_points_in_xy_grid
from laserchicken.volume_specification import InfiniteCylinder
class TestExtractSigmaZ(unittest.TestCase):
    """Tests for the sigma_z (standard deviation of z) feature extractor."""

    def test_constantValues_result0(self):
        """A constant z surface has zero standard deviation."""
        assert_std_for_z_function_in_xy_grid(lambda x, y: 2, 0)

    def test_checkered1sAnd0s_resultHalf(self):
        """Standard deviation of checker pattern of 0s and 1s should be 0.5"""
        assert_std_for_z_function_in_xy_grid(lambda x, y: ((x + y) % 2) + 2, 0.5)

    def test_checkered1sAnd0sPlusSkewed_resultHalf(self):
        """Standard deviation of checker pattern of 0s and 1s should be 0.5, adding a plane should not change that"""
        assert_std_for_z_function_in_xy_grid(
            lambda x, y: ((x + y) % 2) + 2 + x + y, 0.5)
def assert_std_for_z_function_in_xy_grid(z_checkered, expected):
    """Assert that the standard deviation of z values in a grid of unit x and y
    spacing equals *expected*."""
    # Build a point cloud whose z values come from the given function on an x/y grid.
    n_points, points = create_points_in_xy_grid(z_checkered)
    point_cloud = create_point_cloud(points[:, 0], points[:, 1], points[:, 2])
    # Single target at the origin; the cylinder radius (10) covers the grid.
    targets = create_point_cloud([0], [0], [0])
    compute_features(point_cloud, [range(n_points)], targets, [
        'sigma_z'], InfiniteCylinder(10))
    np.testing.assert_almost_equal(
        targets[keys.point]['sigma_z']['data'][0], expected)
| {
"content_hash": "22b8f6079190486206916c687a0dda13",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 117,
"avg_line_length": 35.08695652173913,
"alnum_prop": 0.6716232961586122,
"repo_name": "eEcoLiDAR/eEcoLiDAR",
"id": "e47acadd8453a92a5ab4310b3446220522a18a24",
"size": "1614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "laserchicken/feature_extractor/test_sigma_z_feature_extractor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143451"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from cats.models import Breed
class BreedSerializer(serializers.ModelSerializer):
    """Serializer for Breed model instances, exposing only id and name."""
    class Meta:
        # Restrict the API representation to the primary key and name.
        model = Breed
        fields = ('id', 'name',)
| {
"content_hash": "86bee4f9dae38a410d9f41140450dcf2",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 51,
"avg_line_length": 22.8,
"alnum_prop": 0.6842105263157895,
"repo_name": "OscaRoa/api-cats",
"id": "ae8f77a0e8450f6bfdf2574053fcfd6e46b2f3e7",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cats/serializers/breed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13238"
}
],
"symlink_target": ""
} |
class no_update(object):
    """Context manager that suppresses updates on *model* while active.

    On entry the model's ``_no_update`` flag is raised; on exit it is
    cleared and, when *fire_update_needed* is true, ``update_needed`` is
    set on models that define that attribute.
    """
    _model = None

    def __init__(self, model, fire_update_needed=True):
        self._model = model
        self._fire_update_needed = fire_update_needed

    def __enter__(self):
        target = self._model
        if not target:
            return
        target._no_update = True

    def __exit__(self, _type, value, _traceback):
        target = self._model
        if not target:
            return
        target._no_update = False
        if self._fire_update_needed and hasattr(target, 'update_needed'):
            target.update_needed = True
# ============= EOF =============================================
| {
"content_hash": "64fd8cb7b342a9d22a574c97c5bf41c0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 32.1578947368421,
"alnum_prop": 0.4909983633387889,
"repo_name": "UManPychron/pychron",
"id": "92c34b66013cf5700af63d2dbc3b880a852a2d04",
"size": "1547",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/core/helpers/ctx_managers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
} |
import re
import logging
class Nucleotide(object):
    """The Nucleotide class represents DNA changes in the COSMIC database.

    The Nucleotide class follows the syntax of HGVS
    (http://www.hgvs.org/mutnomen/recs-DNA.html).
    """

    def __init__(self, hgvs='', occurrence=1,
                 len5ss=2, len3ss=-2):
        # hgvs: DNA-level HGVS string, optionally prefixed with "c."
        # occurrence: number of observations of this mutation
        # len5ss/len3ss: intronic window counted as splice site; positions in
        # [len3ss, len5ss] (default -2..+2) are considered splice-site hits.
        self.logger = logging.getLogger(__name__)
        self.occurrence = occurrence
        self.len5ss = len5ss  # 5' splice site len
        self.len3ss = len3ss  # 3' splice site len
        self.hgvs_original = hgvs  # unmodified hgvs copy
        self.hgvs = hgvs if not hgvs.startswith('c.') else hgvs[2:]  # modified
        self.set_nucleotide()
        self.set_mutation_type()

    def set_nucleotide(self, hgvs_nuc=''):
        """Parse *hgvs_nuc* (or self.hgvs) and populate mutation attributes."""
        hgvs_tmp = hgvs_nuc if hgvs_nuc else self.hgvs
        self.__set_unknown_effect(hgvs_tmp)  # completely unknown
        self.__set_missing_info(hgvs_tmp)  # has missing information
        self.__set_nucleotide_mutation(hgvs_tmp)  # set mutation type
        self.__parse_hgvs_syntax(hgvs_tmp)

    def set_mutation_type(self, mut_type=''):
        """Sets a string designating the mutation type.

        Args:
            mut_type (str): name of mutation type
        """
        if mut_type:
            # specified mutation type
            self.mutation_type = mut_type
        else:
            # interpret mutation type from attributes
            if not self.is_valid:
                # does not correctly fall into a category
                self.mutation_type = 'not valid'
            elif self.unknown_effect:
                self.mutation_type = 'unknown effect'
            elif self.is_missing_info:
                self.mutation_type = 'missing'
            elif self.is_substitution:
                self.mutation_type = 'substitution'
            elif self.is_deletion:
                self.mutation_type = 'deletion'
            elif self.is_insertion:
                self.mutation_type = 'insertion'
        # check if mutation at splice site
        self.__set_splice_mutation()

    def __set_splice_mutation(self):
        """Set the is_splicing_mutation flag"""
        #len5ss = 6  # positive number since 5SS
        #len3ss = -20  # use negative syntax like HGVS
        if type(self.intron_pos) == int:
            # SNV case, only one position
            if self.len3ss <= self.intron_pos <= self.len5ss:
                self.is_splicing_mutation = True
            else:
                self.is_splicing_mutation = False
        elif type(self.intron_pos) == list:
            # deletion case, now have a window to check overlap
            if self.intron_pos[0]:
                first_in_splice = self.len3ss <= self.intron_pos[0] <= self.len5ss
                tmp_pos1 = self.intron_pos[0]
            else:
                first_in_splice = False
                tmp_pos1 = 0
            if self.intron_pos[1]:
                second_in_splice = self.len3ss <= self.intron_pos[1] <= self.len5ss
                tmp_pos2 = self.intron_pos[1]
            else:
                second_in_splice = False
                tmp_pos2 = 0
            # set splice site mutation flag
            if first_in_splice or second_in_splice:
                self.is_splicing_mutation = True
            elif (tmp_pos1 == 0 and tmp_pos2 > self.len5ss) or (tmp_pos1 < self.len3ss and tmp_pos2 == 0):
                # one endpoint exonic (recorded as 0) and the other past the
                # window on the opposite side -> the window is spanned
                self.is_splicing_mutation = True
            else:
                self.is_splicing_mutation = False
        else:
            self.is_splicing_mutation = False

    def __set_unknown_effect(self, hgvs_str):
        """Sets a flag for unkown effect (c.? or ?).

        Note: Unavailable information according to HGVS is usually
        marked with a c.?, ?, or parethesis.

        Args:
            hgvs_str (str): DNA HGVS string
        """
        unknown_effect_list = ['c.?', '?']
        if hgvs_str.lower() in unknown_effect_list:
            self.unknown_effect = True
        elif hgvs_str.startswith("("):
            self.unknown_effect = True
        else:
            self.unknown_effect = False

    def __set_missing_info(self, hgvs_str):
        """Sets a flag for missing data (? in HGVS syntax).

        Args:
            hgvs_str (str): DNA HGVS string
        """
        if '?' in hgvs_str:
            self.is_missing_info = True
        else:
            self.is_missing_info = False

    def __set_nucleotide_mutation(self, hgvs_str):
        """Interpret the HGVS syntax and set appropriate mutation type
        attributes (substitution, insertion, etc.).

        Args:
            hgvs_str (str): string representing HGVS DNA mutation (no "c.")
        """
        self.__set_substitution_status(hgvs_str)
        self.__set_indel_status(hgvs_str)

    def __set_substitution_status(self, hgvs_str):
        # A '>' character marks a substitution in HGVS DNA syntax.
        self.is_substitution = '>' in hgvs_str

    def __set_indel_status(self, hgvs_str):
        """Sets attribute flags for whether mutation is a insertion, deletion,
        and indel.
        """
        # set deletion status
        self.is_deletion = 'del' in hgvs_str
        # set insertion status
        self.is_insertion = 'ins' in hgvs_str
        # set indel status
        if self.is_insertion or self.is_deletion:
            self.is_indel = True
        else:
            self.is_indel = False

    def __parse_hgvs_syntax(self, hgvs_str):
        """Parse the HGVS DNA mutation string to set attributes.

        Look at tests/test_nucleotide.py for examples on how
        specific HGVS strings should be parsed.

        Args:
            hgvs_str (str): DNA HGVS string
        """
        self.is_valid = True  # assume initially the syntax is valid
        if self.is_substitution:
            sub_pattern = '(?:(\d+)([+-]\d+)?_)?(\d+)([+-]\d+)?([A-Z]+)>([A-Z]+)$'
            matches = re.findall(sub_pattern, hgvs_str)
            if matches:
                init_pos, init_intron, reg_pos, reg_intron, initial, mutated = matches[0]
                if not init_pos:
                    # single-position substitution
                    self.pos = int(reg_pos)
                    self.intron_pos = int(reg_intron) if reg_intron != '' else None
                    self.initial = initial
                    self.mutated = mutated
                else:
                    # ranged substitution: pos/intron_pos become [start, end]
                    init_pos = init_pos.strip('_')  # remove separating underscore
                    self.pos = [int(init_pos), int(reg_pos)]
                    intron_tmp1 = int(init_intron) if init_intron != '' else None
                    intron_tmp2 = int(reg_intron) if reg_intron != '' else None
                    self.intron_pos = [intron_tmp1, intron_tmp2]
                    self.initial = initial
                    self.mutated = mutated
            else:
                self.is_valid = False
                self.intron_pos = None
                self.logger.debug('(Parsing-Problem) Invalid DNA Substitution: ' + hgvs_str)
                return
        elif self.is_deletion:
            del_pattern = '(?:([0-9?]+)([-+]\d+)?(?:_))?([0-9?]+)([-+]\d+)?del([A-Z?0-9]+)$'
            matches = re.findall(del_pattern, hgvs_str)
            if matches:
                init_pos, init_intron, reg_pos, reg_intron, del_nuc = matches[0]
                if not init_pos:
                    # only one nucleotide deleted
                    self.pos = int(reg_pos) if reg_pos != '?' else reg_pos
                    self.intron_pos = int(reg_intron) if reg_intron != '' else None
                    self.mutated = ''
                    self.initial = del_nuc
                else:
                    # more than one nucleotide deleted
                    init_pos = init_pos.strip('_')  # remove '_' because of regex
                    pos1 = int(init_pos) if init_pos != '?' else init_pos
                    pos2 = int(reg_pos) if reg_pos != '?' else reg_pos
                    self.pos = [pos1, pos2]
                    intron_tmp1 = int(init_intron) if init_intron != '' else None
                    intron_tmp2 = int(reg_intron) if reg_intron != '' else None
                    self.intron_pos = [intron_tmp1, intron_tmp2]
                    self.mutated = ''
                    self.initial = del_nuc
            else:
                # NOTE(review): other no-match branches set intron_pos = None
                # and is_valid = False; here intron_pos is set to False and
                # is_valid stays True -- confirm this inconsistency is intended.
                self.intron_pos = False
        elif self.is_insertion:
            ins_pattern = '(?:([0-9?]+)([-+]\d+)?(?:_))?([0-9?]+)([-+]\d+)?ins([A-Z?0-9]+)$'
            matches = re.findall(ins_pattern, hgvs_str)
            if matches:
                init_pos, init_intron, reg_pos, reg_intron, ins_nuc = matches[0]
                if not init_pos:
                    # only one nucleotide inserted
                    self.pos = int(reg_pos) if reg_pos != '?' else reg_pos
                    self.intron_pos = int(reg_intron) if reg_intron != '' else None
                    self.initial = ''
                    self.mutated = ins_nuc
                else:
                    # more than one nucleotide inserted
                    init_pos = init_pos.strip('_')  # remove '_' because of regex
                    pos1 = int(init_pos) if init_pos != '?' else init_pos
                    pos2 = int(reg_pos) if reg_pos != '?' else reg_pos
                    self.pos = [pos1, pos2]
                    intron_tmp1 = int(init_intron) if init_intron != '' else None
                    intron_tmp2 = int(reg_intron) if reg_intron != '' else None
                    self.intron_pos = [intron_tmp1, intron_tmp2]
                    self.initial = ''
                    self.mutated = ins_nuc
            else:
                # NOTE(review): is_valid is not set to False here, unlike the
                # substitution no-match branch -- confirm intended.
                self.intron_pos = None
        elif self.unknown_effect:
            # unknown effect for mutation. usually denoted as c.?
            self.intron_pos = None
            return
        else:
            # mutation did not fall into any of the categories. thus it likely
            # has invalid syntax
            self.is_valid = False
            self.intron_pos = None
            self.logger.debug('(Parsing-Problem) Invalid HGVS DNA syntax: ' + hgvs_str)
            return
| {
"content_hash": "d336069a463d6a9ed96132c84a764e67",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 106,
"avg_line_length": 41.300411522633745,
"alnum_prop": 0.5195296931048227,
"repo_name": "KarchinLab/2020plus",
"id": "d8c74443349140304040e108f36e80652d148a08",
"size": "10036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utils/python/nucleotide.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "260467"
},
{
"name": "Shell",
"bytes": "539"
}
],
"symlink_target": ""
} |
import view

if __name__ == '__main__':
    # Start the game only when executed directly, not when imported.
    view.main()
| {
"content_hash": "2f5ac2be81f16f683d7a0e7c004814fa",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 11,
"avg_line_length": 13,
"alnum_prop": 0.6923076923076923,
"repo_name": "surru/Three-Musketeers-Game",
"id": "adb2178f58e69c6351de9a57e5248ceec430955d",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interactive game play/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6199"
}
],
"symlink_target": ""
} |
import logging
import os
import shutil
import socket
from jinja2 import Environment, FileSystemLoader
from cekit import tools
from cekit.descriptor import Env, Image, Label, Module, Overrides, Repository
from cekit.errors import CekitError
from cekit.version import version as cekit_version
from cekit.template_helper import TemplateHelper
logger = logging.getLogger('cekit')
class Generator(object):
"""This class process Image descriptor(self.image) and uses it to generate
target directory by fetching all dependencies and artifacts
Args:
descriptor_path - path to an image descriptor
target - path to target directory
builder - builder type
overrides - path to overrides file (can be None)
params - dictionary of builder specific parameterss
"""
def __new__(cls, descriptor_path, target, builder, overrides, params):
    # Factory: when instantiated as the base Generator, pick the concrete
    # implementation matching the requested builder engine.
    if cls is Generator:
        if 'docker' == builder or 'buildah' == builder:
            from cekit.generator.docker import DockerGenerator as GeneratorImpl
            logger.info('Generating files for %s engine.' % builder)
        elif 'osbs' == builder:
            from cekit.generator.osbs import OSBSGenerator as GeneratorImpl
            logger.info('Generating files for OSBS engine.')
        else:
            raise CekitError("Unsupported generator type: '%s'" % builder)
        return super(Generator, cls).__new__(GeneratorImpl)
def __init__(self, descriptor_path, target, builder, overrides, params):
    """Load the image descriptor and collect all override descriptors."""
    self._type = builder
    descriptor = tools.load_descriptor(descriptor_path)
    # if there is a local modules directory and no modules are defined
    # we will inject it for a backward compatibility
    local_mod_path = os.path.join(os.path.abspath(os.path.dirname(descriptor_path)), 'modules')
    if os.path.exists(local_mod_path) and 'modules' in descriptor:
        modules = descriptor.get('modules')
        if not modules.get('repositories'):
            modules['repositories'] = [{'path': local_mod_path, 'name': 'modules'}]
    self.image = Image(descriptor, os.path.dirname(os.path.abspath(descriptor_path)))
    self._overrides = []
    self.target = target
    self._params = params
    self._fetch_repos = False
    self._module_registry = ModuleRegistry()
    if overrides:
        for override in overrides:
            # TODO: If the overrides is provided as text, why do we try to get path to it?
            logger.debug("Loading override '%s'" % (override))
            self._overrides.append(Overrides(tools.load_descriptor(
                override), os.path.dirname(os.path.abspath(override))))
    # These should always come last
    if self._params.get('tech_preview', False):
        # Modify the image name, after all other overrides have been processed
        self._overrides.append(self.get_tech_preview_overrides())
    if self._params.get('redhat', False):
        # Add the redhat specific stuff after everything else
        self._overrides.append(self.get_redhat_overrides())
    logger.info("Initializing image descriptor...")
def init(self):
    """
    Initializes the generator.
    """
    # Resolve overrides/modules into the descriptor, fill in defaults and
    # copy the referenced modules into the target directory.
    self.process_image()
    self.image.process_defaults()
    self.copy_modules()
def generate(self):
    """Write the flattened descriptor, repositories, artifacts and Dockerfile
    into the target directory."""
    self.prepare_repositories()
    self.image.remove_none_keys()
    self.image.write(os.path.join(self.target, 'image.yaml'))
    self.prepare_artifacts()
    self.render_dockerfile()
def process_image(self):
    """
    Updates the image descriptor based on all overrides and included modules:

    1. Applies overrides to the image descriptor
    2. Loads modules from defined module repositories
    3. Flattens module dependency hierarchy
    4. Incorporates global image settings specified by modules into image descriptor

    The resulting image descriptor can be used in an 'offline' build mode.
    """
    # apply overrides to the image definition
    self.apply_image_overrides()
    # add build labels
    self.add_build_labels()
    # load the definitions of the modules
    self.build_module_registry()
    # process included modules
    self.apply_module_overrides()
def apply_image_overrides(self):
    # Apply every collected override descriptor (CLI overrides plus the
    # generated tech-preview/redhat ones) to the image descriptor.
    self.image.apply_image_overrides(self._overrides)
def add_build_labels(self):
    """Add standard labels (cekit version, description, summary) to the image."""
    image_labels = self.image.labels
    # we will persist cekit version in a label here, so we know which version of cekit
    # was used to build the image
    image_labels.extend([Label({'name': 'org.concrt.version', 'value': cekit_version}),
                         Label({'name': 'io.cekit.version', 'value': cekit_version})])
    # If we define the label in the image descriptor
    # we should *not* override it with value from
    # the root's key
    if self.image.description and not self.image.label('description'):
        image_labels.append(Label({'name': 'description', 'value': self.image.description}))
    # Last - if there is no 'summary' label added to image descriptor
    # we should use the value of the 'description' key and create
    # a 'summary' label with it's content. If there is even that
    # key missing - we should not add anything.
    description = self.image.label('description')
    if not self.image.label('summary') and description:
        image_labels.append(Label({'name': 'summary', 'value': description['value']}))
def apply_module_overrides(self):
    # Merge settings contributed by the installed modules into the image.
    self.image.apply_module_overrides(self._module_registry)
def build_module_registry(self):
    """Download every module repository and register the modules it contains."""
    base_dir = os.path.join(self.target, 'repo')
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    for repo in self.image.modules.repositories:
        logger.debug("Downloading module repository: '%s'" % (repo.name))
        repo.copy(base_dir)
        self.load_repository(os.path.join(base_dir, repo.target_file_name()))
def load_repository(self, repo_dir):
    """Walk *repo_dir* and register every directory containing a module.yaml."""
    for modules_dir, _, files in os.walk(repo_dir):
        if 'module.yaml' in files:
            module_descriptor_path = os.path.abspath(os.path.expanduser(
                os.path.normcase(os.path.join(modules_dir, 'module.yaml'))))
            module = Module(tools.load_descriptor(module_descriptor_path),
                            modules_dir,
                            os.path.dirname(module_descriptor_path))
            logger.debug("Adding module '%s', path: '%s'" % (module.name, module.path))
            self._module_registry.add_module(module)
def get_tags(self):
return ["%s:%s" % (self.image['name'], self.image[
'version']), "%s:latest" % self.image['name']]
def copy_modules(self):
    """Prepare module to be used for Dockerfile generation.
    This means:

    1. Place module to args.target/image/modules/ directory

    """
    target = os.path.join(self.target, 'image', 'modules')
    for module in self.image.modules.install:
        # Resolve the concrete module (name + version) from the registry.
        module = self._module_registry.get_module(module.name, module.version)
        logger.debug("Copying module '%s' required by '%s'."
                     % (module.name, self.image.name))
        dest = os.path.join(target, module.name)
        if not os.path.exists(dest):
            logger.debug("Copying module '%s' to: '%s'" % (module.name, dest))
            shutil.copytree(module.path, dest)
        # write out the module with any overrides
        module.write(os.path.join(dest, "module.yaml"))
def _generate_expose_services(self):
    """Generate the label io.openshift.expose-services based on the port
    definitions."""
    ports = []
    for p in self.image['ports']:
        if p.get('expose', True):
            r = "{}/{}".format(p['value'], p.get('protocol', 'tcp'))
            if 'service' in p:
                r += ":{}".format(p['service'])
                ports.append(r)
            else:
                # attempt to supply a service name by looking up the socket number
                try:
                    service = socket.getservbyport(p['value'], p.get('protocol', 'tcp'))
                    r += ":{}".format(service)
                    ports.append(r)
                # NOTE(review): ports with no 'service' and no well-known
                # service name are silently omitted -- confirm intended.
                except OSError:  # py3
                    pass
                except socket.error:  # py2
                    pass
    return ",".join(ports)
def get_tech_preview_overrides(self):
    """Return an Overrides object rewriting the image name for tech preview.

    'family/name' becomes 'family-tech-preview/name'; a plain 'name'
    becomes 'name-tech-preview'.
    """
    class TechPreviewOverrides(Overrides):
        def __init__(self, image):
            super(TechPreviewOverrides, self).__init__({}, None)
            self._image = image

        @property
        def name(self):
            new_name = self._image.name
            if '/' in new_name:
                family, new_name = new_name.split('/')
                new_name = "%s-tech-preview/%s" % (family, new_name)
            else:
                new_name = "%s-tech-preview" % new_name
            return new_name

    return TechPreviewOverrides(self.image)
def get_redhat_overrides(self):
    """Return an Overrides object adding Red Hat specific envs and labels."""
    class RedHatOverrides(Overrides):
        def __init__(self, generator):
            super(RedHatOverrides, self).__init__({}, None)
            self._generator = generator

        @property
        def envs(self):
            # Expose the image coordinates as environment variables.
            return [
                Env({'name': 'JBOSS_IMAGE_NAME',
                     'value': '%s' % self._generator.image['name']}),
                Env({'name': 'JBOSS_IMAGE_VERSION',
                     'value': '%s' % self._generator.image['version']})
            ]

        @property
        def labels(self):
            labels = [
                Label({'name': 'name', 'value': '%s' % self._generator.image['name']}),
                Label({'name': 'version', 'value': '%s' % self._generator.image['version']})
            ]
            # do not override this label if it's already set
            if self._generator.image.get('ports', []) and \
                    'io.openshift.expose-services' not in [k['name'] for k in self._generator.image['labels']]:
                labels.append(Label({'name': 'io.openshift.expose-services',
                                     'value': self._generator._generate_expose_services()}))
            return labels

    return RedHatOverrides(self)
def render_dockerfile(self):
    """Renders Dockerfile to $target/image/Dockerfile"""
    logger.info("Rendering Dockerfile...")
    self.image['pkg_manager'] = self._params.get('package_manager', 'yum')
    template_file = os.path.join(os.path.dirname(__file__),
                                 '..',
                                 'templates',
                                 'template.jinja')
    loader = FileSystemLoader(os.path.dirname(template_file))
    env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
    env.globals['helper'] = TemplateHelper(self._module_registry)
    env.globals['image'] = self.image
    env.globals['addhelp'] = self._params.get('addhelp')
    template = env.get_template(os.path.basename(template_file))
    dockerfile = os.path.join(self.target,
                              'image',
                              'Dockerfile')
    if not os.path.exists(os.path.dirname(dockerfile)):
        os.makedirs(os.path.dirname(dockerfile))
    with open(dockerfile, 'wb') as f:
        f.write(template.render(
            self.image).encode('utf-8'))
    logger.debug("Dockerfile rendered")
    # Pick the help template: image descriptor wins, then CLI parameter,
    # then the bundled default template.
    if self.image.get('help', {}).get('template', ""):
        help_template_path = self.image['help']['template']
    elif self._params.get('help_template'):
        help_template_path = self._params['help_template']
    else:
        help_template_path = os.path.join(os.path.dirname(__file__),
                                          '..',
                                          'templates',
                                          'help.jinja')
    help_dirname, help_basename = os.path.split(help_template_path)
    loader = FileSystemLoader(help_dirname)
    env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
    env.globals['helper'] = TemplateHelper(self._module_registry)
    help_template = env.get_template(help_basename)
    helpfile = os.path.join(self.target, 'image', 'help.md')
    with open(helpfile, 'wb') as f:
        f.write(help_template.render(
            self.image).encode('utf-8'))
    logger.debug("help.md rendered")
def prepare_repositories(self):
    """ Prepare repositories for build time injection. """
    if 'packages' not in self.image:
        return

    packages = self.image['packages']

    # ContentSets repositories are exclusive: drop everything else.
    if packages.get('content_sets'):
        logger.warning(
            'The image has ContentSets repositories specified, all other repositories are removed!')
        packages['repositories'] = []

    # Keep only the repositories that _handle_repository prepared for injection.
    injected_repos = [repo for repo in packages.get('repositories', [])
                      if self._handle_repository(repo)]

    if packages.get('content_sets'):
        url = self._prepare_content_sets(packages.get('content_sets'))
        if url:
            injected_repos.append(Repository({'name': 'content_sets_odcs',
                                              'url': {'repository': url}}))
            self._fetch_repos = True

    if self._fetch_repos:
        # Download each repo file into the image build context.
        repos_target = os.path.join(self.target, 'image', 'repos')
        for repo in injected_repos:
            repo.fetch(repos_target)
        packages['repositories_injected'] = injected_repos
    else:
        packages['set_url'] = injected_repos
def _handle_repository(self, repo):
    """Process and prepares all v2 repositories.

    Args:
      repo a repository to process

    Returns True if repository file is prepared and should be injected"""
    logger.debug("Loading configuration for repository: '%s' from '%s'."
                 % (repo['name'],
                    'repositories-%s' % self._type))

    # Plain repositories are assumed to already exist inside the image.
    if 'id' in repo:
        logger.warning("Repository '%s' is defined as plain. It must be available "
                       "inside the image as Cekit will not inject it."
                       % repo['name'])
        return False

    if 'content_sets' in repo:
        self._fetch_repos = True
        return self._prepare_content_sets(repo)

    # RPM-provided repositories are installed, not injected as files.
    if 'rpm' in repo:
        self._prepare_repository_rpm(repo)
        return False

    if 'url' in repo:
        return True

    return False
def _prepare_content_sets(self, content_sets):
    """Subclass hook: build a repository from content sets definitions."""
    raise NotImplementedError("Content sets repository injection not implemented!")

def _prepare_repository_rpm(self, repo):
    """Subclass hook: install a repository provided by an RPM package."""
    raise NotImplementedError("RPM repository injection was not implemented!")

def prepare_artifacts(self):
    """Subclass hook: fetch/prepare artifacts required by the image build."""
    raise NotImplementedError("Artifacts handling is not implemented")
class ModuleRegistry(object):
    """In-memory registry of modules keyed by name and version.

    For every module name a mapping of version string -> module is kept;
    the first module registered for a name is additionally stored under
    the special 'default' key so lookups without a version can succeed.
    """

    def __init__(self):
        # name -> {version: module, ..., 'default': first-registered module}
        self._modules = {}

    def get_module(self, name, version=None):
        """Return the module registered under *name*.

        When *version* is None the first-registered ('default') module is
        returned, with a warning if several versions exist. Returns None
        when nothing matches.
        """
        versions = self._modules.get(name, {})

        if version is None:
            default = versions.get('default')
            # 'default' is an extra alias entry, so more than 2 keys means
            # more than one real version was registered.
            if len(versions) > 2:
                logger.warning("Module version not specified for %s, using %s version." %
                               (name, default.version))
            return default

        return versions.get(version)

    def add_module(self, module):
        """Register *module*, raising CekitError on a duplicate name/version."""
        versions = self._modules.setdefault(module.name, {})

        # A missing version is stored under the literal key 'None'.
        version = module.version or 'None'

        if versions.get(version):
            raise CekitError("Duplicate module (%s:%s) found while processing module repository"
                             % (module.name, module.version))

        if not versions:
            # First version seen for this name becomes the default.
            versions['default'] = module
        versions[version] = module
| {
"content_hash": "6e8e8c3fe86672558ebeadd0b018e439",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 115,
"avg_line_length": 40.8768115942029,
"alnum_prop": 0.5673934881522189,
"repo_name": "jboss-container-images/concreate",
"id": "dad5d06c3cb91beb89ae5e0ab40b1e7dacc1ece7",
"size": "16948",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cekit/generator/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "48"
},
{
"name": "HTML",
"bytes": "3182"
},
{
"name": "Makefile",
"bytes": "747"
},
{
"name": "Python",
"bytes": "90236"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import shutil
import tempfile
import unittest
from .client_api import CodeSearch, XrefNode
from .messages import KytheNodeKind, CompoundResponse, NodeEnumKind, \
CallGraphResponse, Node
from .testing_support import InstallTestRequestHandler, LastRequest, \
TestDataDir, DisableNetwork, EnableNetwork, DumpCallers
SOURCE_ROOT = '/src/chrome/'
class TestCodeSearch(unittest.TestCase):
    """Exercises the CodeSearch client against recorded request/response data."""

    def setUp(self):
        # Route all HTTP traffic through the canned test request handler so
        # tests run offline against recorded responses.
        InstallTestRequestHandler()

    def tearDown(self):
        DumpCallers()

    def Touch(self, path):
        """Create an empty file at |path|."""
        with open(path, 'w'):
            pass

    def test_user_agent(self):
        TARGET_FILE = '/src/chrome/src/net/http/http_version.h'
        codesearch = CodeSearch(source_root=SOURCE_ROOT)
        response = codesearch.GetAnnotationsForFile(TARGET_FILE)

        self.assertTrue(isinstance(response, CompoundResponse))
        self.assertTrue(hasattr(response, 'annotation_response'))

        # Default user agent should identify this client library.
        request = LastRequest()
        self.assertIsNotNone(request)
        self.assertTrue(
            request.get_header('User-agent').startswith(
                'Python-CodeSearch-Client'))

        # An explicit user_agent_string overrides the default entirely.
        codesearch = CodeSearch(source_root=SOURCE_ROOT,
                                user_agent_string='Foo')
        codesearch.GetAnnotationsForFile(TARGET_FILE)
        request = LastRequest()
        self.assertEqual('Foo', request.get_header('User-agent'))

    def test_get_signatures_for_symbol(self):
        TARGET_FILE = '/src/chrome/src/base/metrics/field_trial.h'
        cs = CodeSearch(source_root=SOURCE_ROOT)

        # Without a kind filter, all matching signatures are returned.
        signatures = cs.GetSignaturesForSymbol(TARGET_FILE, 'FieldTrial')
        self.assertEqual(7, len(signatures))

        # Filtering by Kythe node kind narrows the result set.
        signatures = cs.GetSignaturesForSymbol(TARGET_FILE, 'FieldTrial',
                                               KytheNodeKind.RECORD_CLASS)
        self.assertEqual(2, len(signatures))

        signatures = cs.GetSignaturesForSymbol(
            TARGET_FILE, 'FieldTrial', KytheNodeKind.FUNCTION_CONSTRUCTOR)
        self.assertEqual(3, len(signatures))

    def test_gob_revision(self):
        TARGET_FILE = '/src/chrome/src/README.md'
        cs = CodeSearch(source_root=SOURCE_ROOT)
        # Revision is unknown until the first file-info fetch populates it.
        self.assertEqual('', cs.GetRevision())
        cs.GetFileInfo(TARGET_FILE)
        # A populated revision is a 40-character Git hash.
        self.assertEqual(40, len(cs.GetRevision()))

    def test_get_signature_for_symbol(self):
        # These values are likely to change pretty often. So this test will
        # likely fail each time we refresh the test data corpus. If that
        # happens, open up field_trial.h in https://cs.chromium.org and verify
        # that the tickets that get picked up by the API make sense.
        TARGET_FILE = '/src/chrome/src/base/metrics/field_trial.h'
        cs = CodeSearch(source_root=SOURCE_ROOT)

        # A class definition. The name appears numerous times in the file.
        self.assertEqual(
            cs.GetSignatureForSymbol(TARGET_FILE, 'FieldTrial'),
            'kythe://chromium.googlesource.com/chromium/src?'
            'lang=c%2B%2B?path=src/base/metrics/field_trial.h'
            '#FieldTrial%3Abase%23c%23cGxmCcu4cj8')

        # An enum defined within the class.
        self.assertEqual(
            cs.GetSignatureForSymbol(TARGET_FILE, 'RandomizationType'),
            'kythe://chromium.googlesource.com/chromium/src?'
            'lang=c%2B%2B?path=src/base/metrics/field_trial.h#'
            'sffJe7wAnF2I9rS3Yd-8_cTJryczxcrLGG1xREnxhKU')

        # A struct field.
        self.assertEqual(
            cs.GetSignatureForSymbol(TARGET_FILE, 'pickle_size'),
            'kythe://chromium.googlesource.com/chromium/src?'
            'lang=c%2B%2B?path=src/base/metrics/field_trial.h#'
            'w8YJrCAvr5uKFCpnBIfsSMEMlxmFcWDmfykoysBsuHk')

        # A parameter to a function.
        self.assertEqual(
            cs.GetSignatureForSymbol(TARGET_FILE, 'override_entropy_provider'),
            'kythe://chromium.googlesource.com/chromium/src?lang=c%2B%2B?'
            'path=src/base/metrics/field_trial.h#'
            'tkn_wFwczggf4CQsRA0v4FGt6Px7kO4_EOyeqlNUDlY')

    def test_search_for_symbol(self):
        cs = CodeSearch(source_root='.')
        signatures = cs.SearchForSymbol('base::FieldTrial$', NodeEnumKind.CLASS)
        self.assertEqual(1, len(signatures))
        self.assertTrue(isinstance(signatures[0], XrefNode))

        signatures = cs.SearchForSymbol('URLRequestJob', NodeEnumKind.CLASS)
        self.assertEqual(1, len(signatures))
        self.assertTrue(isinstance(signatures[0], XrefNode))

        # By default only the first analyzed match is returned ...
        signatures = cs.SearchForSymbol('BackgroundSyncService::Register',
                                        NodeEnumKind.METHOD)
        self.assertEqual(1, len(signatures))

        # ... while return_all_results surfaces every match.
        signatures = cs.SearchForSymbol('BackgroundSyncService::Register',
                                        NodeEnumKind.METHOD,
                                        return_all_results=True)
        self.assertEqual(2, len(signatures))

    def test_get_call_graph(self):
        cs = CodeSearch(source_root='.')
        refs = cs.SearchForSymbol('HttpAuth::ChooseBestChallenge',
                                  NodeEnumKind.FUNCTION)
        self.assertEqual(1, len(refs))
        self.assertIsInstance(refs[0], XrefNode)

        cg_response = cs.GetCallGraph(signature=refs[0].GetSignature())
        self.assertIsInstance(cg_response, CompoundResponse)
        self.assertIsInstance(cg_response.call_graph_response[0],
                              CallGraphResponse)
        self.assertIsInstance(cg_response.call_graph_response[0].node, Node)

    def test_fixed_cache(self):
        fixed_cache_dir = os.path.join(TestDataDir(), 'fixed_cache')

        # There are no resources corresponding to the requests that are going to
        # be made under this test. Instead there are cached resources. The cache
        # expiration is set for 10 years, which should be long enough for
        # anybody.
        # Note that whenever the request parameters change, the fixed cache will
        # stop working. Hence we need to regenerate the test data. To do that:
        #
        # - Remove all the files from testdata/fixed_cache/*
        # - Comment out the DisableNetwork() call below.
        # - Run the test in rebaseline mode.
        # - *DONT* add any of the new cache entries added to testdata/resource/*
        # - *DO* add the new files that show up in testdata/fixed_cache/*
        DisableNetwork()
        cs = CodeSearch(source_root='.',
                        should_cache=True,
                        cache_dir=fixed_cache_dir,
                        cache_timeout_in_seconds=10 * 365 * 24 * 60 * 60)
        try:
            signatures = cs.SearchForSymbol('URLRequestHttpJob',
                                            NodeEnumKind.CLASS,
                                            max_results_to_analyze=50)
        finally:
            EnableNetwork()
            cs.TeardownCache()
        self.assertEqual(1, len(signatures))

    def test_with_cache_dir(self):
        test_dir = tempfile.mkdtemp()
        try:
            cs = CodeSearch(source_root='.',
                            should_cache=True,
                            cache_dir=test_dir)
            try:
                signatures = cs.SearchForSymbol('URLRequestJob',
                                                NodeEnumKind.CLASS)
            finally:
                cs.TeardownCache()
            self.assertEqual(1, len(signatures))

            entries = os.listdir(test_dir)
            # Test the count of entries. The exact set of entries will change
            # from time to time due to changes in queries and responses.
            self.assertEqual(3, len(entries))
        finally:
            shutil.rmtree(test_dir)
# Allow running this test module directly: `python test_client_api.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "8387c64b7c314fba2a7458fcbcf108d8",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 80,
"avg_line_length": 40.09183673469388,
"alnum_prop": 0.6138966658182744,
"repo_name": "chromium/codesearch-py",
"id": "622d6513320e57cf6b23dc4074f115d92f87aff3",
"size": "8065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codesearch/test_client_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "166960"
},
{
"name": "Shell",
"bytes": "3573"
}
],
"symlink_target": ""
} |
import sqlalchemy
def upgrade(migrate_engine):
    """Rename the event table's ``logical_resource_id`` column to ``resource_name``."""
    metadata = sqlalchemy.MetaData()
    metadata.bind = migrate_engine

    event_table = sqlalchemy.Table('event', metadata, autoload=True)
    event_table.c.logical_resource_id.alter(name='resource_name')
def downgrade(migrate_engine):
    """Revert the rename: ``resource_name`` back to ``logical_resource_id``."""
    metadata = sqlalchemy.MetaData()
    metadata.bind = migrate_engine

    event_table = sqlalchemy.Table('event', metadata, autoload=True)
    event_table.c.resource_name.alter(name='logical_resource_id')
| {
"content_hash": "4f952ae4585780f4ec6b8329ae086c58",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 29.866666666666667,
"alnum_prop": 0.7142857142857143,
"repo_name": "redhat-openstack/heat",
"id": "02cdd87d1ee6cdd7b54add61adad104edc8caf47",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "heat/db/sqlalchemy/migrate_repo/versions/024_event_resource_name.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4827027"
},
{
"name": "Shell",
"bytes": "26720"
}
],
"symlink_target": ""
} |
"""Tests for the HomeKit component."""
from __future__ import annotations
import asyncio
import os
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, patch
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_CAMERA, CATEGORY_TELEVISION
import pytest
from homeassistant import config as hass_config
from homeassistant.components import homekit as homekit_base, zeroconf
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_MOTION,
)
from homeassistant.components.homekit import (
MAX_DEVICES,
STATUS_READY,
STATUS_RUNNING,
STATUS_STOPPED,
STATUS_WAIT,
HomeKit,
)
from homeassistant.components.homekit.accessories import HomeBridge
from homeassistant.components.homekit.const import (
BRIDGE_NAME,
BRIDGE_SERIAL_NUMBER,
CONF_AUTO_START,
DEFAULT_PORT,
DOMAIN,
HOMEKIT,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODE_BRIDGE,
SERVICE_HOMEKIT_RESET_ACCESSORY,
SERVICE_HOMEKIT_START,
SERVICE_HOMEKIT_UNPAIR,
)
from homeassistant.components.homekit.type_triggers import DeviceTriggerAccessory
from homeassistant.components.homekit.util import get_persist_fullpath_for_entry_id
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_DEVICE_ID,
ATTR_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PORT,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
EVENT_HOMEASSISTANT_STARTED,
PERCENTAGE,
SERVICE_RELOAD,
STATE_ON,
)
from homeassistant.core import HomeAssistantError, State
from homeassistant.helpers import device_registry
from homeassistant.helpers.entityfilter import (
CONF_EXCLUDE_DOMAINS,
CONF_EXCLUDE_ENTITIES,
CONF_EXCLUDE_ENTITY_GLOBS,
CONF_INCLUDE_DOMAINS,
CONF_INCLUDE_ENTITIES,
CONF_INCLUDE_ENTITY_GLOBS,
convert_filter,
)
from homeassistant.setup import async_setup_component
from homeassistant.util import json as json_util
from .util import PATH_HOMEKIT, async_init_entry, async_init_integration
from tests.common import MockConfigEntry
IP_ADDRESS = "127.0.0.1"
def generate_filter(
    include_domains,
    include_entities,
    exclude_domains,
    exclude_entites,
    include_globs=None,
    exclude_globs=None,
):
    """Generate an entity filter using the standard method.

    ``exclude_entites`` keeps its historical (misspelled) name so keyword
    callers remain compatible.
    """
    filter_config = {
        CONF_INCLUDE_DOMAINS: include_domains,
        CONF_INCLUDE_ENTITIES: include_entities,
        CONF_EXCLUDE_DOMAINS: exclude_domains,
        CONF_EXCLUDE_ENTITIES: exclude_entites,
        CONF_INCLUDE_ENTITY_GLOBS: include_globs or [],
        CONF_EXCLUDE_ENTITY_GLOBS: exclude_globs or [],
    }
    return convert_filter(filter_config)
# autouse: every test in this module implicitly requests hk_driver,
# which presumably patches the accessory driver (fixture defined in
# conftest — confirm there).
@pytest.fixture(autouse=True)
def always_patch_driver(hk_driver):
    """Load the hk_driver fixture."""
def _mock_homekit(hass, entry, homekit_mode, entity_filter=None, devices=None):
    """Build a real HomeKit instance for *entry* with test-friendly defaults."""
    return HomeKit(
        hass=hass,
        name=BRIDGE_NAME,
        port=DEFAULT_PORT,
        ip_address=None,
        # Default to an empty (allow-nothing extra) filter when none given.
        entity_filter=entity_filter or generate_filter([], [], [], []),
        exclude_accessory_mode=False,
        entity_config={},
        homekit_mode=homekit_mode,
        advertise_ip=None,
        entry_id=entry.entry_id,
        entry_title=entry.title,
        devices=devices,
    )
def _mock_homekit_bridge(hass, entry):
    """Return a bridge-mode HomeKit instance with its driver mocked out."""
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
    homekit.driver = MagicMock()
    return homekit
def _mock_accessories(accessory_count):
accessories = {}
for idx in range(accessory_count + 1):
accessories[idx + 1000] = MagicMock(async_stop=AsyncMock())
return accessories
def _mock_pyhap_bridge():
    """Return a MagicMock standing in for a pyhap bridge (aid 1, 11 accessories)."""
    bridge = MagicMock(
        aid=1,
        accessories=_mock_accessories(10),
        display_name="HomeKit Bridge",
    )
    return bridge
async def test_setup_min(hass, mock_zeroconf):
    """Test async_setup with min config options."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
        options={},
    )
    entry.add_to_hass(hass)

    with patch(f"{PATH_HOMEKIT}.HomeKit") as mock_homekit, patch(
        "homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
    ):
        mock_homekit.return_value = homekit = Mock()
        type(homekit).async_start = AsyncMock()
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    # Defaults should be filled in, including the detected source IP.
    mock_homekit.assert_any_call(
        hass,
        BRIDGE_NAME,
        DEFAULT_PORT,
        "1.2.3.4",
        ANY,
        ANY,
        {},
        HOMEKIT_MODE_BRIDGE,
        None,
        entry.entry_id,
        entry.title,
        devices=[],
    )

    # Test auto start enabled
    hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
    await hass.async_block_till_done()

    assert mock_homekit().async_start.called is True


async def test_setup_auto_start_disabled(hass, mock_zeroconf):
    """Test async_setup with auto start disabled and test service calls."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_NAME: "Test Name", CONF_PORT: 11111, CONF_IP_ADDRESS: "172.0.0.0"},
        options={CONF_AUTO_START: False},
    )
    entry.add_to_hass(hass)

    with patch(f"{PATH_HOMEKIT}.HomeKit") as mock_homekit:
        mock_homekit.return_value = homekit = Mock()
        type(homekit).async_start = AsyncMock()
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    mock_homekit.assert_any_call(
        hass,
        "Test Name",
        11111,
        "172.0.0.0",
        ANY,
        ANY,
        {},
        HOMEKIT_MODE_BRIDGE,
        None,
        entry.entry_id,
        entry.title,
        devices=[],
    )

    # Test auto_start disabled
    homekit.reset_mock()
    homekit.async_start.reset_mock()
    hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
    await hass.async_block_till_done()
    assert homekit.async_start.called is False

    # Test start call with driver is ready
    homekit.reset_mock()
    homekit.async_start.reset_mock()
    homekit.status = STATUS_READY

    await hass.services.async_call(DOMAIN, SERVICE_HOMEKIT_START, blocking=True)
    await hass.async_block_till_done()
    assert homekit.async_start.called is True

    # Test start call with driver started
    homekit.reset_mock()
    homekit.async_start.reset_mock()
    homekit.status = STATUS_STOPPED

    await hass.services.async_call(DOMAIN, SERVICE_HOMEKIT_START, blocking=True)
    await hass.async_block_till_done()
    assert homekit.async_start.called is False
async def test_homekit_setup(hass, hk_driver, mock_zeroconf):
    """Test setup of bridge and driver."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_NAME: "mock_name", CONF_PORT: 12345},
        source=SOURCE_IMPORT,
    )
    homekit = HomeKit(
        hass,
        BRIDGE_NAME,
        DEFAULT_PORT,
        IP_ADDRESS,
        True,
        {},
        {},
        HOMEKIT_MODE_BRIDGE,
        advertise_ip=None,
        entry_id=entry.entry_id,
        entry_title=entry.title,
    )

    hass.states.async_set("light.demo", "on")
    hass.states.async_set("light.demo2", "on")
    zeroconf_mock = MagicMock()
    with patch(f"{PATH_HOMEKIT}.HomeDriver", return_value=hk_driver) as mock_driver:
        # setup() is blocking, so it runs in the executor.
        await hass.async_add_executor_job(homekit.setup, zeroconf_mock)

        path = get_persist_fullpath_for_entry_id(hass, entry.entry_id)
        mock_driver.assert_called_with(
            hass,
            entry.entry_id,
            BRIDGE_NAME,
            entry.title,
            loop=hass.loop,
            address=IP_ADDRESS,
            port=DEFAULT_PORT,
            persist_file=path,
            advertised_address=None,
            async_zeroconf_instance=zeroconf_mock,
        )
    assert homekit.driver.safe_mode is False


async def test_homekit_setup_ip_address(hass, hk_driver, mock_zeroconf):
    """Test setup with given IP address."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_NAME: "mock_name", CONF_PORT: 12345},
        source=SOURCE_IMPORT,
    )
    homekit = HomeKit(
        hass,
        BRIDGE_NAME,
        DEFAULT_PORT,
        "172.0.0.0",
        True,
        {},
        {},
        HOMEKIT_MODE_BRIDGE,
        None,
        entry_id=entry.entry_id,
        entry_title=entry.title,
    )

    mock_zeroconf = MagicMock()
    path = get_persist_fullpath_for_entry_id(hass, entry.entry_id)
    with patch(f"{PATH_HOMEKIT}.HomeDriver", return_value=hk_driver) as mock_driver:
        await hass.async_add_executor_job(homekit.setup, mock_zeroconf)
    # The explicitly configured address must be passed through verbatim.
    mock_driver.assert_called_with(
        hass,
        entry.entry_id,
        BRIDGE_NAME,
        entry.title,
        loop=hass.loop,
        address="172.0.0.0",
        port=DEFAULT_PORT,
        persist_file=path,
        advertised_address=None,
        async_zeroconf_instance=mock_zeroconf,
    )


async def test_homekit_setup_advertise_ip(hass, hk_driver, mock_zeroconf):
    """Test setup with given IP address to advertise."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_NAME: "mock_name", CONF_PORT: 12345},
        source=SOURCE_IMPORT,
    )
    homekit = HomeKit(
        hass,
        BRIDGE_NAME,
        DEFAULT_PORT,
        "0.0.0.0",
        True,
        {},
        {},
        HOMEKIT_MODE_BRIDGE,
        "192.168.1.100",
        entry_id=entry.entry_id,
        entry_title=entry.title,
    )

    async_zeroconf_instance = MagicMock()
    path = get_persist_fullpath_for_entry_id(hass, entry.entry_id)
    with patch(f"{PATH_HOMEKIT}.HomeDriver", return_value=hk_driver) as mock_driver:
        await hass.async_add_executor_job(homekit.setup, async_zeroconf_instance)
    # Bind on 0.0.0.0 but advertise the dedicated address via mDNS.
    mock_driver.assert_called_with(
        hass,
        entry.entry_id,
        BRIDGE_NAME,
        entry.title,
        loop=hass.loop,
        address="0.0.0.0",
        port=DEFAULT_PORT,
        persist_file=path,
        advertised_address="192.168.1.100",
        async_zeroconf_instance=async_zeroconf_instance,
    )
async def test_homekit_add_accessory(hass, mock_zeroconf):
    """Add accessory if config exists and get_acc returns an accessory."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entry.add_to_hass(hass)

    homekit = _mock_homekit_bridge(hass, entry)
    mock_acc = Mock(category="any")

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit):
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    homekit.bridge = _mock_pyhap_bridge()

    with patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc:
        # First call yields no accessory: nothing should be added.
        mock_get_acc.side_effect = [None, mock_acc, None]
        state = State("light.demo", "on")
        homekit.add_bridge_accessory(state)
        mock_get_acc.assert_called_with(hass, ANY, ANY, 1403373688, {})
        assert not homekit.bridge.add_accessory.called

        # Second call yields an accessory: it must reach the bridge.
        state = State("demo.test", "on")
        homekit.add_bridge_accessory(state)
        mock_get_acc.assert_called_with(hass, ANY, ANY, 600325356, {})
        assert homekit.bridge.add_accessory.called

        state = State("demo.test_2", "on")
        homekit.add_bridge_accessory(state)
        mock_get_acc.assert_called_with(hass, ANY, ANY, 1467253281, {})
        assert homekit.bridge.add_accessory.called


@pytest.mark.parametrize("acc_category", [CATEGORY_TELEVISION, CATEGORY_CAMERA])
async def test_homekit_warn_add_accessory_bridge(
    hass, acc_category, mock_zeroconf, caplog
):
    """Test we warn when adding cameras or tvs to a bridge."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entry.add_to_hass(hass)

    homekit = _mock_homekit_bridge(hass, entry)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit):
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    mock_camera_acc = Mock(category=acc_category)
    homekit.bridge = _mock_pyhap_bridge()

    with patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc:
        mock_get_acc.side_effect = [None, mock_camera_acc, None]
        state = State("camera.test", "on")
        homekit.add_bridge_accessory(state)
        mock_get_acc.assert_called_with(hass, ANY, ANY, 1508819236, {})
        # TVs/cameras should not be bridged; a warning is logged instead.
        assert not homekit.bridge.add_accessory.called

    assert "accessory mode" in caplog.text
async def test_homekit_remove_accessory(hass, mock_zeroconf):
    """Remove accessory from bridge."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
    homekit.driver = "driver"
    homekit.bridge = _mock_pyhap_bridge()
    acc_mock = MagicMock()
    homekit.bridge.accessories = {6: acc_mock}

    acc = homekit.remove_bridge_accessory(6)
    # The removed accessory is returned, stopped, and gone from the bridge.
    assert acc is acc_mock
    assert acc_mock.async_stop.called
    assert len(homekit.bridge.accessories) == 0


async def test_homekit_entity_filter(hass, mock_zeroconf):
    """Test the entity filter."""
    entry = await async_init_integration(hass)

    entity_filter = generate_filter(["cover"], ["demo.test"], [], [])
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)
    homekit.bridge = Mock()
    homekit.bridge.accessories = {}

    hass.states.async_set("cover.test", "open")
    hass.states.async_set("demo.test", "on")
    hass.states.async_set("light.demo", "on")

    # Only states matching the include rules survive the filter.
    filtered_states = await homekit.async_configure_accessories()
    assert hass.states.get("cover.test") in filtered_states
    assert hass.states.get("demo.test") in filtered_states
    assert hass.states.get("light.demo") not in filtered_states


async def test_homekit_entity_glob_filter(hass, mock_zeroconf):
    """Test the entity filter."""
    entry = await async_init_integration(hass)

    entity_filter = generate_filter(
        ["cover"], ["demo.test"], [], [], ["*.included_*"], ["*.excluded_*"]
    )
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)
    homekit.bridge = Mock()
    homekit.bridge.accessories = {}

    hass.states.async_set("cover.test", "open")
    hass.states.async_set("demo.test", "on")
    hass.states.async_set("cover.excluded_test", "open")
    hass.states.async_set("light.included_test", "on")

    # Glob include/exclude patterns apply on top of domain/entity rules.
    filtered_states = await homekit.async_configure_accessories()
    assert hass.states.get("cover.test") in filtered_states
    assert hass.states.get("demo.test") in filtered_states
    assert hass.states.get("cover.excluded_test") not in filtered_states
    assert hass.states.get("light.included_test") in filtered_states
async def test_homekit_start(hass, hk_driver, mock_zeroconf, device_reg):
    """Test HomeKit start method."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    homekit.bridge = Mock()
    homekit.bridge.accessories = []
    homekit.driver = hk_driver
    acc = Accessory(hk_driver, "any")
    homekit.driver.accessory = acc

    # Pre-register a bridge device with a stale MAC; start() should clean it up.
    connection = (device_registry.CONNECTION_NETWORK_MAC, "AA:BB:CC:DD:EE:FF")
    bridge_with_wrong_mac = device_reg.async_get_or_create(
        config_entry_id=entry.entry_id,
        connections={connection},
        manufacturer="Any",
        name="Any",
        model="Home Assistant HomeKit Bridge",
    )

    hass.states.async_set("light.demo", "on")
    hass.states.async_set("light.demo2", "on")
    state = hass.states.async_all()[0]

    with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ) as mock_setup_msg, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ) as hk_driver_start:
        await homekit.async_start()

    await hass.async_block_till_done()
    mock_add_acc.assert_any_call(state)
    mock_setup_msg.assert_called_with(
        hass, entry.entry_id, "Mock Title (Home Assistant Bridge)", ANY, ANY
    )
    assert hk_driver_start.called
    assert homekit.status == STATUS_RUNNING

    # Test start() if already started
    hk_driver_start.reset_mock()
    await homekit.async_start()
    await hass.async_block_till_done()
    assert not hk_driver_start.called
    # The stale device entry must have been removed.
    assert device_reg.async_get(bridge_with_wrong_mac.id) is None

    device = device_reg.async_get_device(
        {(DOMAIN, entry.entry_id, BRIDGE_SERIAL_NUMBER)}
    )
    assert device
    formatted_mac = device_registry.format_mac(homekit.driver.state.mac)
    assert (device_registry.CONNECTION_NETWORK_MAC, formatted_mac) in device.connections

    # Start again to make sure the registry entry is kept
    homekit.status = STATUS_READY
    with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ) as mock_setup_msg, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ) as hk_driver_start:
        await homekit.async_start()

    device = device_reg.async_get_device(
        {(DOMAIN, entry.entry_id, BRIDGE_SERIAL_NUMBER)}
    )
    assert device
    formatted_mac = device_registry.format_mac(homekit.driver.state.mac)
    assert (device_registry.CONNECTION_NETWORK_MAC, formatted_mac) in device.connections

    assert len(device_reg.devices) == 1
    assert homekit.driver.state.config_version == 1


async def test_homekit_start_with_a_broken_accessory(hass, hk_driver, mock_zeroconf):
    """Test HomeKit start method."""
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entity_filter = generate_filter(["cover", "light"], ["demo.test"], [], [])

    await async_init_entry(hass, entry)
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)

    homekit.bridge = Mock()
    homekit.bridge.accessories = []
    homekit.driver = hk_driver
    homekit.driver.accessory = Accessory(hk_driver, "any")

    hass.states.async_set("light.demo", "on")
    hass.states.async_set("light.broken", "on")

    # get_accessory raising must not prevent the bridge from starting.
    with patch(f"{PATH_HOMEKIT}.get_accessory", side_effect=Exception), patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ) as mock_setup_msg, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ) as hk_driver_start:
        await homekit.async_start()

    await hass.async_block_till_done()
    mock_setup_msg.assert_called_with(
        hass, entry.entry_id, "Mock Title (Home Assistant Bridge)", ANY, ANY
    )
    assert hk_driver_start.called
    assert homekit.status == STATUS_RUNNING

    # Test start() if already started
    hk_driver_start.reset_mock()
    await homekit.async_start()
    await hass.async_block_till_done()
    assert not hk_driver_start.called


async def test_homekit_start_with_a_device(
    hass, hk_driver, mock_zeroconf, demo_cleanup, device_reg, entity_reg
):
    """Test HomeKit start method with a device."""
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    assert await async_setup_component(hass, "demo", {"demo": {}})
    await hass.async_block_till_done()

    reg_entry = entity_reg.async_get("light.ceiling_lights")
    assert reg_entry is not None
    device_id = reg_entry.device_id
    await async_init_entry(hass, entry)
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, None, devices=[device_id])
    homekit.driver = hk_driver

    with patch(f"{PATH_HOMEKIT}.get_accessory", side_effect=Exception), patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ) as mock_setup_msg:
        await homekit.async_start()

    await hass.async_block_till_done()
    mock_setup_msg.assert_called_with(
        hass, entry.entry_id, "Mock Title (Home Assistant Bridge)", ANY, ANY
    )
    assert homekit.status == STATUS_RUNNING

    # A configured device results in a DeviceTriggerAccessory on the bridge.
    assert isinstance(
        list(homekit.driver.accessory.accessories.values())[0], DeviceTriggerAccessory
    )
    await homekit.async_stop()
async def test_homekit_stop(hass):
    """Test HomeKit stop method."""
    entry = await async_init_integration(hass)
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
    homekit.driver = Mock()
    homekit.driver.async_stop = AsyncMock()
    homekit.bridge = Mock()
    homekit.bridge.accessories = {}

    assert homekit.status == STATUS_READY
    await homekit.async_stop()
    await hass.async_block_till_done()

    # Stopping while not running (READY/WAIT/STOPPED) is a no-op.
    homekit.status = STATUS_WAIT
    await homekit.async_stop()
    await hass.async_block_till_done()

    homekit.status = STATUS_STOPPED
    await homekit.async_stop()
    await hass.async_block_till_done()
    assert homekit.driver.async_stop.called is False

    # Test if driver is started
    homekit.status = STATUS_RUNNING
    await homekit.async_stop()
    await hass.async_block_till_done()
    assert homekit.driver.async_stop.called is True


async def test_homekit_reset_accessories(hass, mock_zeroconf):
    """Test resetting HomeKit accessories."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )

    entity_id = "light.demo"
    hass.states.async_set("light.demo", "on")
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory.Bridge.add_accessory"
    ) as mock_add_accessory, patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ), patch(
        f"{PATH_HOMEKIT}.accessories.HomeAccessory.run"
    ) as mock_run, patch.object(
        # Collapse the debounce window so config_changed fires immediately.
        homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
    ):
        await async_init_entry(hass, entry)

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
        homekit.bridge.accessories = {aid: acc_mock}
        homekit.status = STATUS_RUNNING

        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()

        # One config_changed for the removal, one for the re-add.
        assert hk_driver_config_changed.call_count == 2
        assert mock_add_accessory.called
        assert mock_run.called
        homekit.status = STATUS_READY
async def test_homekit_unpair(hass, device_reg, mock_zeroconf):
"""Test unpairing HomeKit accessories."""
await async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
hass.states.async_set("light.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
state = homekit.driver.state
state.add_paired_client("client1", "any", b"1")
formatted_mac = device_registry.format_mac(state.mac)
hk_bridge_dev = device_reg.async_get_device(
{}, {(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)}
)
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_UNPAIR,
{ATTR_DEVICE_ID: hk_bridge_dev.id},
blocking=True,
)
await hass.async_block_till_done()
assert state.paired_clients == {}
homekit.status = STATUS_STOPPED
async def test_homekit_unpair_missing_device_id(hass, device_reg, mock_zeroconf):
    """Test unpairing HomeKit accessories with invalid device id."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entity_id = "light.demo"
    hass.states.async_set("light.demo", "on")

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await async_init_entry(hass, entry)

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
        homekit.bridge.accessories = {aid: acc_mock}
        homekit.status = STATUS_RUNNING

        state = homekit.driver.state
        state.add_paired_client("client1", "any", b"1")
        # An unknown device id must be rejected before any unpairing happens.
        with pytest.raises(HomeAssistantError):
            await hass.services.async_call(
                DOMAIN,
                SERVICE_HOMEKIT_UNPAIR,
                {ATTR_DEVICE_ID: "notvalid"},
                blocking=True,
            )
        await hass.async_block_till_done()
        # NOTE(review): this assignment looks like it was meant to be an
        # assertion that the paired client survived the failed call — confirm.
        state.paired_clients = {"client1": "any"}
        homekit.status = STATUS_STOPPED
async def test_homekit_unpair_not_homekit_device(hass, device_reg, mock_zeroconf):
    """Test unpairing HomeKit accessories with a non-homekit device id."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    # A second config entry from a different integration; its device must not
    # be accepted by the homekit unpair service.
    not_homekit_entry = MockConfigEntry(
        domain="not_homekit", data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entity_id = "light.demo"
    hass.states.async_set("light.demo", "on")

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await async_init_entry(hass, entry)

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
        homekit.bridge.accessories = {aid: acc_mock}
        homekit.status = STATUS_RUNNING

        # Register a device that belongs to the foreign config entry.
        device_entry = device_reg.async_get_or_create(
            config_entry_id=not_homekit_entry.entry_id,
            sw_version="0.16.0",
            model="Powerwall 2",
            manufacturer="Tesla",
            connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        )

        state = homekit.driver.state
        state.add_paired_client("client1", "any", b"1")
        # Using a device owned by another integration must raise.
        with pytest.raises(HomeAssistantError):
            await hass.services.async_call(
                DOMAIN,
                SERVICE_HOMEKIT_UNPAIR,
                {ATTR_DEVICE_ID: device_entry.id},
                blocking=True,
            )
        await hass.async_block_till_done()
        # NOTE(review): as in the missing-device-id test, this assignment is
        # presumably meant to assert pairing was untouched — confirm.
        state.paired_clients = {"client1": "any"}
        homekit.status = STATUS_STOPPED
async def test_homekit_reset_accessories_not_supported(hass, mock_zeroconf):
    """Test resetting HomeKit accessories with an unsupported entity."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    # Domain "not_supported" has no HomeKit accessory mapping.
    entity_id = "not_supported.demo"
    hass.states.async_set("not_supported.demo", "on")

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory.Bridge.add_accessory"
    ) as mock_add_accessory, patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ), patch.object(
        # Zero out the debounce interval so the reset runs immediately.
        homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
    ):
        await async_init_entry(hass, entry)

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
        homekit.bridge.accessories = {aid: acc_mock}
        homekit.status = STATUS_RUNNING

        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()

        # The accessory is removed (config changes twice) but never re-added,
        # leaving the bridge empty.
        assert hk_driver_config_changed.call_count == 2
        assert not mock_add_accessory.called
        assert len(homekit.bridge.accessories) == 0
        homekit.status = STATUS_STOPPED
async def test_homekit_reset_accessories_state_missing(hass, mock_zeroconf):
    """Test resetting HomeKit accessories when the state goes missing."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    # Note: intentionally no hass.states.async_set for this entity — its
    # state is absent when the reset service runs.
    entity_id = "light.demo"

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory.Bridge.add_accessory"
    ) as mock_add_accessory, patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ), patch.object(
        homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
    ):
        await async_init_entry(hass, entry)

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
        homekit.bridge.accessories = {aid: acc_mock}
        homekit.status = STATUS_RUNNING

        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()

        # With no state available, the reset is a no-op.
        assert hk_driver_config_changed.call_count == 0
        assert not mock_add_accessory.called
        homekit.status = STATUS_STOPPED
async def test_homekit_reset_accessories_not_bridged(hass, mock_zeroconf):
    """Test resetting HomeKit accessories when the state is not bridged."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entity_id = "light.demo"

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory.Bridge.add_accessory"
    ) as mock_add_accessory, patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ), patch.object(
        homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
    ):
        await async_init_entry(hass, entry)

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
        homekit.bridge.accessories = {aid: acc_mock}
        homekit.status = STATUS_RUNNING

        # Reset an entity id that is NOT in the bridge's accessory map.
        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: "light.not_bridged"},
            blocking=True,
        )
        await hass.async_block_till_done()

        # Nothing bridged matches the entity, so nothing happens.
        assert hk_driver_config_changed.call_count == 0
        assert not mock_add_accessory.called
        homekit.status = STATUS_STOPPED
async def test_homekit_reset_single_accessory(hass, mock_zeroconf):
    """Test resetting HomeKit single accessory."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entity_id = "light.demo"
    hass.states.async_set("light.demo", "on")

    # Accessory mode: the driver carries a single accessory, no bridge.
    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ), patch(
        f"{PATH_HOMEKIT}.accessories.HomeAccessory.run"
    ) as mock_run:
        await async_init_entry(hass, entry)
        homekit.status = STATUS_RUNNING

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        homekit.driver.accessory = acc_mock

        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()

        # The accessory is restarted and the driver config bumped once.
        assert mock_run.called
        assert hk_driver_config_changed.call_count == 1
        homekit.status = STATUS_READY
async def test_homekit_reset_single_accessory_unsupported(hass, mock_zeroconf):
    """Test resetting HomeKit single accessory with an unsupported entity."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    # Domain with no HomeKit accessory mapping.
    entity_id = "not_supported.demo"
    hass.states.async_set("not_supported.demo", "on")

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await async_init_entry(hass, entry)
        homekit.status = STATUS_RUNNING

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        homekit.driver.accessory = acc_mock

        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()

        # Unsupported entity: reset must not touch the driver config.
        assert hk_driver_config_changed.call_count == 0
        homekit.status = STATUS_STOPPED
async def test_homekit_reset_single_accessory_state_missing(hass, mock_zeroconf):
    """Test resetting HomeKit single accessory when the state goes missing."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    # Intentionally no state set for this entity id.
    entity_id = "light.demo"

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await async_init_entry(hass, entry)
        homekit.status = STATUS_RUNNING

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        homekit.driver.accessory = acc_mock

        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()

        # Missing state: reset is a no-op.
        assert hk_driver_config_changed.call_count == 0
        homekit.status = STATUS_STOPPED
async def test_homekit_reset_single_accessory_no_match(hass, mock_zeroconf):
    """Test resetting HomeKit single accessory when the entity id does not match."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
    )
    entity_id = "light.demo"

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)

    with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
        "pyhap.accessory_driver.AccessoryDriver.config_changed"
    ) as hk_driver_config_changed, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await async_init_entry(hass, entry)
        homekit.status = STATUS_RUNNING

        acc_mock = MagicMock()
        acc_mock.entity_id = entity_id
        homekit.driver.accessory = acc_mock

        # Ask to reset a different entity than the one the accessory exposes.
        await hass.services.async_call(
            DOMAIN,
            SERVICE_HOMEKIT_RESET_ACCESSORY,
            {ATTR_ENTITY_ID: "light.no_match"},
            blocking=True,
        )
        await hass.async_block_till_done()

        # No match: reset is a no-op.
        assert hk_driver_config_changed.call_count == 0
        homekit.status = STATUS_STOPPED
async def test_homekit_too_many_accessories(hass, hk_driver, caplog, mock_zeroconf):
    """Test adding too many accessories to HomeKit."""
    entry = await async_init_integration(hass)

    entity_filter = generate_filter(["cover", "light"], ["demo.test"], [], [])

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)

    def _mock_bridge(*_):
        # Return a bridge that is already at the HomeKit accessory limit.
        mock_bridge = HomeBridge(hass, hk_driver, "mock_bridge")
        # The bridge itself counts as an accessory
        mock_bridge.accessories = range(MAX_DEVICES)
        return mock_bridge

    homekit.driver = hk_driver
    homekit.driver.accessory = Accessory(hk_driver, "any")

    hass.states.async_set("light.demo", "on")
    hass.states.async_set("light.demo2", "on")
    hass.states.async_set("light.demo3", "on")

    with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ), patch(f"{PATH_HOMEKIT}.HomeBridge", _mock_bridge):
        await homekit.async_start()
        await hass.async_block_till_done()
        # Startup must log a warning instead of exceeding the device limit.
        assert "would exceed" in caplog.text
async def test_homekit_finds_linked_batteries(
    hass, hk_driver, device_reg, entity_reg, mock_zeroconf
):
    """Test HomeKit start method."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    homekit.driver = hk_driver
    homekit.bridge = MagicMock()

    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # One device carrying a light plus battery/charging sensors, so HomeKit
    # can discover the linked sensors through the device registry.
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        sw_version="0.16.0",
        model="Powerwall 2",
        manufacturer="Tesla",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    binary_charging_sensor = entity_reg.async_get_or_create(
        "binary_sensor",
        "powerwall",
        "battery_charging",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_BATTERY_CHARGING,
    )
    battery_sensor = entity_reg.async_get_or_create(
        "sensor",
        "powerwall",
        "battery",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_BATTERY,
    )
    light = entity_reg.async_get_or_create(
        "light", "powerwall", "demo", device_id=device_entry.id
    )

    hass.states.async_set(
        binary_charging_sensor.entity_id,
        STATE_ON,
        {ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY_CHARGING},
    )
    hass.states.async_set(
        battery_sensor.entity_id, 30, {ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY}
    )
    hass.states.async_set(light.entity_id, STATE_ON)

    with patch(f"{PATH_HOMEKIT}.show_setup_message"), patch(
        f"{PATH_HOMEKIT}.get_accessory"
    ) as mock_get_acc, patch("pyhap.accessory_driver.AccessoryDriver.async_start"):
        await homekit.async_start()
    await hass.async_block_till_done()

    # The accessory factory must receive device info plus both linked
    # battery sensors resolved from the registries.
    mock_get_acc.assert_called_with(
        hass,
        ANY,
        ANY,
        ANY,
        {
            "manufacturer": "Tesla",
            "model": "Powerwall 2",
            "sw_version": "0.16.0",
            "platform": "test",
            "linked_battery_charging_sensor": "binary_sensor.powerwall_battery_charging",
            "linked_battery_sensor": "sensor.powerwall_battery",
        },
    )
async def test_homekit_async_get_integration_fails(
    hass, hk_driver, device_reg, entity_reg, mock_zeroconf
):
    """Test that we continue if async_get_integration fails."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    homekit.driver = hk_driver
    homekit.bridge = HomeBridge(hass, hk_driver, "mock_bridge")

    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # Note: no manufacturer set here, so none should appear in the result.
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        sw_version="0.16.0",
        model="Powerwall 2",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    # Entities from a platform that does not resolve to a real integration.
    binary_charging_sensor = entity_reg.async_get_or_create(
        "binary_sensor",
        "invalid_integration_does_not_exist",
        "battery_charging",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_BATTERY_CHARGING,
    )
    battery_sensor = entity_reg.async_get_or_create(
        "sensor",
        "invalid_integration_does_not_exist",
        "battery",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_BATTERY,
    )
    light = entity_reg.async_get_or_create(
        "light", "invalid_integration_does_not_exist", "demo", device_id=device_entry.id
    )

    hass.states.async_set(
        binary_charging_sensor.entity_id,
        STATE_ON,
        {ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY_CHARGING},
    )
    hass.states.async_set(
        battery_sensor.entity_id, 30, {ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY}
    )
    hass.states.async_set(light.entity_id, STATE_ON)

    with patch.object(homekit.bridge, "add_accessory"), patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ), patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await homekit.async_start()
    await hass.async_block_till_done()

    # Even though the integration lookup fails, the entity platform name is
    # still used and the linked sensors are still resolved.
    mock_get_acc.assert_called_with(
        hass,
        ANY,
        ANY,
        ANY,
        {
            "model": "Powerwall 2",
            "sw_version": "0.16.0",
            "platform": "invalid_integration_does_not_exist",
            "linked_battery_charging_sensor": "binary_sensor.invalid_integration_does_not_exist_battery_charging",
            "linked_battery_sensor": "sensor.invalid_integration_does_not_exist_battery",
        },
    )
async def test_yaml_updates_update_config_entry_for_name(hass, mock_zeroconf):
    """Test async_setup with imported config."""
    await async_setup_component(hass, "persistent_notification", {})
    # Entry imported from YAML; YAML values should override its data.
    entry = MockConfigEntry(
        domain=DOMAIN,
        source=SOURCE_IMPORT,
        data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
        options={},
    )
    entry.add_to_hass(hass)

    with patch(f"{PATH_HOMEKIT}.HomeKit") as mock_homekit, patch(
        "homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
    ):
        mock_homekit.return_value = homekit = Mock()
        type(homekit).async_start = AsyncMock()
        assert await async_setup_component(
            hass, "homekit", {"homekit": {CONF_NAME: BRIDGE_NAME, CONF_PORT: 12345}}
        )
        await hass.async_block_till_done()

        # The YAML port (12345) must be applied to the imported entry.
        mock_homekit.assert_any_call(
            hass,
            BRIDGE_NAME,
            12345,
            "1.2.3.4",
            ANY,
            ANY,
            {},
            HOMEKIT_MODE_BRIDGE,
            None,
            entry.entry_id,
            entry.title,
            devices=[],
        )

        # Test auto start enabled
        mock_homekit.reset_mock()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        mock_homekit().async_start.assert_called()
async def test_homekit_uses_system_zeroconf(hass, hk_driver, mock_zeroconf):
    """Test HomeKit uses system zeroconf."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
        options={},
    )
    assert await async_setup_component(hass, "zeroconf", {"zeroconf": {}})
    # Grab the shared zeroconf instance so we can compare identities below.
    system_async_zc = await zeroconf.async_get_async_instance(hass)

    with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
        f"{PATH_HOMEKIT}.HomeKit.async_stop"
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        # The driver must advertise via the shared system zeroconf instance,
        # not a private one.
        assert (
            hass.data[DOMAIN][entry.entry_id][HOMEKIT].driver.advertiser
            == system_async_zc
        )
        assert await hass.config_entries.async_unload(entry.entry_id)
        await hass.async_block_till_done()
def _write_data(path: str, data: dict) -> None:
    """Write *data* as JSON to *path*, creating parent directories as needed.

    Uses ``exist_ok=True`` instead of an isdir()-then-makedirs() check, which
    was racy (the directory could appear between the check and the call) and
    raised FileExistsError in that window.
    """
    os.makedirs(os.path.dirname(path), exist_ok=True)
    json_util.save_json(path, data)
async def test_homekit_ignored_missing_devices(
    hass, hk_driver, device_reg, entity_reg, mock_zeroconf
):
    """Test HomeKit handles a device in the entity registry but missing from the device registry."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    homekit.driver = hk_driver
    homekit.bridge = _mock_pyhap_bridge()

    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        sw_version="0.16.0",
        model="Powerwall 2",
        manufacturer="Tesla",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        "binary_sensor",
        "powerwall",
        "battery_charging",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_BATTERY_CHARGING,
    )
    entity_reg.async_get_or_create(
        "sensor",
        "powerwall",
        "battery",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_BATTERY,
    )
    light = entity_reg.async_get_or_create(
        "light", "powerwall", "demo", device_id=device_entry.id
    )

    # Snapshot the entity registry so we can restore it after deleting the
    # device, creating the inconsistent state under test: entities that
    # reference a device id no longer in the device registry.
    before_removal = entity_reg.entities.copy()
    # Delete the device to make sure we fallback
    # to using the platform
    device_reg.async_remove_device(device_entry.id)
    # Wait for the entities to be removed
    await asyncio.sleep(0)
    await asyncio.sleep(0)
    # Restore the registry
    entity_reg.entities = before_removal

    hass.states.async_set(light.entity_id, STATE_ON)
    hass.states.async_set("light.two", STATE_ON)

    with patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
        f"{PATH_HOMEKIT}.HomeBridge", return_value=homekit.bridge
    ), patch("pyhap.accessory_driver.AccessoryDriver.async_start"):
        await homekit.async_start()
        await hass.async_block_till_done()

    # Without device info, the integration title is used as the platform and
    # linked sensors are still found via the entity registry.
    mock_get_acc.assert_any_call(
        hass,
        ANY,
        ANY,
        ANY,
        {
            "platform": "Tesla Powerwall",
            "linked_battery_charging_sensor": "binary_sensor.powerwall_battery_charging",
            "linked_battery_sensor": "sensor.powerwall_battery",
        },
    )
async def test_homekit_finds_linked_motion_sensors(
    hass, hk_driver, device_reg, entity_reg, mock_zeroconf
):
    """Test HomeKit start method."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    homekit.driver = hk_driver
    homekit.bridge = HomeBridge(hass, hk_driver, "mock_bridge")

    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # Device carrying a camera plus a motion binary_sensor, so the motion
    # sensor can be linked to the camera accessory.
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        sw_version="0.16.0",
        model="Camera Server",
        manufacturer="Ubq",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    binary_motion_sensor = entity_reg.async_get_or_create(
        "binary_sensor",
        "camera",
        "motion_sensor",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_MOTION,
    )
    camera = entity_reg.async_get_or_create(
        "camera", "camera", "demo", device_id=device_entry.id
    )

    hass.states.async_set(
        binary_motion_sensor.entity_id,
        STATE_ON,
        {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION},
    )
    hass.states.async_set(camera.entity_id, STATE_ON)

    with patch.object(homekit.bridge, "add_accessory"), patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ), patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await homekit.async_start()
    await hass.async_block_till_done()

    # The accessory factory must receive the linked motion sensor entity id.
    mock_get_acc.assert_called_with(
        hass,
        ANY,
        ANY,
        ANY,
        {
            "manufacturer": "Ubq",
            "model": "Camera Server",
            "platform": "test",
            "sw_version": "0.16.0",
            "linked_motion_sensor": "binary_sensor.camera_motion_sensor",
        },
    )
async def test_homekit_finds_linked_humidity_sensors(
    hass, hk_driver, device_reg, entity_reg, mock_zeroconf
):
    """Test HomeKit start method."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)

    homekit.driver = hk_driver
    homekit.bridge = HomeBridge(hass, hk_driver, "mock_bridge")

    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # Device carrying a humidifier plus a humidity sensor, so the sensor can
    # be linked to the humidifier accessory.
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        sw_version="0.16.1",
        model="Smart Brainy Clever Humidifier",
        manufacturer="Home Assistant",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    humidity_sensor = entity_reg.async_get_or_create(
        "sensor",
        "humidifier",
        "humidity_sensor",
        device_id=device_entry.id,
        device_class=DEVICE_CLASS_HUMIDITY,
    )
    humidifier = entity_reg.async_get_or_create(
        "humidifier", "humidifier", "demo", device_id=device_entry.id
    )

    hass.states.async_set(
        humidity_sensor.entity_id,
        "42",
        {
            ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
            ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
        },
    )
    hass.states.async_set(humidifier.entity_id, STATE_ON)

    with patch.object(homekit.bridge, "add_accessory"), patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ), patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ):
        await homekit.async_start()
    await hass.async_block_till_done()

    # The accessory factory must receive the linked humidity sensor.
    mock_get_acc.assert_called_with(
        hass,
        ANY,
        ANY,
        ANY,
        {
            "manufacturer": "Home Assistant",
            "model": "Smart Brainy Clever Humidifier",
            "platform": "test",
            "sw_version": "0.16.1",
            "linked_humidity_sensor": "sensor.humidifier_humidity_sensor",
        },
    )
async def test_reload(hass, mock_zeroconf):
    """Test we can reload from yaml."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN,
        source=SOURCE_IMPORT,
        data={CONF_NAME: "reloadable", CONF_PORT: 12345},
        options={},
    )
    entry.add_to_hass(hass)

    with patch(f"{PATH_HOMEKIT}.HomeKit") as mock_homekit, patch(
        "homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
    ):
        mock_homekit.return_value = homekit = Mock()
        assert await async_setup_component(
            hass, "homekit", {"homekit": {CONF_NAME: "reloadable", CONF_PORT: 12345}}
        )
        await hass.async_block_till_done()

    # Initial setup uses the original YAML port.
    mock_homekit.assert_any_call(
        hass,
        "reloadable",
        12345,
        "1.2.3.4",
        ANY,
        False,
        {},
        HOMEKIT_MODE_BRIDGE,
        None,
        entry.entry_id,
        entry.title,
        devices=[],
    )

    # Point YAML_CONFIG_FILE at a fixture that changes the port to 45678,
    # then fire the reload service.
    yaml_path = os.path.join(
        _get_fixtures_base_path(),
        "fixtures",
        "homekit/configuration.yaml",
    )
    with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path), patch(
        f"{PATH_HOMEKIT}.HomeKit"
    ) as mock_homekit2, patch.object(homekit.bridge, "add_accessory"), patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ), patch(
        f"{PATH_HOMEKIT}.get_accessory"
    ), patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ), patch(
        "homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
    ):
        mock_homekit2.return_value = homekit = Mock()
        await hass.services.async_call(
            "homekit",
            SERVICE_RELOAD,
            {},
            blocking=True,
        )
        await hass.async_block_till_done()

        # After reload, HomeKit is re-created with the fixture's port.
        mock_homekit2.assert_any_call(
            hass,
            "reloadable",
            45678,
            "1.2.3.4",
            ANY,
            False,
            {},
            HOMEKIT_MODE_BRIDGE,
            None,
            entry.entry_id,
            entry.title,
            devices=[],
        )
def _get_fixtures_base_path():
return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
async def test_homekit_start_in_accessory_mode(
    hass, hk_driver, mock_zeroconf, device_reg
):
    """Test HomeKit start method in accessory mode."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)

    homekit.bridge = Mock()
    homekit.bridge.accessories = []
    homekit.driver = hk_driver
    homekit.driver.accessory = Accessory(hk_driver, "any")

    hass.states.async_set("light.demo", "on")

    with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ) as mock_setup_msg, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ) as hk_driver_start:
        await homekit.async_start()

    await hass.async_block_till_done()
    # Accessory mode must never add bridge accessories.
    mock_add_acc.assert_not_called()
    mock_setup_msg.assert_called_with(
        hass, entry.entry_id, "Mock Title (demo)", ANY, ANY
    )
    assert hk_driver_start.called
    assert homekit.status == STATUS_RUNNING
async def test_homekit_start_in_accessory_mode_unsupported_entity(
    hass, hk_driver, mock_zeroconf, device_reg, caplog
):
    """Test HomeKit start method in accessory mode with an unsupported entity."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)

    homekit.bridge = Mock()
    homekit.bridge.accessories = []
    homekit.driver = hk_driver
    homekit.driver.accessory = Accessory(hk_driver, "any")

    # Domain "notsupported" has no HomeKit accessory mapping.
    hass.states.async_set("notsupported.demo", "on")

    with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ) as mock_setup_msg, patch(
        "pyhap.accessory_driver.AccessoryDriver.async_start"
    ) as hk_driver_start:
        await homekit.async_start()

    await hass.async_block_till_done()
    # Startup must abort and stay in the wait state with a log message.
    assert not mock_add_acc.called
    assert not mock_setup_msg.called
    assert not hk_driver_start.called
    assert homekit.status == STATUS_WAIT
    assert "entity not supported" in caplog.text
async def test_homekit_start_in_accessory_mode_missing_entity(
    hass, hk_driver, mock_zeroconf, device_reg, caplog
):
    """Test HomeKit start method in accessory mode when entity is not available."""
    entry = await async_init_integration(hass)

    homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)

    homekit.bridge = Mock()
    homekit.bridge.accessories = []
    homekit.driver = hk_driver
    homekit.driver.accessory = Accessory(hk_driver, "any")

    # Note: no state is ever set, so the configured entity is unavailable.
    with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
        f"{PATH_HOMEKIT}.show_setup_message"
    ), patch("pyhap.accessory_driver.AccessoryDriver.async_start"):
        await homekit.async_start()

    await hass.async_block_till_done()
    # Startup must abort and stay in the wait state with a log message.
    mock_add_acc.assert_not_called()
    assert homekit.status == STATUS_WAIT
    assert "entity not available" in caplog.text
async def test_wait_for_port_to_free(hass, hk_driver, mock_zeroconf, caplog):
    """Test we wait for the port to free before declaring unload success."""
    await async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
        options={},
    )
    entry.add_to_hass(hass)

    # Case 1: the port frees immediately — no wait message is logged.
    with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
        f"{PATH_HOMEKIT}.HomeKit.async_stop"
    ), patch(f"{PATH_HOMEKIT}.port_is_available", return_value=True) as port_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        assert await hass.config_entries.async_unload(entry.entry_id)
        await hass.async_block_till_done()
        assert "Waiting for the HomeKit server to shutdown" not in caplog.text
        assert port_mock.called

    # Case 2: the port never frees — unload logs the wait message. The
    # cleanup poll interval is zeroed so the test doesn't actually sleep.
    with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
        f"{PATH_HOMEKIT}.HomeKit.async_stop"
    ), patch.object(homekit_base, "PORT_CLEANUP_CHECK_INTERVAL_SECS", 0), patch(
        f"{PATH_HOMEKIT}.port_is_available", return_value=False
    ) as port_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        assert await hass.config_entries.async_unload(entry.entry_id)
        await hass.async_block_till_done()
        assert "Waiting for the HomeKit server to shutdown" in caplog.text
        assert port_mock.called
| {
"content_hash": "8d4918503a800c744dc96ef84aa9e3f5",
"timestamp": "",
"source": "github",
"line_count": 1715,
"max_line_length": 114,
"avg_line_length": 33.9667638483965,
"alnum_prop": 0.6441899987983452,
"repo_name": "sander76/home-assistant",
"id": "4976985fa156de4e0717748fd5cf2cf7af7b3276",
"size": "58253",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/homekit/test_homekit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
# NOTE: the "flask.ext" import namespace was deprecated in Flask 0.11 and
# removed in Flask 1.0 — extensions must be imported under their own package
# names instead.
from flask_script import Manager
from flask_assets import ManageAssets

from portfolio import create_app

# Build the CLI around the application factory so every command runs with an
# application instance created by create_app().
manager = Manager(create_app)
# Expose webassets' bundle-management commands as "python manage.py assets ...".
manager.add_command("assets", ManageAssets)

if __name__ == "__main__":
    manager.run()
| {
"content_hash": "243de0f8fb516de1be3c8d2ef8dbfd94",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 23.4,
"alnum_prop": 0.7393162393162394,
"repo_name": "timesqueezer/portfolio",
"id": "f2c39b4c17a671b2ac7b1140735826bdd668b99d",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2518"
},
{
"name": "HTML",
"bytes": "8631"
},
{
"name": "Makefile",
"bytes": "427"
},
{
"name": "Python",
"bytes": "2562"
}
],
"symlink_target": ""
} |
"""
Utilities for NetApp FAS drivers.
This module contains common utilities to be used by one or more
NetApp FAS drivers to achieve the desired functionality.
"""
import json
import socket
from oslo_config import cfg
from oslo_log import log
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp import options as na_opts
LOG = log.getLogger(__name__)
CONF = cfg.CONF
def get_backend_configuration(backend_name):
    """Get a cDOT configuration object for a specific backend.

    Raises ConfigNotFound if no stanza named *backend_name* exists in the
    cinder configuration.
    """
    available_stanzas = CONF.list_all_sections()
    if backend_name not in available_stanzas:
        msg = _("Could not find backend stanza %(backend_name)s in "
                "configuration. Available stanzas are %(stanzas)s")
        raise exception.ConfigNotFound(message=msg % {
            "stanzas": available_stanzas,
            "backend_name": backend_name,
        })

    config = configuration.Configuration(driver.volume_opts,
                                         config_group=backend_name)
    # Register every NetApp option group the cDOT drivers consume; order
    # mirrors the original explicit append sequence.
    netapp_option_groups = (
        na_opts.netapp_proxy_opts,
        na_opts.netapp_connection_opts,
        na_opts.netapp_transport_opts,
        na_opts.netapp_basicauth_opts,
        na_opts.netapp_provisioning_opts,
        na_opts.netapp_cluster_opts,
        na_opts.netapp_san_opts,
        na_opts.netapp_replication_opts,
    )
    for option_group in netapp_option_groups:
        config.append_config_values(option_group)

    return config
def get_client_for_backend(backend_name, vserver_name=None):
    """Get a cDOT API client for a specific backend.

    :param backend_name: name of the backend stanza to read credentials from.
    :param vserver_name: optional vserver override; when omitted, the
        backend's configured ``netapp_vserver`` is used.
    """
    config = get_backend_configuration(backend_name)
    # All connection parameters come straight from the backend's config
    # stanza; only the vserver may be overridden by the caller.
    client = client_cmode.Client(
        transport_type=config.netapp_transport_type,
        username=config.netapp_login,
        password=config.netapp_password,
        hostname=config.netapp_server_hostname,
        port=config.netapp_server_port,
        vserver=vserver_name or config.netapp_vserver,
        trace=utils.TRACE_API)

    return client
def _build_base_ems_log_message(driver_name, app_version):
ems_log = {
'computer-name': socket.gethostname() or 'Cinder_node',
'event-source': 'Cinder driver %s' % driver_name,
'app-version': app_version,
'category': 'provisioning',
'log-level': '5',
'auto-support': 'false',
}
return ems_log
def build_ems_log_message_0(driver_name, app_version):
    """Construct EMS Autosupport log message with deployment info."""
    message = _build_base_ems_log_message(driver_name, app_version)
    message.update({
        'event-id': '0',
        'event-description': 'OpenStack Cinder connected to cluster node',
    })
    return message
def build_ems_log_message_1(driver_name, app_version, vserver,
                            flexvol_pools, aggregate_pools):
    """Construct EMS Autosupport log message with storage pool info."""
    pool_info = {
        'pools': {
            'vserver': vserver,
            'aggregates': aggregate_pools,
            'flexvols': flexvol_pools,
        },
    }
    message = _build_base_ems_log_message(driver_name, app_version)
    message.update({
        'event-id': '1',
        # Pool details are serialized into the description field.
        'event-description': json.dumps(pool_info),
    })
    return message
| {
"content_hash": "923f85ba617d04337c909581a5fa8d8d",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 33.19047619047619,
"alnum_prop": 0.6760401721664275,
"repo_name": "phenoxim/cinder",
"id": "dc9a6d30d976929c64e5488af2aab2dc4132917b",
"size": "4057",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/dataontap/utils/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20325688"
},
{
"name": "Shell",
"bytes": "16353"
}
],
"symlink_target": ""
} |
# Currently selected toolkit name; 'choose' means "not decided yet".
toolkit = 'choose'
# Toolkit family set by use() ('qt', 'gtk3', 'tk', 'pg'); None until chosen.
family = None
class ToolKitError(Exception):
    """Raised when an unsupported GUI toolkit is requested."""
def use(name):
    """
    Set the name of the GUI toolkit we should use.

    Parameters
    ----------
    name : str
        Toolkit name, case-insensitive: 'qt'/'qt5'/'qt6'/'pyside2'/'pyside6',
        'gtk'/'gtk3', 'tk', 'pg', or 'choose' to defer the decision.

    Raises
    ------
    ToolKitError
        If the named toolkit is not supported.
    """
    global toolkit, family
    name = name.lower()
    if name.startswith('choose'):
        pass
    elif name.startswith('qt') or name.startswith('pyside'):
        family = 'qt'
        if name == 'qt':
            name = 'qt5'
        if name not in ('qt5', 'pyside2', 'qt6', 'pyside6'):
            raise ToolKitError("ToolKit '%s' not supported!" % (name))
    elif name.startswith('gtk'):
        # default for "gtk" is gtk3
        if name in ('gtk', 'gtk3'):
            name = 'gtk3'
        family = 'gtk3'
        # BUG fix: was `assert ..., ToolKitError(...)`, which raised
        # AssertionError (not ToolKitError) and was stripped under -O.
        if name not in ('gtk3',):
            raise ToolKitError("ToolKit '%s' not supported!" % (name))
    elif name.startswith('tk'):
        family = 'tk'
        if name not in ('tk',):
            raise ToolKitError("ToolKit '%s' not supported!" % (name))
    elif name.startswith('pg'):
        family = 'pg'
        if name not in ('pg',):
            raise ToolKitError("ToolKit '%s' not supported!" % (name))
    else:
        # BUG fix: the exception was constructed here but never raised,
        # so unsupported toolkit names were silently accepted.
        raise ToolKitError("ToolKit '%s' not supported!" % (name))
    toolkit = name
def get_toolkit():
    """Return the name of the currently selected toolkit."""
    return toolkit
def get_family():
    """Return the selected toolkit family, or None if none chosen."""
    return family
def get_rv_toolkits():
    """Returns a list of reference viewer supported toolkits."""
    supported = ('qt4', 'qt5', 'pyside', 'pyside2', 'gtk3', 'pg')
    return list(supported)
def choose():
    """Probe for an available backend: prefer Qt, then fall back to GTK.

    Raises ImportError when neither a Qt nor a GTK helper is importable.
    """
    try:
        from ginga.qtw import QtHelp  # noqa
    except ImportError:
        try:
            from ginga.gtkw3 import GtkHelp  # noqa
        except ImportError:
            raise ImportError("qt or gtk variants not found")
# END
| {
"content_hash": "b839873701a3080c7373ccfadf6f6149",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 70,
"avg_line_length": 22.904109589041095,
"alnum_prop": 0.5496411483253588,
"repo_name": "pllim/ginga",
"id": "c8d1f01c5696568bf24b6b0da761b6bc1ee96ba6",
"size": "1852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/toolkit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2781"
},
{
"name": "GLSL",
"bytes": "7344"
},
{
"name": "HTML",
"bytes": "2129"
},
{
"name": "JavaScript",
"bytes": "87198"
},
{
"name": "Jupyter Notebook",
"bytes": "2691970"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "4359761"
}
],
"symlink_target": ""
} |
import string, cgi, time
import random
import sys
sys.path.insert(0, 'PyWebPlug')
from wsserver import *
from time import sleep
def setupMessages():
    """Placeholder hook in the server startup sequence; currently a no-op."""
    return None
class Client:
    """A websocket client that joins a Host session and relays raw data."""

    def __init__(self, socket):
        self.socket = socket
        self.needsConfirmation = True
        self.host = None  # set once the client joins a host via confirm()

    def handle(self):
        """Read pending data; join a host on first contact, else forward."""
        if not self.socket:
            return
        try:
            data = self.socket.readRaw()
        except:
            # BUG fix: the original fell through with `data` unbound here,
            # raising NameError on the len() check below.
            self.socket = None
            return
        if len(data) == 0:
            return
        print("Data:", data)
        if self.needsConfirmation:
            code = data[3:7]
            if code == "0000":
                print("Becoming a host!")
                self.becomeHost()
            else:
                print("Trying to find host", code)
                self.host = findHost(code)
                if self.host:
                    print("Found host.")
                    self.confirm()
                else:
                    print("No host found.")
        else:
            if self.host.socket:
                try:
                    self.host.socket.send(data)
                except:
                    self.host.socket = None
                    print("Host's socket is closed.")

    # This is called to confirm to the client that they have been accepted,
    # after they send us their details.
    def confirm(self):
        self.pID = self.host.getNextpID()
        self.host.players[self.pID] = self
        self.needsConfirmation = False
        self.sID = extend(self.pID, 2)
        self.socket.send("999" + self.sID)
        self.host.socket.send("998" + self.sID)

    def becomeHost(self):
        # Promote this connection to a Host and drop it from the client list.
        host = Host(self.socket, newHostCode())
        clients.remove(self)
        hosts.append(host)

    def disconnect(self):
        print("Lost client...")
        clients.remove(self)
        self.socket = None
        return
class Host:
    """A host session that multiplexes buffered messages to player clients."""

    def __init__(self, socket, hostCode):
        self.socket = socket
        self.hostCode = hostCode
        self.players = {}   # pID -> Client
        self.pID = 0        # last player ID handed out
        self.socket.send("999" + str(self.hostCode))
        self.writingTo = 0  # pID currently addressed; 0 = header not parsed
        self.data = ""      # receive buffer; messages are terminated by "*"

    def getNextpID(self):
        """Allocate and return the next player ID (1-based)."""
        self.pID += 1
        return self.pID

    def handle(self):
        """Drain the socket and route one complete buffered message."""
        if self.socket:
            try:
                self.data += self.socket.readRaw()
            except:
                self.socket = None
        if len(self.data) == 0:
            return
        print("Host says: " + self.data)
        ind = self.data.find("*")
        if ind < 0:
            # message incomplete; wait for more data
            return
        if self.writingTo == 0:
            try:
                # first two chars address the destination player
                self.writingTo = int(self.data[0:2])
            except ValueError:
                # malformed header: drop one char and retry
                self.data = self.data[1:]
                self.handle()
                return
        pID = self.writingTo
        # BUG fix: the original indexed self.players[pID] directly, so an
        # unknown pID raised KeyError and the warning below was unreachable.
        player = self.players.get(pID)
        if player:
            if player.socket:
                try:
                    player.socket.send(self.data[2:ind])
                except:
                    player.socket = None
                    print("Client's socket closed.")
        else:
            print("Host", self.hostCode," tried to send a messaged to non-existant player", pID)
        self.data = self.data[ind+2:]
        self.writingTo = 0

    def disconnect(self):
        print("Lost host.")
        hosts.remove(self)
        self.socket = None
        return
def findHost(code):
    """Return the registered Host with the given code, or None."""
    return next((h for h in hosts if h.hostCode == code), None)
def newHostCode():
    """Generate a 4-letter uppercase host code not already in use.

    BUG fix: `random` was used here but never imported at module scope
    (now imported at the top of the file). Also replaced unbounded
    recursion with a loop.
    """
    chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    while True:
        code = ''.join(random.choice(chars) for _ in range(4))
        if not findHost(code):
            return code
def extend(v, l):
    """Left-pad str(v) with '0' characters up to length l."""
    return str(v).rjust(l, "0")
# This handles a new client.
# We need to hand them to an object
# so that we can read and write from it
def handle(socket):
    """Wrap a newly accepted socket in a Client and register it."""
    clients.append(Client(socket))
def main():
    """Run the relay loop: accept connections, pump clients and hosts.

    Polls the network, then lets every registered client and host drain
    its socket, roughly 100 times per second, until Ctrl-C.
    """
    global gameStarted
    global stage
    try:
        setupMessages()
        server = startServer()
        while True:
            newClient = handleNetwork()
            if newClient:
                handle(newClient)
            for client in clients:
                client.handle()
            for host in hosts:
                host.handle()
            sleep(0.01)
    except KeyboardInterrupt:
        # NOTE(review): if startServer() raised before assigning `server`,
        # the close() below would hit an unbound local -- confirm.
        print(' received, closing server.')
        server.close()
# Module-level registries of live connections, mutated by Client/Host/handle().
clients = []
hosts = []
pID = 0  # NOTE(review): appears unused at module scope (Host keeps its own pID)
if __name__ == '__main__':
    main()
| {
"content_hash": "f51301a132fb08f091790cfd631b18d7",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 96,
"avg_line_length": 26.074285714285715,
"alnum_prop": 0.5084374315143546,
"repo_name": "ChrisFadden/PartyTowers",
"id": "de3244435c09cafe6d132fa82bccb0ac8be6a1bc",
"size": "4597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webrouter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "51773"
},
{
"name": "CMake",
"bytes": "742"
},
{
"name": "CSS",
"bytes": "2329"
},
{
"name": "HTML",
"bytes": "3212"
},
{
"name": "JavaScript",
"bytes": "5952"
},
{
"name": "Python",
"bytes": "6314"
}
],
"symlink_target": ""
} |
import argparse
from typing import Optional
from drillsrs import db, util
from drillsrs.cmd.command_base import CommandBase
class CreateTagCommand(CommandBase):
    """CLI command that adds a new tag to a deck."""

    names = ["add-tag", "create-tag"]
    description = "add a new tag to a deck"

    def decorate_arg_parser(self, parser: argparse.ArgumentParser) -> None:
        """Register the deck, --name and --color arguments."""
        color_names = list(util.COLOR_TAGS.keys())
        parser.add_argument(
            "deck", nargs="?", help="choose the deck to add the card to"
        )
        parser.add_argument(
            "-n", "--name", required=True, help="set the tag's name"
        )
        parser.add_argument(
            "-c",
            "--color",
            required=False,
            choices=util.COLOR_TAGS.keys(),
            help="set the tag's color",
            default=color_names[0],
        )

    def run(self, args: argparse.Namespace) -> None:
        """Create the tag and attach it to the requested deck."""
        with db.session_scope() as session:
            deck = db.get_deck_by_name(session, args.deck)
            new_tag = db.Tag()
            new_tag.name = args.name
            new_tag.color = args.color
            deck.tags.append(new_tag)
| {
"content_hash": "af8ded9bc05ff3a173c044712db69989",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 30.487179487179485,
"alnum_prop": 0.5668629100084104,
"repo_name": "rr-/drill",
"id": "b6993f90c06b46207307bef15bf3841ace0d2979",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drillsrs/cmd/create_tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52313"
},
{
"name": "Smarty",
"bytes": "14895"
}
],
"symlink_target": ""
} |
import pytest, sys, os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../")
from unittest import TestCase
from pylogic.knowledge_base import KnowledgeBase
from pylogic.decorators import case
from pylogic.exceptions import ArityError
from pylogic.functions import _
def add_parent_strain(base):
    """Register an empty two-argument 'parent' case on *base*.

    The decorated function's name and parameter list define the strain;
    the body is intentionally empty.
    """
    @case(base)
    def parent(father, son):
        pass
    return parent
def add_brother_strain(base):
    """Register an empty two-argument 'brothers' case on *base*."""
    @case(base)
    def brothers(firstBorn, secondBorn):
        pass
    return brothers
def set_up_parents(base):
    """Register the parent strain and record Bob's three sons."""
    parent = add_parent_strain(base)
    for son in ("Caleb", "George", "Tim"):
        parent("Bob", son)
    return parent
def set_up_brothers(base):
    """Register the brothers strain with every ordered sibling pair."""
    brothers = add_brother_strain(base)
    sibling_pairs = (
        ("Caleb", "George"),
        ("Caleb", "Tim"),
        ("George", "Tim"),
        ("George", "Caleb"),
        ("Tim", "Caleb"),
        ("Tim", "George"),
    )
    for first, second in sibling_pairs:
        brothers(first, second)
    return brothers
def set_up_strain(base):
    """Populate *base* with both strains; return (brothers, parent)."""
    brothers = set_up_brothers(base)
    parent = set_up_parents(base)
    return brothers, parent
class TestDecoratorCase(TestCase):
    """Tests for the @case decorator and KnowledgeBase tallying."""
    def test_add_case(self):
        # A fact asserted through a decorated strain should tally True.
        base = KnowledgeBase(2)
        parent = add_parent_strain(base)
        parent("bob", "tim")
        assert base.tally("parent", "bob", "tim") is True
    def test_arity_error(self):
        # A 3-argument case on an arity-2 KnowledgeBase raises ArityError.
        with pytest.raises(ArityError):
            base = KnowledgeBase(2)
            @case(base)
            def brothers(firstBorn, secondBorn, thirdBorn):
                pass
            brothers("Caleb", "George", "Paul")
    def test_add_case2(self):
        base = KnowledgeBase(2)
        brothers = add_brother_strain(base)
        brothers("Caleb", "George")
        assert base.tally("brothers", "Caleb", "George") is True
    def test_base_strains_corrects(self):
        # Both strains registered; known facts tally True in either order.
        base = KnowledgeBase(2)
        set_up_strain(base)
        assert base.amount_of_strains() == 2
        assert base.tally("parent", "Bob", "Caleb") is True
        assert base.tally("brothers", "Caleb", "Tim") is True
        assert base.tally("brothers", "Tim", "Caleb") is True
    def test_base_strains_fails(self):
        # Unknown facts: wildcard queries yield [], exact queries False.
        base = KnowledgeBase(2)
        set_up_strain(base)
        assert base.tally("parent", "Tim", _) == []
        assert base.tally("parent", "George", "Tim") is False
        assert base.tally("brothers", "Gina", _) == []
    def test_base_strains_list_results(self):
        # Wildcard queries list every matching fact as tuples.
        base = KnowledgeBase(2)
        brothers, parent = set_up_strain(base)
        assert base.tally("parent", "Bob", _) == [("Bob", "Caleb"), ("Bob", "George"), ("Bob", "Tim")]
        assert base.tally("brothers", "Caleb", _) == [("Caleb", "George"), ("Caleb", "Tim")]
assert base.tally("brothers", "Tim", _) == [("Tim", "Caleb"), ("Tim", "George")] | {
"content_hash": "d54256bc080304a3bafe623a8371a65e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 102,
"avg_line_length": 28,
"alnum_prop": 0.6009475218658892,
"repo_name": "fran-bravo/pylogic-module",
"id": "98bc2045df6fb1f32c84f57eb3e9666dff70687a",
"size": "2744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_case_decorator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32366"
}
],
"symlink_target": ""
} |
from django.db import models
# Create your models here.
class Messages(models.Model):
    """A user-posted message tied to a state/city location."""
    state = models.CharField('State', max_length=64)
    city = models.CharField('City', max_length=64)
    username = models.CharField('User', max_length=64)
    message = models.TextField('Message')
    # auto_now_add: timestamp is set automatically when the row is created
    create_time = models.DateTimeField('Date', auto_now_add=True)
class Meta:
ordering = ['state', 'city', 'create_time'] | {
"content_hash": "e80db05b734919acf11646b5c55ee745",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 32.53846153846154,
"alnum_prop": 0.6808510638297872,
"repo_name": "jasonrhaas/ducking-adventure",
"id": "63f50af5a4a418ab54ccc9f44c066105bae62d6d",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/skynet/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7396"
}
],
"symlink_target": ""
} |
import test_collective_api_base as test_base
import paddle
import paddle.distributed as dist
import paddle.fluid as fluid
class TestCollectiveIsendIrecvAPI(test_base.TestCollectiveAPIRunnerBase):
    """Runner exercising paddle.distributed isend/irecv between two ranks."""
    def __init__(self):
        # Single default communication ring.
        self.global_ring_id = 0
    def get_model(self, main_prog, startup_program, rank, indata=None):
        """Rank 0 isends *indata* to rank 1 (which irecvs into its tensor);
        both ranks return the resulting tensor as a numpy array in a list.

        `indata` is read via `.dtype` -- presumably a numpy array; verify
        against the harness.
        """
        with fluid.program_guard(main_prog, startup_program):
            # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16
            if indata.dtype == "bfloat16":
                # bfloat16 path: round-trip through uint16, return as float32
                tindata = paddle.to_tensor(indata, "float32").cast("uint16")
                if rank == 0:
                    task = dist.isend(tindata, dst=1)
                else:
                    task = dist.irecv(tindata, src=0)
                task.wait()
                return [tindata.cast("float32").numpy()]
            else:
                tindata = paddle.to_tensor(indata)
                if rank == 0:
                    task = dist.isend(tindata, dst=1)
                else:
                    task = dist.irecv(tindata, src=0)
                task.wait()
                return [tindata.numpy()]
# Launched as a subprocess by the collective-API test harness.
if __name__ == "__main__":
    test_base.runtime_main(TestCollectiveIsendIrecvAPI, "sendrecv")
| {
"content_hash": "4c0714e12e0b28262bdeccb37ff08851",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 119,
"avg_line_length": 37.35294117647059,
"alnum_prop": 0.5645669291338583,
"repo_name": "PaddlePaddle/Paddle",
"id": "3637a6f4343b53b54418de99265ae191eca125fd",
"size": "1881",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/collective/collective_isend_irecv_api_dygraph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
from openturns import *
TESTPREAMBLE()
# Smoke-test the Hermite orthogonal-polynomial factory (Python 2 syntax).
try:
    hermite = HermiteFactory()
    print "hermite=", hermite
    # print the first 10 polynomials
    for i in range(10):
        print "hermite(", i, ")=", hermite.build(i)
    roots = hermite.getRoots(10)
    print "hermite(10) roots=", repr(roots)
    nodes, weights = hermite.getNodesAndWeights(10)
    print "hermite(10) nodes=", nodes, "and weights=", weights
except:
    import sys
    # NOTE(review): sys.exc_type/sys.exc_value are Python-2-only APIs.
    print "t_HermiteFactory_std.py", sys.exc_type, sys.exc_value
| {
"content_hash": "253e4144169ef0faa80609d5a80b46eb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 28.9375,
"alnum_prop": 0.6565874730021598,
"repo_name": "sofianehaddad/ot-svn",
"id": "ae5316fcfb9aa889daa2136b7fe3e9bad9ab6581",
"size": "487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/t_HermiteFactory_std.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6498"
},
{
"name": "C",
"bytes": "455749"
},
{
"name": "C++",
"bytes": "10021345"
},
{
"name": "CMake",
"bytes": "240050"
},
{
"name": "FORTRAN",
"bytes": "299"
},
{
"name": "Makefile",
"bytes": "12372"
},
{
"name": "NSIS",
"bytes": "26263"
},
{
"name": "Python",
"bytes": "1221927"
},
{
"name": "R",
"bytes": "11141"
},
{
"name": "Scilab",
"bytes": "2612"
},
{
"name": "Shell",
"bytes": "20403"
},
{
"name": "TeX",
"bytes": "4250"
},
{
"name": "Visual Basic",
"bytes": "3294"
}
],
"symlink_target": ""
} |
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
# REST base URI for the update-server management endpoints
uri = "/core/update_servers"
# No module/firmware-version prerequisites for these endpoints
requires_modules = None
requires_version = None
def get_all(isamAppliance, check_mode=False, force=False):
    """Retrieve the full list of configured Update Servers."""
    return isamAppliance.invoke_get(
        "Get Update Servers", uri,
        requires_modules=requires_modules,
        requires_version=requires_version)
def get(isamAppliance, name, check_mode=False, force=False):
    """Retrieve a single update server, looked up by name."""
    found = search(isamAppliance, name=name, check_mode=check_mode, force=force)
    us_id = found['data']
    if us_id == {}:
        # nothing matched the name; return an empty result object
        logger.info("Update Server {0} had no match, skipping retrieval.".format(name))
        return isamAppliance.create_return_object()
    return _get(isamAppliance, us_id)
def _get(isamAppliance, us_id):
    """Fetch one update server by its uuid."""
    return isamAppliance.invoke_get(
        "Retrieve a specific update server", "{0}/{1}".format(uri, us_id))
def search(isamAppliance, name, force=False, check_mode=False):
    """Search update server by name; result 'data' holds the uuid or {}."""
    all_servers = get_all(isamAppliance)
    result = isamAppliance.create_return_object()
    for server in all_servers['data']['luServers']:
        if server['name'] == name:
            logger.info("Found Update Server {0} id: {1}".format(name, server['uuid']))
            result['data'] = server['uuid']
            result['rc'] = 0
    return result
def set(isamAppliance, priority, name, enabled, hostName, port, trustLevel, useProxy=False, useProxyAuth=False,
        cert=None, proxyHost=None, proxyPort=None, proxyUser=None, proxyPwd=None, new_name=None, check_mode=False,
        force=False):
    """Create the update server when absent, otherwise update it in place."""
    exists = (search(isamAppliance, name=name))['data'] != {}
    if not exists:
        # Force the add - we already know the update server does not exist.
        logger.info("Update Server {0} had no match, requesting to add new one.".format(name))
        return add(isamAppliance, priority, name, enabled, hostName, port, trustLevel, useProxy,
                   useProxyAuth, cert, proxyHost, proxyPort, proxyUser, proxyPwd, check_mode, True)
    logger.info("Update Server {0} exists, requesting to update.".format(name))
    return update(isamAppliance, priority, name, enabled, hostName, port, trustLevel, useProxy, useProxyAuth, cert,
                  proxyHost, proxyPort, proxyUser, proxyPwd, new_name, check_mode, force)
def add(isamAppliance, priority, name, enabled, hostName, port, trustLevel, useProxy=False, useProxyAuth=False, cert="",
        proxyHost=None, proxyPort=None, proxyUser=None, proxyPwd=None, check_mode=False, force=False):
    """
    Add an Update Server.

    Skips the POST when a server with the same name already exists (unless
    force is True). check_mode reports a pending change without calling
    the API.
    """
    # BUG fix: search() returns a full return-object dict and is never == {},
    # so the original `search(...) == {}` check was always False and the
    # non-forced add path could never fire. Compare the 'data' field instead,
    # matching the existence check in set().
    if force is True or (search(isamAppliance, name=name))['data'] == {}:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            json_data = {"priority": priority,
                         "name": name,
                         "enabled": enabled,
                         "hostName": hostName,
                         "port": port,
                         "trustLevel": trustLevel,
                         "useProxy": useProxy,
                         "useProxyAuth": useProxyAuth,
                         "_isNew": True,
                         "cert": cert,
                         "proxyHost": proxyHost,
                         "proxyPort": proxyPort,
                         "proxyUser": proxyUser,
                         "proxyPwd": proxyPwd}
            return isamAppliance.invoke_post("Add a Update Server", uri, json_data, requires_modules=requires_modules,
                                             requires_version=requires_version)
    return isamAppliance.create_return_object()
def update(isamAppliance, priority, name, enabled, hostName, port, trustLevel, useProxy=False, useProxyAuth=False,
           cert="", proxyHost=None, proxyPort=None, proxyUser=None, proxyPwd=None, new_name=None, check_mode=False,
           force=False):
    """Update an update server's details, skipping no-op changes."""
    us_id, needs_update, payload = _check(
        isamAppliance, priority, name, enabled, hostName, port, trustLevel,
        useProxy, useProxyAuth, cert, proxyHost, proxyPort, proxyUser, proxyPwd,
        new_name)
    if not (force is True or needs_update is True):
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_put(
        "Update an update server", "{0}/{1}".format(uri, us_id), payload,
        requires_modules=requires_modules,
        requires_version=requires_version)
def _check(isamAppliance, priority, name, enabled, hostName, port, trustLevel, useProxy, useProxyAuth,
           cert, proxyHost, proxyPort, proxyUser, proxyPwd, new_name):
    """
    Decide whether an update is needed.

    Returns (us_id, update_required, json_data): the server's uuid (None
    when the name has no match), whether the desired state differs from
    the appliance's current state, and the payload to PUT if it does.
    """
    update_required = False
    json_data = {"priority": priority,
                 "name": name,
                 "enabled": enabled,
                 "hostName": hostName,
                 "port": port,
                 "trustLevel": trustLevel,
                 "useProxy": useProxy,
                 "useProxyAuth": useProxyAuth,
                 "cert": cert,
                 "proxyHost": proxyHost,
                 "proxyPort": proxyPort,
                 "proxyUser": proxyUser,
                 "proxyPwd": proxyPwd}
    ret_obj = get(isamAppliance, name)
    if ret_obj['data'] == {}:
        logger.warning("Update Server not found, returning no update required.")
        return None, update_required, json_data
    else:
        us_id = ret_obj['data']['uuid']
        # Optional rename: send new_name when provided, else keep name.
        if new_name is not None:
            json_data['name'] = new_name
        else:
            json_data['name'] = name
        # uuid is server-assigned; exclude it from the comparison.
        del ret_obj['data']['uuid']
        sorted_json_data = ibmsecurity.utilities.tools.json_sort(json_data)
        logger.debug("Sorted input: {0}".format(sorted_json_data))
        sorted_ret_obj = ibmsecurity.utilities.tools.json_sort(ret_obj['data'])
        logger.debug("Sorted existing data: {0}".format(sorted_ret_obj))
        # Any difference between desired and current state requires a PUT.
        if sorted_ret_obj != sorted_json_data:
            logger.info("Changes detected, update needed.")
            update_required = True
    return us_id, update_required, json_data
def enable(isamAppliance, name, enabled, check_mode=False, force=False):
    """
    Toggle an update server's enabled flag, preserving every other setting.
    """
    warnings = []
    ret_obj = get(isamAppliance=isamAppliance, name=name)
    if ret_obj['data'] == {}:
        warnings.append("Update Server {0} not found.".format(name))
    elif force is True or enabled != ret_obj['data']['enabled']:
        logger.debug("Enable flag needs to be updated!")
        # Re-submit the server's current settings with only `enabled`
        # changed; force=True because the difference was confirmed above.
        return update(isamAppliance, priority=ret_obj['data']['priority'], name=name, enabled=enabled,
                      hostName=ret_obj['data']['hostName'], port=ret_obj['data']['port'],
                      trustLevel=ret_obj['data']['trustLevel'], useProxy=ret_obj['data']['useProxy'],
                      useProxyAuth=ret_obj['data']['useProxyAuth'], cert=ret_obj['data']['cert'],
                      proxyHost=ret_obj['data']['proxyHost'], proxyPort=ret_obj['data']['proxyPort'],
                      proxyUser=ret_obj['data']['proxyUser'], proxyPwd=ret_obj['data']['proxyPwd'],
                      check_mode=check_mode,
                      force=True)
    return isamAppliance.create_return_object(warnings=warnings)
def delete(isamAppliance, name, check_mode=False, force=False):
    """Delete the named Update Server if it exists."""
    found = search(isamAppliance, name=name)
    if found['data'] == {}:
        logger.info("Update Server: {0} not found, delete skipped.".format(name))
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_delete(
        "Delete an Update Server", "{0}/{1}".format(uri, found['data']))
def compare(isamAppliance1, isamAppliance2):
    """Compare Update Servers between two appliances, ignoring uuids."""
    ret_obj1 = get_all(isamAppliance1)
    ret_obj2 = get_all(isamAppliance2)
    # uuids are appliance-specific; strip them before diffing.
    for ret_obj in (ret_obj1, ret_obj2):
        for server in ret_obj['data']:
            del server['uuid']
    return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])
| {
"content_hash": "e54a5b9888f851d39b1c63d28fd3f43f",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 120,
"avg_line_length": 40.89719626168224,
"alnum_prop": 0.591636197440585,
"repo_name": "IBM-Security/ibmsecurity",
"id": "898fd64bd9fa187589ef4b60630c54fdfe955982",
"size": "8752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibmsecurity/isam/base/update_servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1501984"
}
],
"symlink_target": ""
} |
"""Create a CSV file from the JSON output of sunlight.go."""
import json
import os
import sys
import codecs
def main():
    """Aggregate per-issuer violation counts from certs.json into a CSV."""
    if len(sys.argv) != 3:
        sys.exit("Usage: " + sys.argv[0] + " <certs.json> <output_file.csv>")
    f_in = open(sys.argv[1], "r")
    #f_out = codecs.open(sys.argv[2], "w", encoding='utf8')
    f_out = open(sys.argv[2], "w")
    blob = json.loads(f_in.read())
    certs = blob["Certs"]
    issuers = {};
    # Tally each violation flag per issuer.
    for c in certs:
        issuer = c["Issuer"]
        if not issuer in issuers:
            issuers[issuer] = { "DeprecatedVersion": 0,
                                "DeprecatedSignatureAlgorithm": 0,
                                "ExpTooSmall": 0,
                                "IsCA": 0,
                                "KeyTooShort": 0,
                                "MissingCNinSAN": 0,
                                "ValidPeriodTooLong": 0 }
        if c["DeprecatedVersion"]:
            issuers[issuer]["DeprecatedVersion"] += 1
        if c["DeprecatedSignatureAlgorithm"]:
            issuers[issuer]["DeprecatedSignatureAlgorithm"] += 1
        if c["ExpTooSmall"]:
            issuers[issuer]["ExpTooSmall"] += 1
        if c["IsCA"]:
            issuers[issuer]["IsCA"] += 1
        if c["KeyTooShort"]:
            issuers[issuer]["KeyTooShort"] += 1
        if c["MissingCNinSAN"]:
            issuers[issuer]["MissingCNinSAN"] += 1
        if c["ValidPeriodTooLong"]:
            issuers[issuer]["ValidPeriodTooLong"] += 1
    f_out.write("issuer,deprecatedVersion,deprecatedSignatureAlgorithm,expTooSmall,isCA,keyTooShort,missingCNinSAN,validPeriodTooLong,n_violations\n");
    for issuer in issuers:
        # NOTE(review): IsCA is excluded from n_violations -- presumably
        # informational rather than a violation; confirm.
        n_violations = (issuers[issuer]["DeprecatedVersion"] +
                        issuers[issuer]["DeprecatedSignatureAlgorithm"] +
                        issuers[issuer]["ExpTooSmall"] +
                        issuers[issuer]["KeyTooShort"] +
                        issuers[issuer]["MissingCNinSAN"] +
                        issuers[issuer]["ValidPeriodTooLong"])
        # NOTE(review): .encode('utf-8').replace(",", " - ") mixes bytes and
        # str under Python 3 (TypeError) -- this script targets Python 2.
        f_out.write("%s,%d,%d,%d,%d,%d,%d,%d,%d\n" % (
            issuer.encode('utf-8').replace(",", " - "),
            issuers[issuer]["DeprecatedVersion"],
            issuers[issuer]["DeprecatedSignatureAlgorithm"],
            issuers[issuer]["ExpTooSmall"],
            issuers[issuer]["IsCA"],
            issuers[issuer]["KeyTooShort"],
            issuers[issuer]["MissingCNinSAN"],
            issuers[issuer]["ValidPeriodTooLong"], n_violations));
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "25cbfe2de6737653bb56034c85e51827",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 149,
"avg_line_length": 36.184615384615384,
"alnum_prop": 0.5705782312925171,
"repo_name": "mozkeeler/sunlight",
"id": "e616d32b8abad2aafd8c6458eb9bdf15fab2a301",
"size": "2370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aggregate_issuers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "161"
},
{
"name": "Go",
"bytes": "26572"
},
{
"name": "HTML",
"bytes": "4267"
},
{
"name": "JavaScript",
"bytes": "11752"
},
{
"name": "Python",
"bytes": "2370"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
from PyQt5 import QtCore, QtWidgets
class ProgressDialog(QtWidgets.QDialog):
    """Modal dialog showing the progress of a background calculation.

    progress() and calculation_finished() may be called from a worker
    thread; they marshal onto the GUI thread via QMetaObject.invokeMethod
    with a queued connection.
    """

    def __init__(self, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.finished = False
        self.initUI()

    def initUI(self):
        """Build the label + progress bar layout."""
        hbox = QtWidgets.QVBoxLayout()
        self.label = QtWidgets.QLabel('starting calculation', self)
        self.progressbar = QtWidgets.QProgressBar(self)
        # self.cancel_btn = QtWidgets.QPushButton('Cancel', self)
        # self.cancel_btn.clicked.connect(self.cancel)
        self.setMinimumSize(300,100)
        vbox = QtWidgets.QHBoxLayout()
        vbox.addStretch()
        # vbox.addWidget(self.cancel_btn)
        vbox.addStretch()
        hbox.addWidget(self.label)
        hbox.addWidget(self.progressbar)
        hbox.addLayout(vbox)
        self.setLayout(hbox)

    def calculation_finished(self):
        """Mark the work done and close the dialog from the GUI thread."""
        self.finished = True
        QtCore.QMetaObject.invokeMethod(self, "close_dialog", QtCore.Qt.QueuedConnection)

    def progress(self, value):
        """Set the bar to *value*; safe to call from any thread."""
        QtCore.QMetaObject.invokeMethod(self.progressbar, "setValue", QtCore.Qt.QueuedConnection, QtCore.Q_ARG(int, value))

    @QtCore.pyqtSlot()
    def close_dialog(self):
        """Close with Accepted if finished, Rejected otherwise."""
        if self.finished:
            self.done(QtWidgets.QDialog.Accepted)
        else:
            self.done(QtWidgets.QDialog.Rejected)

    def print_step(self, *text):
        """Show a space-joined status message in the label."""
        tmp = str(text[0])
        for t in text[1:]:
            tmp += ' {}'.format(t)
        self.label.setText(tmp)

    def cancel(self):
        # BUG fix: `print 'canceled calculation'` was Python 2 statement
        # syntax -- a SyntaxError under Python 3, which PyQt5 code targets.
        print('canceled calculation')
        self.close_dialog()
| {
"content_hash": "3993626a2260cbe8e3cd980e6813f4df",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 123,
"avg_line_length": 29.5,
"alnum_prop": 0.6264908976773383,
"repo_name": "sciapp/pyMolDyn",
"id": "1eb7abbc4e3ff95a57fe970411bd9cb532e0686b",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gui/dialogs/progress_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "1998"
},
{
"name": "C",
"bytes": "32038"
},
{
"name": "CSS",
"bytes": "911"
},
{
"name": "HTML",
"bytes": "21678"
},
{
"name": "Makefile",
"bytes": "7526"
},
{
"name": "Python",
"bytes": "664457"
},
{
"name": "Ruby",
"bytes": "516"
},
{
"name": "Shell",
"bytes": "21156"
}
],
"symlink_target": ""
} |
import pyglet
from .physics import Circle
# Shared player texture, anchored at its center so sprite x/y is the middle.
TEXTURE_PLAYER = pyglet.resource.texture('char.png')
TEXTURE_PLAYER.anchor_x = TEXTURE_PLAYER.width / 2
TEXTURE_PLAYER.anchor_y = TEXTURE_PLAYER.height / 2
class Actor:
    """
    Base class for all actors

    The x/y properties mirror every position change into the sprite and
    the physics shape, and remember the previous position so collisions
    can be rolled back.
    """
    # NOTE(review): class-level defaults; _batch in particular is a single
    # Batch shared by every Actor subclass -- confirm one shared draw
    # batch is intended.
    _x = 0
    _y = 0
    _last_x = 0
    _last_y = 0
    _batch = pyglet.graphics.Batch()
    _sprite = None
    _physics_shape = None
    @property
    def x(self):
        return self._x
    @x.setter
    def x(self, value):
        # keep the previous x for collision rollback
        self._last_x = self._x
        self._x = value
        if self._sprite:
            self._sprite.x = self._x
        if self._physics_shape:
            self._physics_shape.x = self._x
    @property
    def y(self):
        return self._y
    @y.setter
    def y(self, value):
        # keep the previous y for collision rollback
        self._last_y = self._y
        self._y = value
        if self._sprite:
            self._sprite.y = self._y
        if self._physics_shape:
            self._physics_shape.y = self._y
    def get_shape(self):
        # physics shape, or None if the subclass did not create one
        return self._physics_shape
    def on_draw(self):
        self._batch.draw()
    def on_update(self, dt):
        # subclasses must override
        raise NotImplementedError()
class Player(Actor):
    """Keyboard-driven actor with a circular physics shape."""
    # movement speed in pixels per second
    VELOCITY = 200
    # input flags, toggled externally by key handlers
    should_move_left = False
    should_move_right = False
    should_move_up = False
    should_move_down = False
    # NOTE(review): the *_move counters below appear unused in this module
    left_move = 0
    right_move = 0
    up_move = 0
    down_move = 0
    def __init__(self):
        self._physics_shape = Circle(TEXTURE_PLAYER.width / 2)
        self._physics_shape.set_callback(self._collision_callback)
        self._sprite = pyglet.sprite.Sprite(
            img=TEXTURE_PLAYER,
            batch=self._batch
        )
    def _collision_callback(self, **kwargs):
        # Roll back to the previous position on whichever axis collided.
        x = self._x
        y = self._y
        if kwargs.get('left') or kwargs.get('right'):
            x = self._last_x
        if kwargs.get('top') or kwargs.get('bottom'):
            y = self._last_y
        self.x = x
        self.y = y
    def look_at(self, x, y):
        # NOTE(review): rotation logic is commented out; currently a no-op.
        pass
        """
        dx = x - self.x
        dy = y - self.y
        radians = math.atan2(dy, dx)
        self._sprite.rotation = -radians * (180 / math.pi)
        """
    def on_update(self, dt):
        """Advance the position from the input flags; called once per frame."""
        force_x = 0
        force_y = 0
        if self.should_move_left:
            force_x -= 1
        if self.should_move_right:
            force_x += 1
        if self.should_move_up:
            force_y += 1
        if self.should_move_down:
            force_y -= 1
        # update position if we are moving
        self.x += int((force_x * self.VELOCITY) * dt)
        self.y += int((force_y * self.VELOCITY) * dt)
| {
"content_hash": "33bc18ad47a8b61546e850113de7e3d9",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 66,
"avg_line_length": 21.983193277310924,
"alnum_prop": 0.525611620795107,
"repo_name": "paeronskruven/pyweek22",
"id": "bcbc9a90e1eccda936f710f1e97f35cfe7ba2e73",
"size": "2616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pw22/actors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20420"
}
],
"symlink_target": ""
} |
"""
Project management classes and functions
"""
from PyQt5 import QtWidgets, QtCore
from views.message_dialog import Ui_message_dialog
class MessageHelper(QtWidgets.QDialog):
    """Reusable dialog for showing a short text message to the user."""

    def __init__(self, parent):
        super(MessageHelper, self).__init__(parent)
        self.ui = Ui_message_dialog()
        self.ui.setupUi(self)
        self.ui.close_button.clicked.connect(self.close)
        # Remove '?' icon
        self.setWindowFlags(self.windowFlags() & (~QtCore.Qt.WindowContextHelpButtonHint))

    def show_message(self, message, title=None):
        """Display *message*; optionally retitle the window first."""
        self.ui.label.setText(message)
        if title is not None:
            self.setWindowTitle(title)
        self.adjustSize()
        self.show()
| {
"content_hash": "afc04705c7a1b0916839cd1a29d1de42",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 25.379310344827587,
"alnum_prop": 0.6494565217391305,
"repo_name": "santosfamilyfoundation/SantosGUI",
"id": "569cf20ebed99baa3cfa78dd9ee2afee9f823b2d",
"size": "736",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "application/message_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181438"
},
{
"name": "Shell",
"bytes": "1516"
}
],
"symlink_target": ""
} |
"""A script that will rewrite audiotest* metadata to match their filenames."""
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import * # noqa
from glob import glob
import os
import mutagen
for path in glob(u'audiotest*'):
    track = mutagen.File(path, easy=True)
    if track is None:
        print('could not open', path)
        continue
    # Wipe every existing tag before writing fresh ones.
    for tag in list(track.tags.keys()):
        del track.tags[tag]
    # Derive the new tag values from the file name itself.
    stem = os.path.basename(path)
    track['title'] = stem + ' title'
    track['artist'] = stem + ' artist'
    track.save()
    # Re-open and dump the tags to verify the write round-trips.
    track = mutagen.File(path, easy=True)  # assume it worked; it worked above
    print(path)
    print(' ', track.tags)
| {
"content_hash": "c249baf797ecfb927f6cc3705d5d2344",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 82,
"avg_line_length": 26.266666666666666,
"alnum_prop": 0.6510152284263959,
"repo_name": "thebigmunch/gmusicapi",
"id": "c5e8f4aa3f36f93481010b61de4bb8d60abd07db",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gmusicapi/test/rewrite_audiotest_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "356173"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template.context import RequestContext

from models import Cytology
@login_required(login_url='/', redirect_field_name='')
def register_cytology(request):
    """Create a Cytology record from the POSTed form fields.

    Reads the six form fields from ``request.POST`` (missing fields become
    ``None`` via ``.get``), persists the record, then redirects to the
    exam's detail page.

    Bug fixed: the original called ``redirect`` without importing it, which
    raised ``NameError`` on every successful save (``redirect`` is now
    imported from ``django.shortcuts`` at the top of the file).
    """
    cytology = Cytology(
        clinical_information=request.POST.get('clinical_information'),
        quantity=request.POST.get('quantity'),
        microscopic=request.POST.get('microscopic'),
        conclusion=request.POST.get('conclusion'),
        note=request.POST.get('note'),
        footer=request.POST.get('footer'),
    )
    cytology.save()
    # NOTE(review): assumes cytology.exam is set by the model layer — the
    # POST data shown here never assigns it; verify against the model.
    return redirect('/exame/%d' % cytology.exam.id)
"content_hash": "6f8407d46e052d131663e343135ff47d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 32.13793103448276,
"alnum_prop": 0.7124463519313304,
"repo_name": "msfernandes/anato-hub",
"id": "96ed757f42ab326b8b546da0402ba85c6487a776",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cytology/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "383735"
},
{
"name": "JavaScript",
"bytes": "159416"
},
{
"name": "Python",
"bytes": "94588"
},
{
"name": "Ruby",
"bytes": "903"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
} |
"""
Tests for the Inspur InStorage volume driver.
"""
import re
from oslo_concurrency import processutils
from oslo_utils import units
import six
from cinder import exception
from cinder import utils
from cinder.volume.drivers.inspur.instorage import instorage_const
from cinder.volume.drivers.inspur.instorage import instorage_fc
from cinder.volume.drivers.inspur.instorage import instorage_iscsi
MCS_POOLS = ['openstack', 'openstack1']


def get_test_pool(get_all=False):
    """Return the full simulated pool list, or just the primary pool name."""
    return MCS_POOLS if get_all else MCS_POOLS[0]
class FakeInStorageMCSFcDriver(instorage_fc.InStorageMCSFCDriver):
    """FC driver whose SSH transport is rerouted to an in-memory simulator."""

    def __init__(self, *args, **kwargs):
        super(FakeInStorageMCSFcDriver, self).__init__(*args, **kwargs)

    def set_fake_storage(self, fake):
        # Remember the simulator object that will answer CLI commands.
        self.fake_storage = fake

    def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
        # Validate the command just like the real driver, then hand it to
        # the simulator instead of an SSH connection.
        utils.check_ssh_injection(cmd)
        return self.fake_storage.execute_command(cmd, check_exit_code)
class FakeInStorageMCSISCSIDriver(instorage_iscsi.InStorageMCSISCSIDriver):
    """iSCSI driver whose SSH transport is rerouted to an in-memory simulator."""

    def __init__(self, *args, **kwargs):
        super(FakeInStorageMCSISCSIDriver, self).__init__(*args, **kwargs)

    def set_fake_storage(self, fake):
        # Remember the simulator object that will answer CLI commands.
        self.fake_storage = fake

    def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
        # Validate the command just like the real driver, then hand it to
        # the simulator instead of an SSH connection.
        utils.check_ssh_injection(cmd)
        return self.fake_storage.execute_command(cmd, check_exit_code)
class FakeInStorage(object):
def __init__(self, pool_name):
    """Build the simulator's in-memory state for an InStorage MCS array.

    :param pool_name: pool name(s) stored under the
        ``instorage_mcs_volpool_name`` flag; indexed by ``_cmd_lsmdiskgrp``.
    """
    self._flags = {'instorage_mcs_volpool_name': pool_name}
    # Object tables, keyed by object name (volumes, hosts, mappings, ...).
    self._volumes_list = {}
    self._hosts_list = {}
    self._mappings_list = {}
    self._lcmappings_list = {}
    self._lcconsistgrp_list = {}
    self._rcrelationship_list = {}
    self._partnership_list = {}
    self._partnershipcandidate_list = {}
    # Primary and auxiliary simulated clusters.
    self._system_list = {'instorage-mcs-sim':
                         {'id': '0123456789ABCDEF',
                          'name': 'instorage-mcs-sim'},
                         'aux-mcs-sim': {'id': 'ABCDEF0123456789',
                                         'name': 'aux-mcs-sim'}}
    self._other_pools = {'openstack2': {}, 'openstack3': {}}
    # Per-command error-injection switches: tests set a value here and the
    # corresponding _cmd_* method misbehaves once, then resets the switch.
    self._next_cmd_error = {
        'lsportip': '',
        'lsfabric': '',
        'lsiscsiauth': '',
        'lsnodecanister': '',
        'mkvdisk': '',
        'lsvdisk': '',
        'lslcmap': '',
        'prestartlcmap': '',
        'startlcmap': '',
        'rmlcmap': '',
        'lslicense': '',
        'lsguicapabilities': '',
        'lshost': '',
        'lsrcrelationship': ''
    }
    # Canned (stdout, stderr) pairs mimicking real CLI error messages.
    self._errors = {
        'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'),
        'CMMVC6035E': ('', 'CMMVC6035E The action failed as the '
                           'object already exists.'),
        'CMMVC5753E': ('', 'CMMVC5753E The specified object does not '
                           'exist or is not a suitable candidate.'),
        'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'),
        'CMMVC6581E': ('', 'CMMVC6581E The command has failed because '
                           'the maximum number of allowed iSCSI '
                           'qualified names (IQNs) has been reached, '
                           'or the IQN is already assigned or is not '
                           'valid.'),
        'CMMVC5754E': ('', 'CMMVC5754E The specified object does not '
                           'exist, or the name supplied does not meet '
                           'the naming rules.'),
        'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was '
                           'not created because the VDisk is already '
                           'mapped to a host.'),
        'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was '
                           'not created because a VDisk is already '
                           'mapped to this host with this SCSI LUN.'),
        'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was '
                           'not deleted because it is mapped to a '
                           'host or because it is part of a LocalCopy '
                           'or Remote Copy mapping, or is involved in '
                           'an image mode migrate.'),
        'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered '
                           'is not valid. The name can contain letters, '
                           'numbers, spaces, periods, dashes, and '
                           'underscores. The name must begin with a '
                           'letter or an underscore. The name must not '
                           'begin or end with a space.'),
        'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or '
                           'more of the configured port names is in a '
                           'mapping.'),
        'CMMVC5924E': ('', 'CMMVC5924E The LocalCopy mapping was not '
                           'created because the source and target '
                           'virtual disks (VDisks) are different sizes.'),
        'CMMVC6303E': ('', 'CMMVC6303E The create failed because the '
                           'source and target VDisks are the same.'),
        'CMMVC7050E': ('', 'CMMVC7050E The command failed because at '
                           'least one node in the I/O group does not '
                           'support compressed VDisks.'),
        'CMMVC6430E': ('', 'CMMVC6430E The command failed because the '
                           'target and source managed disk groups must '
                           'be different.'),
        'CMMVC6353E': ('', 'CMMVC6353E The command failed because the '
                           'copy specified does not exist.'),
        'CMMVC6446E': ('', 'The command failed because the managed disk '
                           'groups have different extent sizes.'),
        # Catch-all for invalid state transitions:
        'CMMVC5903E': ('', 'CMMVC5903E The LocalCopy mapping was not '
                           'changed because the mapping or consistency '
                           'group is another state.'),
        'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported '
                           'parameter.'),
        'CMMVC5982E': ('', 'CMMVC5982E The operation was not performed '
                           'because it is not valid given the current '
                           'relationship state.'),
        'CMMVC5963E': ('', 'CMMVC5963E No direction has been defined.'),
    }
    # LocalCopy mapping state machine: state -> {event: next_state}.
    # A value of None marks a terminal state.
    self._lc_transitions = {'begin': {'make': 'idle_or_copied'},
                            'idle_or_copied': {'prepare': 'preparing',
                                               'delete': 'end',
                                               'delete_force': 'end'},
                            'preparing': {'flush_failed': 'stopped',
                                          'wait': 'prepared'},
                            'end': None,
                            'stopped': {'prepare': 'preparing',
                                        'delete_force': 'end'},
                            'prepared': {'stop': 'stopped',
                                         'start': 'copying'},
                            'copying': {'wait': 'idle_or_copied',
                                        'stop': 'stopping'},
                            # Assume the worst case where stopping->stopped
                            # rather than stopping idle_or_copied
                            'stopping': {'wait': 'stopped'},
                            }
    # LocalCopy consistency-group state machine; has an extra 'empty'
    # state that individual mappings do not have.
    self._lc_cg_transitions = {'begin': {'make': 'empty'},
                               'empty': {'add': 'idle_or_copied'},
                               'idle_or_copied': {'prepare': 'preparing',
                                                  'delete': 'end',
                                                  'delete_force': 'end'},
                               'preparing': {'flush_failed': 'stopped',
                                             'wait': 'prepared'},
                               'end': None,
                               'stopped': {'prepare': 'preparing',
                                           'delete_force': 'end'},
                               'prepared': {'stop': 'stopped',
                                            'start': 'copying',
                                            'delete_force': 'end',
                                            'delete': 'end'},
                               'copying': {'wait': 'idle_or_copied',
                                           'stop': 'stopping',
                                           'delete_force': 'end',
                                           'delete': 'end'},
                               # Assume the case where stopping->stopped
                               # rather than stopping idle_or_copied
                               'stopping': {'wait': 'stopped'},
                               }
    # Remote Copy relationship state machine.
    self._rc_transitions = {'inconsistent_stopped':
                            {'start': 'inconsistent_copying',
                             'stop': 'inconsistent_stopped',
                             'delete': 'end',
                             'delete_force': 'end'},
                            'inconsistent_copying': {
                                'wait': 'consistent_synchronized',
                                'start': 'inconsistent_copying',
                                'stop': 'inconsistent_stopped',
                                'delete': 'end',
                                'delete_force': 'end'},
                            'consistent_synchronized': {
                                'start': 'consistent_synchronized',
                                'stop': 'consistent_stopped',
                                'stop_access': 'idling',
                                'delete': 'end',
                                'delete_force': 'end'},
                            'consistent_stopped':
                            {'start': 'consistent_synchronized',
                             'stop': 'consistent_stopped',
                             'delete': 'end',
                             'delete_force': 'end'},
                            'end': None,
                            'idling': {
                                'start': 'inconsistent_copying',
                                'stop': 'inconsistent_stopped',
                                'stop_access': 'idling',
                                'delete': 'end',
                                'delete_force': 'end'},
                            }
def _state_transition(self, function, lcmap):
if (function == 'wait' and
'wait' not in self._lc_transitions[lcmap['status']]):
return ('', '')
if lcmap['status'] == 'copying' and function == 'wait':
if lcmap['copyrate'] != '0':
if lcmap['progress'] == '0':
lcmap['progress'] = '50'
else:
lcmap['progress'] = '100'
lcmap['status'] = 'idle_or_copied'
return ('', '')
else:
try:
curr_state = lcmap['status']
lcmap['status'] = self._lc_transitions[curr_state][function]
return ('', '')
except Exception:
return self._errors['CMMVC5903E']
def _lc_cg_state_transition(self, function, lc_consistgrp):
if (function == 'wait' and
'wait' not in self._lc_transitions[lc_consistgrp['status']]):
return ('', '')
try:
curr_state = lc_consistgrp['status']
new_state = self._lc_cg_transitions[curr_state][function]
lc_consistgrp['status'] = new_state
return ('', '')
except Exception:
return self._errors['CMMVC5903E']
# Find an unused ID
@staticmethod
def _find_unused_id(d):
    """Return (as text) the smallest non-negative integer id absent from *d*.

    *d* maps names to dicts that each carry an 'id' field.
    """
    taken = sorted(int(entry['id']) for entry in d.values())
    # The first position where the sorted id exceeds its index is a gap.
    for position, value in enumerate(taken):
        if value > position:
            return six.text_type(position)
    return six.text_type(len(taken))
# Check if name is valid
@staticmethod
def _is_invalid_name(name):
if re.match(r'^[a-zA-Z_][\w._-]*$', name):
return False
return True
# Convert argument string to dictionary
@staticmethod
def _cmd_to_dict(arg_list):
    """Parse a CLI argument vector into a dict for the _cmd_* handlers.

    Returns a dict with at least 'cmd'; flags become True, one-value
    options map to their value, and the trailing positional argument is
    stored under 'obj'.  NOTE: mutates *arg_list* in place (pops the
    'mcsinq'/'mcsop' prefix and strips quotes from volume/snapshot names).
    """
    # Options that take no value (presence == True).
    no_param_args = [
        'autodelete',
        'bytes',
        'compressed',
        'force',
        'nohdr',
        'nofmtdisk',
        'async',
        'access',
        'start'
    ]
    # Options that consume exactly one following value.
    one_param_args = [
        'chapsecret',
        'cleanrate',
        'copy',
        'copyrate',
        'delim',
        'intier',
        'filtervalue',
        'grainsize',
        'hbawwpn',
        'host',
        'iogrp',
        'iscsiname',
        'mdiskgrp',
        'name',
        'rsize',
        'scsi',
        'size',
        'source',
        'target',
        'unit',
        'vdisk',
        'warning',
        'wwpn',
        'primary',
        'consistgrp',
        'master',
        'aux',
        'cluster',
        'linkbandwidthmbits',
        'backgroundcopyrate'
    ]
    # Options whose value is optional (True when none follows).
    no_or_one_param_args = [
        'autoexpand',
    ]
    # Handle the special case of lsnode which is a two-word command
    # Use the one word version of the command internally
    if arg_list[0] in ('mcsinq', 'mcsop'):
        if arg_list[1] == 'lsnode':
            if len(arg_list) > 4:  # e.g. mcsinq lsnode -delim ! <node id>
                ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]}
            else:
                ret = {'cmd': 'lsnodecanister'}
        else:
            ret = {'cmd': arg_list[1]}
        arg_list.pop(0)
    else:
        ret = {'cmd': arg_list[0]}
    skip = False
    for i in range(1, len(arg_list)):
        if skip:
            # Previous token was an option that consumed this value.
            skip = False
            continue
        # Check for a quoted command argument for volumes and strip
        # quotes so that the simulator can match it later. Just
        # match against test naming conventions for now.
        if arg_list[i][0] == '"' and ('volume' in arg_list[i] or
                                      'snapshot' in arg_list[i]):
            arg_list[i] = arg_list[i][1:-1]
        if arg_list[i][0] == '-':
            if arg_list[i][1:] in no_param_args:
                ret[arg_list[i][1:]] = True
            elif arg_list[i][1:] in one_param_args:
                ret[arg_list[i][1:]] = arg_list[i + 1]
                skip = True
            elif arg_list[i][1:] in no_or_one_param_args:
                # Value is present only if the next token isn't an option.
                if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-':
                    ret[arg_list[i][1:]] = True
                else:
                    ret[arg_list[i][1:]] = arg_list[i + 1]
                    skip = True
            else:
                raise exception.InvalidInput(
                    reason='unrecognized argument %s' % arg_list[i])
        else:
            # Bare token: the object the command operates on.
            ret['obj'] = arg_list[i]
    return ret
@staticmethod
def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs):
"""Generic function for printing information."""
if nohdr:
del rows[0]
for index in range(len(rows)):
rows[index] = delim.join(rows[index])
return ('%s' % '\n'.join(rows), '')
@staticmethod
def _print_info_obj_cmd(header, row, delim=' ', nohdr=False):
"""Generic function for printing information for a specific object."""
objrows = []
for idx, val in enumerate(header):
objrows.append([val, row[idx]])
if nohdr:
for index in range(len(objrows)):
objrows[index] = ' '.join(objrows[index][1:])
for index in range(len(objrows)):
objrows[index] = delim.join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
@staticmethod
def _convert_bytes_units(bytestr):
num = int(bytestr)
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
while num > 1024:
num = num / 1024
unit_index += 1
return '%d%s' % (num, unit_array[unit_index])
@staticmethod
def _convert_units_bytes(num, unit):
    """Scale *num* of the given *unit* (B/KB/.../PB, case-insensitive) to bytes.

    Raises IndexError for an unrecognized unit, matching CLI strictness.
    """
    unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    idx = 0
    scaled = num
    while unit.lower() != unit_array[idx].lower():
        scaled = scaled * 1024
        idx += 1
    return six.text_type(scaled)
def _cmd_lslicense(self, **kwargs):
rows = [None] * 3
rows[0] = ['used_compression_capacity', '0.08']
rows[1] = ['license_compression_capacity', '0']
if self._next_cmd_error['lslicense'] == 'no_compression':
self._next_cmd_error['lslicense'] = ''
rows[2] = ['license_compression_enclosures', '0']
else:
rows[2] = ['license_compression_enclosures', '1']
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lsguicapabilities(self, **kwargs):
    """Report GUI capabilities; honors the 'no_compression' injection.

    When _next_cmd_error['lsguicapabilities'] is 'no_compression',
    reports license_scheme '0' once and resets the switch.
    """
    if self._next_cmd_error['lsguicapabilities'] == 'no_compression':
        self._next_cmd_error['lsguicapabilities'] = ''
        scheme = '0'
    else:
        scheme = '1813'
    rows = [
        ['license_scheme', scheme],
        ['product_key', instorage_const.DEV_MODEL_INSTORAGE],
    ]
    return self._print_info_cmd(rows=rows, **kwargs)
# Print mostly made-up stuff in the correct syntax
def _cmd_lssystem(self, **kwargs):
rows = [None] * 3
rows[0] = ['id', '0123456789ABCDEF']
rows[1] = ['name', 'instorage-mcs-sim']
rows[2] = ['code_level', '3.1.1.0 (build 87.0.1311291000)']
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lssystem_aux(self, **kwargs):
rows = [None] * 3
rows[0] = ['id', 'ABCDEF0123456789']
rows[1] = ['name', 'aux-mcs-sim']
rows[2] = ['code_level', '3.1.1.0 (build 87.0.1311291000)']
return self._print_info_cmd(rows=rows, **kwargs)
# Print mostly made-up stuff in the correct syntax, assume -bytes passed
def _cmd_lsmdiskgrp(self, **kwargs):
    """List pools, or show one pool when 'obj' (quoted name) is given.

    The configured pools come first, followed by the fixed extra pools
    'openstack2' and 'openstack3'.  A single-pool query requires the
    name to be quoted, mirroring the real CLI.
    """
    pool_num = len(self._flags['instorage_mcs_volpool_name'])
    rows = []
    rows.append(['id', 'name', 'status', 'mdisk_count',
                 'vdisk_count', 'capacity', 'extent_size',
                 'free_capacity', 'virtual_capacity', 'used_capacity',
                 'real_capacity', 'overallocation', 'warning',
                 'in_tier', 'in_tier_status'])
    for i in range(pool_num):
        row_data = [str(i + 1),
                    self._flags['instorage_mcs_volpool_name'][i], 'online',
                    '1', six.text_type(len(self._volumes_list)),
                    '3573412790272', '256', '3529926246400',
                    '1693247906775',
                    '26843545600', '38203734097', '47', '80', 'auto',
                    'inactive']
        rows.append(row_data)
    rows.append([str(pool_num + 1), 'openstack2', 'online',
                 '1', '0', '3573412790272', '256',
                 '3529432325160', '1693247906775', '26843545600',
                 '38203734097', '47', '80', 'auto', 'inactive'])
    rows.append([str(pool_num + 2), 'openstack3', 'online',
                 '1', '0', '3573412790272', '128',
                 '3529432325160', '1693247906775', '26843545600',
                 '38203734097', '47', '80', 'auto', 'inactive'])
    if 'obj' not in kwargs:
        return self._print_info_cmd(rows=rows, **kwargs)
    else:
        pool_name = kwargs['obj'].strip('\'\"')
        # Unchanged after stripping means the caller forgot the quotes.
        if pool_name == kwargs['obj']:
            raise exception.InvalidInput(
                reason='obj missing quotes %s' % kwargs['obj'])
        elif pool_name in self._flags['instorage_mcs_volpool_name']:
            for each_row in rows:
                if pool_name in each_row:
                    row = each_row
                    break
        elif pool_name == 'openstack2':
            row = rows[-2]
        elif pool_name == 'openstack3':
            row = rows[-1]
        else:
            return self._errors['CMMVC5754E']
        # Pair header names with the selected row's values.
        objrows = []
        for idx, val in enumerate(rows[0]):
            objrows.append([val, row[idx]])
        if 'nohdr' in kwargs:
            for index in range(len(objrows)):
                objrows[index] = ' '.join(objrows[index][1:])
        # NOTE(review): when both nohdr and delim are passed, the delim
        # join below runs over the already-flattened string and splices
        # the delimiter between characters — presumably never exercised
        # together; verify before relying on that combination.
        if 'delim' in kwargs:
            for index in range(len(objrows)):
                objrows[index] = kwargs['delim'].join(objrows[index])
        return ('%s' % '\n'.join(objrows), '')
# Print mostly made-up stuff in the correct syntax
def _cmd_lsnodecanister(self, **kwargs):
    """List the two fixed simulated node canisters.

    Supports two error injections via _next_cmd_error['lsnodecanister']:
    'header_mismatch' drops a header column once, 'remove_field' drops
    the first column from every row once.
    """
    rows = [None] * 3
    rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status',
               'IO_group_id', 'IO_group_name', 'config_node',
               'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias',
               'panel_name', 'enclosure_id', 'canister_id',
               'enclosure_serial_number']
    rows[1] = [
        '1',
        'node1',
        '',
        '123456789ABCDEF0',
        'online',
        '0',
        'io_grp0',
        'yes',
        '123456789ABCDEF0',
        '100',
        'iqn.1982-01.com.inspur:1234.sim.node1',
        '',
        '01-1',
        '1',
        '1',
        '0123ABC']
    rows[2] = [
        '2',
        'node2',
        '',
        '123456789ABCDEF1',
        'online',
        '0',
        'io_grp0',
        'no',
        '123456789ABCDEF1',
        '100',
        'iqn.1982-01.com.inspur:1234.sim.node2',
        '',
        '01-2',
        '1',
        '2',
        '0123ABC']
    if self._next_cmd_error['lsnodecanister'] == 'header_mismatch':
        rows[0].pop(2)
        self._next_cmd_error['lsnodecanister'] = ''
    if self._next_cmd_error['lsnodecanister'] == 'remove_field':
        for row in rows:
            row.pop(0)
        self._next_cmd_error['lsnodecanister'] = ''
    return self._print_info_cmd(rows=rows, **kwargs)
# Print information of every single node of MCS
def _cmd_lsnode(self, **kwargs):
    """Return canned '!'-delimited port details for a single node.

    The node is selected via kwargs['node_id']; an unknown id yields
    empty stdout.  The raw strings mimic real CLI output verbatim.
    """
    node_infos = dict()
    node_infos['1'] = r'''id!1
name!node1
port_id!500507680210C744
port_status!active
port_speed!8Gb
port_id!500507680220C744
port_status!active
port_speed!8Gb
'''
    node_infos['2'] = r'''id!2
name!node2
port_id!500507680220C745
port_status!active
port_speed!8Gb
port_id!500507680230C745
port_status!inactive
port_speed!N/A
'''
    node_id = kwargs.get('node_id', None)
    stdout = node_infos.get(node_id, '')
    return stdout, ''
# Print made up stuff for the ports
def _cmd_lsportfc(self, **kwargs):
    """List FC/ethernet ports for one node.

    The node is chosen from kwargs['filtervalue'] of the form
    'node_id=<n>' (1-based); the tables below are fixed test data.
    """
    node_1 = [None] * 7
    node_1[0] = ['id', 'fc_io_port_id', 'port_id', 'type',
                 'port_speed', 'node_id', 'node_name', 'WWPN',
                 'nportid', 'status', 'attachment']
    node_1[1] = ['0', '1', '1', 'fc', '8Gb', '1', 'node1',
                 '5005076802132ADE', '012E00', 'active', 'switch']
    node_1[2] = ['1', '2', '2', 'fc', '8Gb', '1', 'node1',
                 '5005076802232ADE', '012E00', 'active', 'switch']
    node_1[3] = ['2', '3', '3', 'fc', '8Gb', '1', 'node1',
                 '5005076802332ADE', '9B0600', 'active', 'switch']
    node_1[4] = ['3', '4', '4', 'fc', '8Gb', '1', 'node1',
                 '5005076802432ADE', '012A00', 'active', 'switch']
    node_1[5] = ['4', '5', '5', 'fc', '8Gb', '1', 'node1',
                 '5005076802532ADE', '014A00', 'active', 'switch']
    node_1[6] = ['5', '6', '4', 'ethernet', 'N/A', '1', 'node1',
                 '5005076802632ADE', '000000',
                 'inactive_unconfigured', 'none']
    node_2 = [None] * 7
    node_2[0] = ['id', 'fc_io_port_id', 'port_id', 'type',
                 'port_speed', 'node_id', 'node_name', 'WWPN',
                 'nportid', 'status', 'attachment']
    node_2[1] = ['6', '7', '7', 'fc', '8Gb', '2', 'node2',
                 '5005086802132ADE', '012E00', 'active', 'switch']
    node_2[2] = ['7', '8', '8', 'fc', '8Gb', '2', 'node2',
                 '5005086802232ADE', '012E00', 'active', 'switch']
    node_2[3] = ['8', '9', '9', 'fc', '8Gb', '2', 'node2',
                 '5005086802332ADE', '9B0600', 'active', 'switch']
    node_2[4] = ['9', '10', '10', 'fc', '8Gb', '2', 'node2',
                 '5005086802432ADE', '012A00', 'active', 'switch']
    node_2[5] = ['10', '11', '11', 'fc', '8Gb', '2', 'node2',
                 '5005086802532ADE', '014A00', 'active', 'switch']
    node_2[6] = ['11', '12', '12', 'ethernet', 'N/A', '2', 'node2',
                 '5005086802632ADE', '000000',
                 'inactive_unconfigured', 'none']
    node_infos = [node_1, node_2]
    # 'node_id=1' selects index 0, 'node_id=2' index 1, etc.
    node_id = int(kwargs['filtervalue'].split('=')[1]) - 1
    return self._print_info_cmd(rows=node_infos[node_id], **kwargs)
# Print mostly made-up stuff in the correct syntax
def _cmd_lsportip(self, **kwargs):
if self._next_cmd_error['lsportip'] == 'ip_no_config':
self._next_cmd_error['lsportip'] = ''
ip_addr1 = ''
ip_addr2 = ''
gw = ''
else:
ip_addr1 = '1.234.56.78'
ip_addr2 = '1.234.56.79'
ip_addr3 = '1.234.56.80'
ip_addr4 = '1.234.56.81'
gw = '1.234.56.1'
rows = [None] * 17
rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask',
'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC',
'duplex', 'state', 'speed', 'failover', 'link_state']
rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:00', 'Full',
'online', '1Gb/s', 'no', 'active']
rows[2] = ['1', '1', 'node1', '', '', '', '', '', '',
'01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes', '']
rows[3] = ['2', '1', 'node1', ip_addr3, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:01', 'Full',
'configured', '1Gb/s', 'no', 'active']
rows[4] = ['2', '1', 'node1', '', '', '', '', '', '',
'01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s',
'yes', 'inactive']
rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:02', 'Full',
'online', '1Gb/s', 'no', '']
rows[10] = ['1', '2', 'node2', '', '', '', '', '', '',
'01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes', '']
rows[11] = ['2', '2', 'node2', ip_addr4, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:03', 'Full',
'configured', '1Gb/s', 'no', 'inactive']
rows[12] = ['2', '2', 'node2', '', '', '', '', '', '',
'01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s',
'yes', '']
rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
if self._next_cmd_error['lsportip'] == 'header_mismatch':
rows[0].pop(2)
self._next_cmd_error['lsportip'] = ''
if self._next_cmd_error['lsportip'] == 'remove_field':
for row in rows:
row.pop(1)
self._next_cmd_error['lsportip'] = ''
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lsfabric(self, **kwargs):
    """List fabric logins, optionally filtered by host name or WWPN.

    Error injections via _next_cmd_error['lsfabric']: 'no_hosts' (empty
    output), 'header_mismatch', 'remove_field', 'remove_rows'.
    """
    if self._next_cmd_error['lsfabric'] == 'no_hosts':
        return ('', '')
    host_name = kwargs['host'].strip('\'\"') if 'host' in kwargs else None
    target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None
    host_infos = []
    for hv in self._hosts_list.values():
        if (not host_name) or (hv['host_name'] == host_name):
            if not target_wwpn or target_wwpn in hv['wwpns']:
                host_infos.append(hv)
                # NOTE(review): stops at the first matching host even when
                # no filter is given — presumably intentional for the
                # tests this serves; confirm before reusing unfiltered.
                break
    if not len(host_infos):
        return ('', '')
    rows = []
    rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name',
                 'local_wwpn', 'local_port', 'local_nportid', 'state',
                 'name', 'cluster_name', 'type'])
    # One output row per WWPN of each selected host.
    for host_info in host_infos:
        for wwpn in host_info['wwpns']:
            rows.append([wwpn, '123456', host_info['id'], 'nodeN',
                         'AABBCCDDEEFF0011', '1', '0123ABC', 'active',
                         host_info['host_name'], '', 'host'])
    if self._next_cmd_error['lsfabric'] == 'header_mismatch':
        rows[0].pop(0)
        self._next_cmd_error['lsfabric'] = ''
    if self._next_cmd_error['lsfabric'] == 'remove_field':
        for row in rows:
            row.pop(0)
        self._next_cmd_error['lsfabric'] = ''
    if self._next_cmd_error['lsfabric'] == 'remove_rows':
        rows = []
    return self._print_info_cmd(rows=rows, **kwargs)
def _get_lcmap_info(self, vol_name):
ret_vals = {
'fc_id': '',
'fc_name': '',
'lc_map_count': '0',
}
for lcmap in self._lcmappings_list.values():
if ((lcmap['source'] == vol_name) or
(lcmap['target'] == vol_name)):
ret_vals['fc_id'] = lcmap['id']
ret_vals['fc_name'] = lcmap['name']
ret_vals['lc_map_count'] = '1'
return ret_vals
# List information about vdisks
def _cmd_lsvdisk(self, **kwargs):
    """List vdisks, or show one vdisk when 'obj' is given.

    Listing supports 'filtervalue' of the form 'name=<x>' or
    'vdisk_UID=<x>' and the 'bytes' flag for unit conversion.  The
    single-object form also supports the 'blank_pref_node' /
    'no_pref_node' error injections via _next_cmd_error['lsvdisk'].
    """
    rows = []
    rows.append(['id', 'name', 'IO_group_id', 'IO_group_name',
                 'status', 'mdisk_grp_id', 'mdisk_grp_name',
                 'capacity', 'type', 'FC_id', 'FC_name', 'RC_id',
                 'RC_name', 'vdisk_UID', 'lc_map_count', 'copy_count',
                 'fast_write_state', 'se_copy_count', 'RC_change'])
    for vol in self._volumes_list.values():
        if (('filtervalue' not in kwargs) or
                (kwargs['filtervalue'] == 'name=' + vol['name']) or
                (kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])):
            lcmap_info = self._get_lcmap_info(vol['name'])
            if 'bytes' in kwargs:
                cap = self._convert_bytes_units(vol['capacity'])
            else:
                cap = vol['capacity']
            rows.append([six.text_type(vol['id']), vol['name'],
                         vol['IO_group_id'],
                         vol['IO_group_name'], 'online', '0',
                         get_test_pool(),
                         cap, 'striped',
                         lcmap_info['fc_id'], lcmap_info['fc_name'],
                         '', '', vol['uid'],
                         lcmap_info['lc_map_count'], '1', 'empty',
                         '1', 'no'])
    if 'obj' not in kwargs:
        return self._print_info_cmd(rows=rows, **kwargs)
    else:
        if kwargs['obj'] not in self._volumes_list:
            return self._errors['CMMVC5754E']
        vol = self._volumes_list[kwargs['obj']]
        lcmap_info = self._get_lcmap_info(vol['name'])
        cap = vol['capacity']
        cap_u = vol['used_capacity']
        cap_r = vol['real_capacity']
        cap_f = vol['free_capacity']
        if 'bytes' not in kwargs:
            # NOTE(review): this loop is a no-op — rebinding the loop
            # variable never updates cap/cap_u/cap_r/cap_f, so the raw
            # byte values are always reported.  Left as-is because the
            # driver tests may depend on the unconverted output; confirm
            # before "fixing".
            for item in [cap, cap_u, cap_r, cap_f]:
                item = self._convert_bytes_units(item)
        rows = []
        rows.append(['id', six.text_type(vol['id'])])
        rows.append(['name', vol['name']])
        rows.append(['IO_group_id', vol['IO_group_id']])
        rows.append(['IO_group_name', vol['IO_group_name']])
        rows.append(['status', 'online'])
        rows.append(['capacity', cap])
        rows.append(['formatted', vol['formatted']])
        rows.append(['mdisk_id', ''])
        rows.append(['mdisk_name', ''])
        rows.append(['FC_id', lcmap_info['fc_id']])
        rows.append(['FC_name', lcmap_info['fc_name']])
        rows.append(['RC_id', vol['RC_id']])
        rows.append(['RC_name', vol['RC_name']])
        rows.append(['vdisk_UID', vol['uid']])
        rows.append(['throttling', '0'])
        if self._next_cmd_error['lsvdisk'] == 'blank_pref_node':
            rows.append(['preferred_node_id', ''])
            self._next_cmd_error['lsvdisk'] = ''
        elif self._next_cmd_error['lsvdisk'] == 'no_pref_node':
            self._next_cmd_error['lsvdisk'] = ''
        else:
            rows.append(['preferred_node_id', '1'])
        rows.append(['fast_write_state', 'empty'])
        rows.append(['cache', 'readwrite'])
        rows.append(['udid', ''])
        rows.append(['lc_map_count', lcmap_info['lc_map_count']])
        rows.append(['sync_rate', '50'])
        rows.append(['copy_count', '1'])
        rows.append(['se_copy_count', '0'])
        rows.append(['mirror_write_priority', 'latency'])
        rows.append(['RC_change', 'no'])
        # Per-copy detail section.
        for copy in vol['copies'].values():
            rows.append(['copy_id', copy['id']])
            rows.append(['status', copy['status']])
            rows.append(['primary', copy['primary']])
            rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
            rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
            rows.append(['type', 'striped'])
            rows.append(['used_capacity', cap_u])
            rows.append(['real_capacity', cap_r])
            rows.append(['free_capacity', cap_f])
            rows.append(['in_tier', copy['in_tier']])
            rows.append(['compressed_copy', copy['compressed_copy']])
            rows.append(['autoexpand', vol['autoexpand']])
            rows.append(['warning', vol['warning']])
            rows.append(['grainsize', vol['grainsize']])
        if 'nohdr' in kwargs:
            for index in range(len(rows)):
                rows[index] = ' '.join(rows[index][1:])
        if 'delim' in kwargs:
            for index in range(len(rows)):
                rows[index] = kwargs['delim'].join(rows[index])
        return ('%s' % '\n'.join(rows), '')
def _cmd_lsiogrp(self, **kwargs):
rows = [None] * 6
rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count']
rows[1] = ['0', 'io_grp0', '2', '0', '4']
rows[2] = ['1', 'io_grp1', '2', '0', '4']
rows[3] = ['2', 'io_grp2', '0', '0', '4']
rows[4] = ['3', 'io_grp3', '0', '0', '4']
rows[5] = ['4', 'recovery_io_grp', '0', '0', '0']
return self._print_info_cmd(rows=rows, **kwargs)
# List information about hosts
def _cmd_lshost(self, **kwargs):
    """List hosts, or show one host when 'obj' (quoted name) is given.

    Error injections via _next_cmd_error['lshost']: 'missing_host',
    'bigger_troubles', and 'fail_fastpath' (the latter only for the
    host literally named 'DifferentHost').
    """
    if 'obj' not in kwargs:
        rows = []
        rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
        found = False
        # Sort hosts by names to give predictable order for tests
        # depend on it.
        for host_name in sorted(self._hosts_list.keys()):
            host = self._hosts_list[host_name]
            filterstr = 'name=' + host['host_name']
            if (('filtervalue' not in kwargs) or
                    (kwargs['filtervalue'] == filterstr)):
                rows.append([host['id'], host['host_name'], '1', '4',
                             'offline'])
                found = True
        if found:
            return self._print_info_cmd(rows=rows, **kwargs)
        else:
            return ('', '')
    else:
        if self._next_cmd_error['lshost'] == 'missing_host':
            self._next_cmd_error['lshost'] = ''
            return self._errors['CMMVC5754E']
        elif self._next_cmd_error['lshost'] == 'bigger_troubles':
            return self._errors['CMMVC6527E']
        host_name = kwargs['obj'].strip('\'\"')
        if host_name not in self._hosts_list:
            return self._errors['CMMVC5754E']
        if (self._next_cmd_error['lshost'] == 'fail_fastpath' and
                host_name == 'DifferentHost'):
            return self._errors['CMMVC5701E']
        host = self._hosts_list[host_name]
        rows = []
        rows.append(['id', host['id']])
        rows.append(['name', host['host_name']])
        rows.append(['port_count', '1'])
        rows.append(['type', 'generic'])
        rows.append(['mask', '1111'])
        rows.append(['iogrp_count', '4'])
        rows.append(['status', 'online'])
        # One detail section per iSCSI initiator and per WWPN.
        for port in host['iscsi_names']:
            rows.append(['iscsi_name', port])
            rows.append(['node_logged_in_count', '0'])
            rows.append(['state', 'offline'])
        for port in host['wwpns']:
            rows.append(['WWPN', port])
            rows.append(['node_logged_in_count', '0'])
            rows.append(['state', 'active'])
        if 'nohdr' in kwargs:
            for index in range(len(rows)):
                rows[index] = ' '.join(rows[index][1:])
        if 'delim' in kwargs:
            for index in range(len(rows)):
                rows[index] = kwargs['delim'].join(rows[index])
        return ('%s' % '\n'.join(rows), '')
# List iSCSI authorization information about hosts
def _cmd_lsiscsiauth(self, **kwargs):
if self._next_cmd_error['lsiscsiauth'] == 'no_info':
self._next_cmd_error['lsiscsiauth'] = ''
return ('', '')
rows = []
rows.append(['type', 'id', 'name', 'iscsi_auth_method',
'iscsi_chap_secret'])
for host in self._hosts_list.values():
method = 'none'
secret = ''
if 'chapsecret' in host:
method = 'chap'
secret = host['chapsecret']
rows.append(['host', host['id'], host['host_name'], method,
secret])
return self._print_info_cmd(rows=rows, **kwargs)
# List information about host->vdisk mappings
def _cmd_lshostvdiskmap(self, **kwargs):
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5754E']
rows = []
rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
'vdisk_UID'])
for mapping in self._mappings_list.values():
if (host_name == '') or (mapping['host'] == host_name):
volume = self._volumes_list[mapping['vol']]
rows.append([mapping['id'], mapping['host'],
mapping['lun'], volume['id'],
volume['name'], volume['uid']])
return self._print_info_cmd(rows=rows, **kwargs)
# List information about vdisk->host mappings
def _cmd_lsvdiskhostmap(self, **kwargs):
mappings_found = 0
vdisk_name = kwargs['obj'].strip('\'\"')
if vdisk_name not in self._volumes_list:
return self._errors['CMMVC5753E']
rows = []
rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID',
'IO_group_id', 'IO_group_name'])
for mapping in self._mappings_list.values():
if (mapping['vol'] == vdisk_name):
mappings_found += 1
volume = self._volumes_list[mapping['vol']]
host = self._hosts_list[mapping['host']]
rows.append([volume['id'], mapping['lun'], host['id'],
host['host_name'], volume['uid'],
volume['IO_group_id'], volume['IO_group_name']])
if mappings_found:
return self._print_info_cmd(rows=rows, **kwargs)
else:
return ('', '')
def _cmd_lsvdisklcmappings(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
vdisk = kwargs['obj']
rows = []
rows.append(['id', 'name'])
for v in self._lcmappings_list.values():
if v['source'] == vdisk or v['target'] == vdisk:
rows.append([v['id'], v['name']])
return self._print_info_cmd(rows=rows, **kwargs)
    def _cmd_lslcmap(self, **kwargs):
        """Simulate lslcmap.

        Lists LocalCopy mappings matching the mandatory filtervalue,
        stepping each matching mapping's simulated state machine, and
        dropping mappings that have finished (autodelete at 100%) or
        reached the 'end' state.
        """
        rows = []
        rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name',
                     'target_vdisk_id', 'target_vdisk_name', 'group_id',
                     'group_name', 'status', 'progress', 'copy_rate',
                     'clean_progress', 'incremental', 'partner_FC_id',
                     'partner_FC_name', 'restoring', 'start_time',
                     'rc_controlled'])
        # Assume we always get a filtervalue argument
        filter_key = kwargs['filtervalue'].split('=')[0]
        filter_value = kwargs['filtervalue'].split('=')[1]
        to_delete = []
        for k, v in self._lcmappings_list.items():
            if six.text_type(v[filter_key]) == filter_value:
                source = self._volumes_list[v['source']]
                target = self._volumes_list[v['target']]
                # Each listing simulates time passing: advance one step.
                self._state_transition('wait', v)
                # 'speed_up' error injection: drain the state machine
                # until it stops changing so the map completes at once.
                if self._next_cmd_error['lslcmap'] == 'speed_up':
                    self._next_cmd_error['lslcmap'] = ''
                    curr_state = v['status']
                    while self._state_transition('wait', v) == ("", ""):
                        if curr_state == v['status']:
                            break
                        curr_state = v['status']
                if ((v['status'] == 'idle_or_copied' and v['autodelete'] and
                     v['progress'] == '100') or (v['status'] == 'end')):
                    # Completed autodelete maps disappear from the listing.
                    to_delete.append(k)
                else:
                    rows.append([v['id'], v['name'], source['id'],
                                 source['name'], target['id'], target['name'],
                                 '', '', v['status'], v['progress'],
                                 v['copyrate'], '100', 'off', '', '', 'no', '',
                                 'no'])
        # Deletion is deferred to avoid mutating the dict mid-iteration.
        for d in to_delete:
            del self._lcmappings_list[d]
        return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lslcconsistgrp(self, **kwargs):
rows = []
if 'obj' not in kwargs:
rows.append(['id', 'name', 'status' 'start_time'])
for lcconsistgrp in self._lcconsistgrp_list.values():
rows.append([lcconsistgrp['id'],
lcconsistgrp['name'],
lcconsistgrp['status'],
lcconsistgrp['start_time']])
return self._print_info_cmd(rows=rows, **kwargs)
else:
lcconsistgrp = None
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if self._lcconsistgrp_list[cg_id]['name'] == kwargs['obj']:
lcconsistgrp = self._lcconsistgrp_list[cg_id]
rows = []
rows.append(['id', six.text_type(cg_id)])
rows.append(['name', lcconsistgrp['name']])
rows.append(['status', lcconsistgrp['status']])
rows.append(['autodelete',
six.text_type(lcconsistgrp['autodelete'])])
rows.append(['start_time',
six.text_type(lcconsistgrp['start_time'])])
for lcmap_id in lcconsistgrp['lcmaps'].keys():
rows.append(['FC_mapping_id', six.text_type(lcmap_id)])
rows.append(['FC_mapping_name',
lcconsistgrp['lcmaps'][lcmap_id]])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
self._lc_cg_state_transition('wait', lcconsistgrp)
return ('%s' % '\n'.join(rows), '')
    def _cmd_lsvdiskcopy(self, **kwargs):
        """Simulate lsvdiskcopy.

        Without 'copy', list all copies of the vdisk named by 'obj';
        with 'copy', print the detailed per-copy view and mark that
        copy as synchronized (modelling the mirror catching up between
        calls).
        """
        if 'obj' not in kwargs:
            return self._errors['CMMVC5804E']
        name = kwargs['obj']
        vol = self._volumes_list[name]
        rows = []
        rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync',
                     'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity',
                     'type', 'se_copy', 'in_tier', 'in_tier_status',
                     'compressed_copy'])
        for copy in vol['copies'].values():
            rows.append([vol['id'], vol['name'], copy['id'],
                         copy['status'], copy['sync'], copy['primary'],
                         copy['mdisk_grp_id'], copy['mdisk_grp_name'],
                         vol['capacity'], 'striped', 'yes', copy['in_tier'],
                         'inactive', copy['compressed_copy']])
        if 'copy' not in kwargs:
            return self._print_info_cmd(rows=rows, **kwargs)
        else:
            copy_id = kwargs['copy'].strip('\'\"')
            if copy_id not in vol['copies']:
                return self._errors['CMMVC6353E']
            copy = vol['copies'][copy_id]
            rows = []
            rows.append(['vdisk_id', vol['id']])
            rows.append(['vdisk_name', vol['name']])
            rows.append(['capacity', vol['capacity']])
            rows.append(['copy_id', copy['id']])
            rows.append(['status', copy['status']])
            rows.append(['sync', copy['sync']])
            # Deliberate side effect: querying a single copy flips it to
            # synchronized so the next query reports sync complete.
            copy['sync'] = 'yes'
            rows.append(['primary', copy['primary']])
            rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
            rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
            rows.append(['in_tier', copy['in_tier']])
            rows.append(['in_tier_status', 'inactive'])
            rows.append(['compressed_copy', copy['compressed_copy']])
            rows.append(['autoexpand', vol['autoexpand']])
            # Detail view is emitted as delimiter-joined lines, not via
            # _print_info_cmd.
            if 'delim' in kwargs:
                for index in range(len(rows)):
                    rows[index] = kwargs['delim'].join(rows[index])
            return ('%s' % '\n'.join(rows), '')
# list vdisk sync process
def _cmd_lsvdisksyncprogress(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5804E']
name = kwargs['obj']
copy_id = kwargs.get('copy', None)
vol = self._volumes_list[name]
rows = []
rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress',
'estimated_completion_time'])
copy_found = False
for copy in vol['copies'].values():
if not copy_id or copy_id == copy['id']:
copy_found = True
row = [vol['id'], name, copy['id']]
if copy['sync'] == 'yes':
row.extend(['100', ''])
else:
row.extend(['50', '140210115226'])
copy['sync'] = 'yes'
rows.append(row)
if not copy_found:
return self._errors['CMMVC5804E']
return self._print_info_cmd(rows=rows, **kwargs)
    def _cmd_lsrcrelationship(self, **kwargs):
        """Simulate lsrcrelationship.

        Lists remote-copy relationships matching the mandatory
        filtervalue, stepping each match's state machine to simulate
        time passing between queries.
        """
        rows = []
        rows.append(['id', 'name', 'master_cluster_id', 'master_cluster_name',
                     'master_vdisk_id', 'master_vdisk_name', 'aux_cluster_id',
                     'aux_cluster_name', 'aux_vdisk_id', 'aux_vdisk_name',
                     'consistency_group_id', 'primary',
                     'consistency_group_name', 'state', 'bg_copy_priority',
                     'progress', 'freeze_time', 'status', 'sync',
                     'copy_type', 'cycling_mode', 'cycle_period_seconds',
                     'master_change_vdisk_id', 'master_change_vdisk_name',
                     'aux_change_vdisk_id', 'aux_change_vdisk_name'])
        # Assume we always get a filtervalue argument
        filter_key = kwargs['filtervalue'].split('=')[0]
        filter_value = kwargs['filtervalue'].split('=')[1]
        for k, v in self._rcrelationship_list.items():
            if six.text_type(v[filter_key]) == filter_value:
                # Each listing advances the relationship one step.
                self._rc_state_transition('wait', v)
                # 'speed_up' error injection: drain the state machine
                # until it stops changing.
                if self._next_cmd_error['lsrcrelationship'] == 'speed_up':
                    self._next_cmd_error['lsrcrelationship'] = ''
                    curr_state = v['status']
                    while self._rc_state_transition('wait', v) == ("", ""):
                        if curr_state == v['status']:
                            break
                        curr_state = v['status']
                rows.append([v['id'], v['name'], v['master_cluster_id'],
                             v['master_cluster_name'], v['master_vdisk_id'],
                             v['master_vdisk_name'], v['aux_cluster_id'],
                             v['aux_cluster_name'], v['aux_vdisk_id'],
                             v['aux_vdisk_name'], v['consistency_group_id'],
                             v['primary'], v['consistency_group_name'],
                             v['state'], v['bg_copy_priority'], v['progress'],
                             v['freeze_time'], v['status'], v['sync'],
                             v['copy_type'], v['cycling_mode'],
                             v['cycle_period_seconds'],
                             v['master_change_vdisk_id'],
                             v['master_change_vdisk_name'],
                             v['aux_change_vdisk_id'],
                             v['aux_change_vdisk_name']])
        return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lspartnershipcandidate(self, **kwargs):
rows = [None] * 4
master_sys = self._system_list['instorage-mcs-sim']
aux_sys = self._system_list['aux-mcs-sim']
rows[0] = ['id', 'configured', 'name']
rows[1] = [master_sys['id'], 'no', master_sys['name']]
rows[2] = [aux_sys['id'], 'no', aux_sys['name']]
rows[3] = ['0123456789001234', 'no', 'fake_mcs']
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lspartnership(self, **kwargs):
rows = []
rows.append(['id', 'name', 'location', 'partnership',
'type', 'cluster_ip', 'event_log_sequence'])
master_sys = self._system_list['instorage-mcs-sim']
if master_sys['name'] not in self._partnership_list:
local_info = {}
local_info['id'] = master_sys['id']
local_info['name'] = master_sys['name']
local_info['location'] = 'local'
local_info['type'] = ''
local_info['cluster_ip'] = ''
local_info['event_log_sequence'] = ''
local_info['chap_secret'] = ''
local_info['linkbandwidthmbits'] = ''
local_info['backgroundcopyrate'] = ''
local_info['partnership'] = ''
self._partnership_list[master_sys['id']] = local_info
# Assume we always get a filtervalue argument
filter_key = kwargs['filtervalue'].split('=')[0]
filter_value = kwargs['filtervalue'].split('=')[1]
for k, v in self._partnership_list.items():
if six.text_type(v[filter_key]) == filter_value:
rows.append([v['id'], v['name'], v['location'],
v['partnership'], v['type'], v['cluster_ip'],
v['event_log_sequence']])
return self._print_info_cmd(rows=rows, **kwargs)
def _get_mdiskgrp_id(self, mdiskgrp):
grp_num = len(self._flags['instorage_mcs_volpool_name'])
if mdiskgrp in self._flags['instorage_mcs_volpool_name']:
for i in range(grp_num):
if mdiskgrp == self._flags['instorage_mcs_volpool_name'][i]:
return i + 1
elif mdiskgrp == 'openstack2':
return grp_num + 1
elif mdiskgrp == 'openstack3':
return grp_num + 2
else:
return None
# Create a vdisk
def _cmd_mkvdisk(self, **kwargs):
# We only save the id/uid, name, and size - all else will be made up
volume_info = {}
volume_info['id'] = self._find_unused_id(self._volumes_list)
volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
if mdiskgrp == kwargs['mdiskgrp']:
raise exception.InvalidInput(
reason='mdiskgrp missing quotes %s' % kwargs['mdiskgrp'])
mdiskgrp_id = self._get_mdiskgrp_id(mdiskgrp)
volume_info['mdisk_grp_name'] = mdiskgrp
volume_info['mdisk_grp_id'] = str(mdiskgrp_id)
if 'name' in kwargs:
volume_info['name'] = kwargs['name'].strip('\'\"')
else:
volume_info['name'] = 'vdisk' + volume_info['id']
# Assume size and unit are given, store it in bytes
capacity = int(kwargs['size'])
unit = kwargs['unit']
volume_info['capacity'] = self._convert_units_bytes(capacity, unit)
volume_info['IO_group_id'] = kwargs['iogrp']
volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp']
volume_info['RC_name'] = ''
volume_info['RC_id'] = ''
if 'intier' in kwargs:
if kwargs['intier'] == 'on':
volume_info['in_tier'] = 'on'
else:
volume_info['in_tier'] = 'off'
if 'rsize' in kwargs:
volume_info['formatted'] = 'no'
# Fake numbers
volume_info['used_capacity'] = '786432'
volume_info['real_capacity'] = '21474816'
volume_info['free_capacity'] = '38219264'
if 'warning' in kwargs:
volume_info['warning'] = kwargs['warning'].rstrip('%')
else:
volume_info['warning'] = '80'
if 'autoexpand' in kwargs:
volume_info['autoexpand'] = 'on'
else:
volume_info['autoexpand'] = 'off'
if 'grainsize' in kwargs:
volume_info['grainsize'] = kwargs['grainsize']
else:
volume_info['grainsize'] = '32'
if 'compressed' in kwargs:
volume_info['compressed_copy'] = 'yes'
else:
volume_info['compressed_copy'] = 'no'
else:
volume_info['used_capacity'] = volume_info['capacity']
volume_info['real_capacity'] = volume_info['capacity']
volume_info['free_capacity'] = '0'
volume_info['warning'] = ''
volume_info['autoexpand'] = ''
volume_info['grainsize'] = ''
volume_info['compressed_copy'] = 'no'
volume_info['formatted'] = 'yes'
if 'nofmtdisk' in kwargs:
if kwargs['nofmtdisk']:
volume_info['formatted'] = 'no'
vol_cp = {'id': '0',
'status': 'online',
'sync': 'yes',
'primary': 'yes',
'mdisk_grp_id': str(mdiskgrp_id),
'mdisk_grp_name': mdiskgrp,
'in_tier': volume_info['in_tier'],
'compressed_copy': volume_info['compressed_copy']}
volume_info['copies'] = {'0': vol_cp}
if volume_info['name'] in self._volumes_list:
return self._errors['CMMVC6035E']
else:
self._volumes_list[volume_info['name']] = volume_info
return ('Virtual Disk, id [%s], successfully created' %
(volume_info['id']), '')
# Delete a vdisk
def _cmd_rmvdisk(self, **kwargs):
force = True if 'force' in kwargs else False
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
if not force:
for mapping in self._mappings_list.values():
if mapping['vol'] == vol_name:
return self._errors['CMMVC5840E']
for lcmap in self._lcmappings_list.values():
if ((lcmap['source'] == vol_name) or
(lcmap['target'] == vol_name)):
return self._errors['CMMVC5840E']
del self._volumes_list[vol_name]
return ('', '')
def _cmd_expandvdisksize(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
# Assume unit is gb
if 'size' not in kwargs:
return self._errors['CMMVC5707E']
size = int(kwargs['size'])
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
curr_size = int(self._volumes_list[vol_name]['capacity'])
addition = size * units.Gi
self._volumes_list[vol_name]['capacity'] = (
six.text_type(curr_size + addition))
return ('', '')
def _add_port_to_host(self, host_info, **kwargs):
if 'iscsiname' in kwargs:
added_key = 'iscsi_names'
added_val = kwargs['iscsiname'].strip('\'\"')
elif 'hbawwpn' in kwargs:
added_key = 'wwpns'
added_val = kwargs['hbawwpn'].strip('\'\"')
else:
return self._errors['CMMVC5707E']
host_info[added_key].append(added_val)
for v in self._hosts_list.values():
if v['id'] == host_info['id']:
continue
for port in v[added_key]:
if port == added_val:
return self._errors['CMMVC6581E']
return ('', '')
# Make a host
def _cmd_mkhost(self, **kwargs):
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
if 'name' in kwargs:
host_name = kwargs['name'].strip('\'\"')
else:
host_name = 'host' + six.text_type(host_info['id'])
if self._is_invalid_name(host_name):
return self._errors['CMMVC6527E']
if host_name in self._hosts_list:
return self._errors['CMMVC6035E']
host_info['host_name'] = host_name
host_info['iscsi_names'] = []
host_info['wwpns'] = []
out, err = self._add_port_to_host(host_info, **kwargs)
if not len(err):
self._hosts_list[host_name] = host_info
return ('Host, id [%s], successfully created' %
(host_info['id']), '')
else:
return (out, err)
# Add ports to an existing host
def _cmd_addhostport(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
host_info = self._hosts_list[host_name]
return self._add_port_to_host(host_info, **kwargs)
# Change host properties
def _cmd_chhost(self, **kwargs):
if 'chapsecret' not in kwargs:
return self._errors['CMMVC5707E']
secret = kwargs['obj'].strip('\'\"')
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
self._hosts_list[host_name]['chapsecret'] = secret
return ('', '')
# Remove a host
def _cmd_rmhost(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
for v in self._mappings_list.values():
if (v['host'] == host_name):
return self._errors['CMMVC5871E']
del self._hosts_list[host_name]
return ('', '')
# Create a vdisk-host mapping
def _cmd_mkvdiskhostmap(self, **kwargs):
mapping_info = {}
mapping_info['id'] = self._find_unused_id(self._mappings_list)
if 'host' not in kwargs:
return self._errors['CMMVC5707E']
mapping_info['host'] = kwargs['host'].strip('\'\"')
if 'scsi' in kwargs:
mapping_info['lun'] = kwargs['scsi'].strip('\'\"')
else:
mapping_info['lun'] = mapping_info['id']
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
mapping_info['vol'] = kwargs['obj'].strip('\'\"')
if mapping_info['vol'] not in self._volumes_list:
return self._errors['CMMVC5753E']
if mapping_info['host'] not in self._hosts_list:
return self._errors['CMMVC5754E']
if mapping_info['vol'] in self._mappings_list:
return self._errors['CMMVC6071E']
for v in self._mappings_list.values():
if ((v['host'] == mapping_info['host']) and
(v['lun'] == mapping_info['lun'])):
return self._errors['CMMVC5879E']
for v in self._mappings_list.values():
if (v['vol'] == mapping_info['vol']) and ('force' not in kwargs):
return self._errors['CMMVC6071E']
self._mappings_list[mapping_info['id']] = mapping_info
return ('Virtual Disk to Host map, id [%s], successfully created'
% (mapping_info['id']), '')
# Delete a vdisk-host mapping
def _cmd_rmvdiskhostmap(self, **kwargs):
if 'host' not in kwargs:
return self._errors['CMMVC5707E']
host = kwargs['host'].strip('\'\"')
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol = kwargs['obj'].strip('\'\"')
mapping_ids = []
for v in self._mappings_list.values():
if v['vol'] == vol:
mapping_ids.append(v['id'])
if not mapping_ids:
return self._errors['CMMVC5753E']
this_mapping = None
for mapping_id in mapping_ids:
if self._mappings_list[mapping_id]['host'] == host:
this_mapping = mapping_id
if this_mapping is None:
return self._errors['CMMVC5753E']
del self._mappings_list[this_mapping]
return ('', '')
    # Create a LocalCopy mapping
    def _cmd_mklcmap(self, **kwargs):
        """Simulate mklcmap.

        Requires distinct, same-capacity source and target vdisks.  The
        new mapping starts at 'idle_or_copied' with 0% progress and may
        be placed into a consistency group given by id or by name.
        """
        source = ''
        target = ''
        copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50'
        if 'source' not in kwargs:
            return self._errors['CMMVC5707E']
        source = kwargs['source'].strip('\'\"')
        if source not in self._volumes_list:
            return self._errors['CMMVC5754E']
        if 'target' not in kwargs:
            return self._errors['CMMVC5707E']
        target = kwargs['target'].strip('\'\"')
        if target not in self._volumes_list:
            return self._errors['CMMVC5754E']
        if source == target:
            return self._errors['CMMVC6303E']
        # Source and target capacities must match exactly.
        if (self._volumes_list[source]['capacity'] !=
                self._volumes_list[target]['capacity']):
            return self._errors['CMMVC5754E']
        lcmap_info = {}
        lcmap_info['source'] = source
        lcmap_info['target'] = target
        lcmap_info['id'] = self._find_unused_id(self._lcmappings_list)
        lcmap_info['name'] = 'lcmap' + lcmap_info['id']
        lcmap_info['copyrate'] = copyrate
        lcmap_info['progress'] = '0'
        lcmap_info['autodelete'] = True if 'autodelete' in kwargs else False
        lcmap_info['status'] = 'idle_or_copied'
        # Add lcmap to consistency group
        if 'consistgrp' in kwargs:
            consistgrp = kwargs['consistgrp']
            # if is digit, assume is cg id, else is cg name
            cg_id = 0
            if not consistgrp.isdigit():
                # Resolve the group name to its dictionary key.
                for consistgrp_key in self._lcconsistgrp_list.keys():
                    if (self._lcconsistgrp_list[consistgrp_key]['name'] ==
                            consistgrp):
                        cg_id = consistgrp_key
                        lcmap_info['consistgrp'] = consistgrp_key
                        break
            else:
                if int(consistgrp) in self._lcconsistgrp_list.keys():
                    cg_id = int(consistgrp)
            # If can't find exist consistgrp id, return not exist error
            if not cg_id:
                return self._errors['CMMVC5754E']
            lcmap_info['consistgrp'] = cg_id
            # Add lcmap to consistgrp
            self._lcconsistgrp_list[cg_id]['lcmaps'][lcmap_info['id']] = (
                lcmap_info['name'])
            # Adding a member also updates the group's state machine.
            self._lc_cg_state_transition('add',
                                         self._lcconsistgrp_list[cg_id])
        self._lcmappings_list[lcmap_info['id']] = lcmap_info
        return('LocalCopy Mapping, id [' + lcmap_info['id'] +
               '], successfully created', '')
def _cmd_prestartlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
if self._next_cmd_error['prestartlcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['prestartlcmap'] = ''
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('prepare', lcmap)
def _cmd_startlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
if self._next_cmd_error['startlcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['startlcmap'] = ''
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('start', lcmap)
def _cmd_stoplcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('stop', lcmap)
def _cmd_rmlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
force = True if 'force' in kwargs else False
if self._next_cmd_error['rmlcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['rmlcmap'] = ''
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
function = 'delete_force' if force else 'delete'
ret = self._state_transition(function, lcmap)
if lcmap['status'] == 'end':
del self._lcmappings_list[id_num]
return ret
def _cmd_chlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
id_num = kwargs['obj']
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
for key in ['name', 'copyrate', 'autodelete']:
if key in kwargs:
lcmap[key] = kwargs[key]
return ('', '')
# Create a LocalCopy mapping
def _cmd_mklcconsistgrp(self, **kwargs):
lcconsistgrp_info = {}
lcconsistgrp_info['id'] = self._find_unused_id(self._lcconsistgrp_list)
if 'name' in kwargs:
lcconsistgrp_info['name'] = kwargs['name'].strip('\'\"')
else:
lcconsistgrp_info['name'] = 'lccstgrp' + lcconsistgrp_info['id']
if 'autodelete' in kwargs:
lcconsistgrp_info['autodelete'] = True
else:
lcconsistgrp_info['autodelete'] = False
lcconsistgrp_info['status'] = 'empty'
lcconsistgrp_info['start_time'] = None
lcconsistgrp_info['lcmaps'] = {}
self._lcconsistgrp_list[lcconsistgrp_info['id']] = lcconsistgrp_info
return('LocalCopy Consistency Group, id [' + lcconsistgrp_info['id'] +
'], successfully created', '')
def _cmd_prestartlcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if cg_name == self._lcconsistgrp_list[cg_id]['name']:
break
return self._lc_cg_state_transition('prepare',
self._lcconsistgrp_list[cg_id])
def _cmd_startlcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if cg_name == self._lcconsistgrp_list[cg_id]['name']:
break
return self._lc_cg_state_transition('start',
self._lcconsistgrp_list[cg_id])
def _cmd_stoplcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
try:
lcconsistgrps = self._lcconsistgrp_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._lc_cg_state_transition('stop', lcconsistgrps)
def _cmd_rmlcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
force = True if 'force' in kwargs else False
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if cg_name == self._lcconsistgrp_list[cg_id]['name']:
break
if not cg_id:
return self._errors['CMMVC5753E']
lcconsistgrps = self._lcconsistgrp_list[cg_id]
function = 'delete_force' if force else 'delete'
ret = self._lc_cg_state_transition(function, lcconsistgrps)
if lcconsistgrps['status'] == 'end':
del self._lcconsistgrp_list[cg_id]
return ret
def _cmd_migratevdisk(self, **kwargs):
if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs:
return self._errors['CMMVC5707E']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
vdisk = kwargs['vdisk'].strip('\'\"')
if vdisk in self._volumes_list:
curr_mdiskgrp = self._volumes_list
else:
for pool in self._other_pools:
if vdisk in pool:
curr_mdiskgrp = pool
break
else:
return self._errors['CMMVC5754E']
if mdiskgrp == self._flags['instorage_mcs_volpool_name']:
tgt_mdiskgrp = self._volumes_list
elif mdiskgrp == 'openstack2':
tgt_mdiskgrp = self._other_pools['openstack2']
elif mdiskgrp == 'openstack3':
tgt_mdiskgrp = self._other_pools['openstack3']
else:
return self._errors['CMMVC5754E']
if curr_mdiskgrp == tgt_mdiskgrp:
return self._errors['CMMVC6430E']
vol = curr_mdiskgrp[vdisk]
tgt_mdiskgrp[vdisk] = vol
del curr_mdiskgrp[vdisk]
return ('', '')
def _cmd_addvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
vol = self._volumes_list[vol_name]
if 'mdiskgrp' not in kwargs:
return self._errors['CMMVC5707E']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
if mdiskgrp == kwargs['mdiskgrp']:
raise exception.InvalidInput(
reason='mdiskgrp missing quotes %s') % kwargs['mdiskgrp']
copy_info = {}
copy_info['id'] = self._find_unused_id(vol['copies'])
copy_info['status'] = 'online'
copy_info['sync'] = 'no'
copy_info['primary'] = 'no'
copy_info['mdisk_grp_name'] = mdiskgrp
copy_info['mdisk_grp_id'] = str(self._get_mdiskgrp_id(mdiskgrp))
if 'intier' in kwargs:
if kwargs['intier'] == 'on':
copy_info['in_tier'] = 'on'
else:
copy_info['in_tier'] = 'off'
if 'rsize' in kwargs:
if 'compressed' in kwargs:
copy_info['compressed_copy'] = 'yes'
else:
copy_info['compressed_copy'] = 'no'
vol['copies'][copy_info['id']] = copy_info
return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' %
{'vid': vol['id'], 'cid': copy_info['id']}, '')
def _cmd_rmvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if 'copy' not in kwargs:
return self._errors['CMMVC5707E']
copy_id = kwargs['copy'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
vol = self._volumes_list[vol_name]
if copy_id not in vol['copies']:
return self._errors['CMMVC6353E']
del vol['copies'][copy_id]
return ('', '')
def _cmd_chvdisk(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
vol = self._volumes_list[vol_name]
kwargs.pop('obj')
params = ['name', 'warning', 'udid',
'autoexpand', 'intier', 'primary']
for key, value in kwargs.items():
if key == 'intier':
vol['in_tier'] = value
continue
if key == 'warning':
vol['warning'] = value.rstrip('%')
continue
if key == 'name':
vol['name'] = value
del self._volumes_list[vol_name]
self._volumes_list[value] = vol
if key == 'primary':
copies = self._volumes_list[vol_name]['copies']
if value == '0':
copies['0']['primary'] = 'yes'
copies['1']['primary'] = 'no'
elif value == '1':
copies['0']['primary'] = 'no'
copies['1']['primary'] = 'yes'
else:
err = self._errors['CMMVC6353E'][1] % {'VALUE': key}
return ('', err)
if key in params:
vol[key] = value
else:
err = self._errors['CMMVC5709E'][1] % {'VALUE': key}
return ('', err)
return ('', '')
def _cmd_movevdisk(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
vol = self._volumes_list[vol_name]
if 'iogrp' not in kwargs:
return self._errors['CMMVC5707E']
iogrp = kwargs['iogrp']
if iogrp.isdigit():
vol['IO_group_id'] = iogrp
vol['IO_group_name'] = 'io_grp%s' % iogrp
else:
vol['IO_group_id'] = iogrp[6:]
vol['IO_group_name'] = iogrp
return ('', '')
def _cmd_addvdiskaccess(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
return ('', '')
def _cmd_rmvdiskaccess(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
return ('', '')
def _add_host_to_list(self, connector):
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
host_info['host_name'] = connector['host']
host_info['iscsi_names'] = []
host_info['wwpns'] = []
if 'initiator' in connector:
host_info['iscsi_names'].append(connector['initiator'])
if 'wwpns' in connector:
host_info['wwpns'] = host_info['wwpns'] + connector['wwpns']
self._hosts_list[connector['host']] = host_info
def _host_in_list(self, host_name):
for k in self._hosts_list:
if k.startswith(host_name):
return k
return None
# Replication related command
# Create a remote copy
def _cmd_mkrcrelationship(self, **kwargs):
master_vol = ''
aux_vol = ''
aux_cluster = ''
master_sys = self._system_list['instorage-mcs-sim']
aux_sys = self._system_list['aux-mcs-sim']
if 'master' not in kwargs:
return self._errors['CMMVC5707E']
master_vol = kwargs['master'].strip('\'\"')
if master_vol not in self._volumes_list:
return self._errors['CMMVC5754E']
if 'aux' not in kwargs:
return self._errors['CMMVC5707E']
aux_vol = kwargs['aux'].strip('\'\"')
if aux_vol not in self._volumes_list:
return self._errors['CMMVC5754E']
if 'cluster' not in kwargs:
return self._errors['CMMVC5707E']
aux_cluster = kwargs['cluster'].strip('\'\"')
if aux_cluster != aux_sys['name']:
return self._errors['CMMVC5754E']
if (self._volumes_list[master_vol]['capacity'] !=
self._volumes_list[aux_vol]['capacity']):
return self._errors['CMMVC5754E']
rcrel_info = {}
rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list)
rcrel_info['name'] = 'rcrel' + rcrel_info['id']
rcrel_info['master_cluster_id'] = master_sys['id']
rcrel_info['master_cluster_name'] = master_sys['name']
rcrel_info['master_vdisk_id'] = self._volumes_list[master_vol]['id']
rcrel_info['master_vdisk_name'] = master_vol
rcrel_info['aux_cluster_id'] = aux_sys['id']
rcrel_info['aux_cluster_name'] = aux_sys['name']
rcrel_info['aux_vdisk_id'] = self._volumes_list[aux_vol]['id']
rcrel_info['aux_vdisk_name'] = aux_vol
rcrel_info['primary'] = 'master'
rcrel_info['consistency_group_id'] = ''
rcrel_info['consistency_group_name'] = ''
rcrel_info['state'] = 'inconsistent_stopped'
rcrel_info['bg_copy_priority'] = '50'
rcrel_info['progress'] = '0'
rcrel_info['freeze_time'] = ''
rcrel_info['status'] = 'online'
rcrel_info['sync'] = ''
rcrel_info['copy_type'] = 'async' if 'async' in kwargs else 'sync'
rcrel_info['cycling_mode'] = ''
rcrel_info['cycle_period_seconds'] = '300'
rcrel_info['master_change_vdisk_id'] = ''
rcrel_info['master_change_vdisk_name'] = ''
rcrel_info['aux_change_vdisk_id'] = ''
rcrel_info['aux_change_vdisk_name'] = ''
self._rcrelationship_list[rcrel_info['name']] = rcrel_info
self._volumes_list[master_vol]['RC_name'] = rcrel_info['name']
self._volumes_list[master_vol]['RC_id'] = rcrel_info['id']
self._volumes_list[aux_vol]['RC_name'] = rcrel_info['name']
self._volumes_list[aux_vol]['RC_id'] = rcrel_info['id']
return('RC Relationship, id [' + rcrel_info['id'] +
'], successfully created', '')
def _cmd_startrcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
primary_vol = None
if 'primary' in kwargs:
primary_vol = kwargs['primary'].strip('\'\"')
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
if rcrel['state'] == 'idling' and not primary_vol:
return self._errors['CMMVC5963E']
self._rc_state_transition('start', rcrel)
if primary_vol:
self._rcrelationship_list[id_num]['primary'] = primary_vol
return ('', '')
def _cmd_stoprcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
force_access = True if 'access' in kwargs else False
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
function = 'stop_access' if force_access else 'stop'
self._rc_state_transition(function, rcrel)
if force_access:
self._rcrelationship_list[id_num]['primary'] = ''
return ('', '')
def _cmd_switchrcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
id_num = kwargs['obj']
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
if rcrel['state'] == instorage_const.REP_CONSIS_SYNC:
rcrel['primary'] = kwargs['primary']
return ('', '')
else:
return self._errors['CMMVC5753E']
def _cmd_rmrcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
force = True if 'force' in kwargs else False
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
function = 'delete_force' if force else 'delete'
self._rc_state_transition(function, rcrel)
if rcrel['state'] == 'end':
self._volumes_list[rcrel['master_vdisk_name']]['RC_name'] = ''
self._volumes_list[rcrel['master_vdisk_name']]['RC_id'] = ''
self._volumes_list[rcrel['aux_vdisk_name']]['RC_name'] = ''
self._volumes_list[rcrel['aux_vdisk_name']]['RC_id'] = ''
del self._rcrelationship_list[id_num]
return ('', '')
    def _rc_state_transition(self, function, rcrel):
        """Advance the simulated remote-copy state machine.

        'wait' models time passing; any other function name is looked
        up in the _rc_transitions table for the current state.  Returns
        ('', '') on success, or CMMVC5982E when the transition is not
        legal from the current state.
        """
        if (function == 'wait' and
            'wait' not in self._rc_transitions[rcrel['state']]):
            # Waiting in a state with no 'wait' edge is a silent no-op.
            return ('', '')
        if rcrel['state'] == 'inconsistent_copying' and function == 'wait':
            # Copying progresses 0 -> 50 -> 100, then synchronizes.
            if rcrel['progress'] == '0':
                rcrel['progress'] = '50'
            else:
                rcrel['progress'] = '100'
                rcrel['state'] = 'consistent_synchronized'
            return ('', '')
        else:
            try:
                curr_state = rcrel['state']
                rcrel['state'] = self._rc_transitions[curr_state][function]
                return ('', '')
            except Exception:
                # Missing table entry == illegal transition.
                return self._errors['CMMVC5982E']
def _cmd_mkippartnership(self, **kwargs):
if 'clusterip' not in kwargs:
return self._errors['CMMVC5707E']
clusterip = kwargs['master'].strip('\'\"')
if 'linkbandwidthmbits' not in kwargs:
return self._errors['CMMVC5707E']
bandwith = kwargs['linkbandwidthmbits'].strip('\'\"')
if 'backgroundcopyrate' not in kwargs:
return self._errors['CMMVC5707E']
copyrate = kwargs['backgroundcopyrate'].strip('\'\"')
if clusterip == '192.168.10.21':
partner_info_id = self._system_list['instorage-mcs-sim']['id']
partner_info_name = self._system_list['instorage-mcs-sim']['name']
else:
partner_info_id = self._system_list['aux-mcs-sim']['id']
partner_info_name = self._system_list['aux-mcs-sim']['name']
partner_info = {}
partner_info['id'] = partner_info_id
partner_info['name'] = partner_info_name
partner_info['location'] = 'remote'
partner_info['type'] = 'ipv4'
partner_info['cluster_ip'] = clusterip
partner_info['event_log_sequence'] = ''
partner_info['chap_secret'] = ''
partner_info['linkbandwidthmbits'] = bandwith
partner_info['backgroundcopyrate'] = copyrate
partner_info['partnership'] = 'fully_configured'
self._partnership_list[partner_info['id']] = partner_info
return('', '')
def _cmd_mkfcpartnership(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
peer_sys = kwargs['obj']
if 'linkbandwidthmbits' not in kwargs:
return self._errors['CMMVC5707E']
bandwith = kwargs['linkbandwidthmbits'].strip('\'\"')
if 'backgroundcopyrate' not in kwargs:
return self._errors['CMMVC5707E']
copyrate = kwargs['backgroundcopyrate'].strip('\'\"')
partner_info = {}
partner_info['id'] = self._system_list[peer_sys]['id']
partner_info['name'] = peer_sys
partner_info['location'] = 'remote'
partner_info['type'] = 'fc'
partner_info['cluster_ip'] = ''
partner_info['event_log_sequence'] = ''
partner_info['chap_secret'] = ''
partner_info['linkbandwidthmbits'] = bandwith
partner_info['backgroundcopyrate'] = copyrate
partner_info['partnership'] = 'fully_configured'
self._partnership_list[partner_info['id']] = partner_info
return('', '')
def _cmd_chpartnership(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
peer_sys = kwargs['obj']
if peer_sys not in self._partnership_list:
return self._errors['CMMVC5753E']
partner_state = ('fully_configured' if 'start'in kwargs
else 'fully_configured_stopped')
self._partnership_list[peer_sys]['partnership'] = partner_state
return('', '')
# The main function to run commands on the management simulator
def execute_command(self, cmd, check_exit_code=True):
try:
kwargs = self._cmd_to_dict(cmd)
except IndexError:
return self._errors['CMMVC5707E']
command = kwargs.pop('cmd')
func = getattr(self, '_cmd_' + command)
out, err = func(**kwargs)
if (check_exit_code) and (len(err) != 0):
raise processutils.ProcessExecutionError(exit_code=1,
stdout=out,
stderr=err,
cmd=' '.join(cmd))
return (out, err)
    # After calling this function, the next call to the specified command will
    # result in the error specified
    def error_injection(self, cmd, error):
        """Queue *error* as the forced result of the next call to *cmd*."""
        self._next_cmd_error[cmd] = error
def change_vdiskcopy_attr(self, vol_name, key, value, copy="primary"):
if copy == 'primary':
self._volumes_list[vol_name]['copies']['0'][key] = value
elif copy == 'secondary':
self._volumes_list[vol_name]['copies']['1'][key] = value
else:
msg = "The copy should be primary or secondary"
raise exception.InvalidInput(reason=msg)
| {
"content_hash": "cd7fae5977da59aec485c363acdfec50",
"timestamp": "",
"source": "github",
"line_count": 2195,
"max_line_length": 79,
"avg_line_length": 40.84829157175399,
"alnum_prop": 0.4801699716713881,
"repo_name": "phenoxim/cinder",
"id": "f58cd63cbb1f2827a9884095a3f39632bc6207c2",
"size": "90291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/inspur/instorage/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20325688"
},
{
"name": "Shell",
"bytes": "16353"
}
],
"symlink_target": ""
} |
import rx
import tx
import led
class Target(object):
    """Spike target endpoint backed by files/FIFOs.

    The UART is modelled as seen from the target: TX is a FIFO the target
    writes to, RX is a file the target reads; a separate FIFO carries LED
    state.
    """

    def __init__(self):
        # our UART consists of a RX and TX path as seen from the target
        self.TX_FIFO_PATH = "../../appl/run/spike/tx_pipe"
        self.RX_FILE_PATH = "../../appl/run/spike/rx_file"
        self.RX_TMP_FILE_PATH = "../../appl/run/spike/_rx_file"
        self.LD_FIFO_PATH = "../../appl/run/spike/ld_pipe"
        self.BUFFER_SIZE = 1024

    def start_threads(self, stream, parent):
        """Spawn the RX, TX and LED worker threads and start them."""
        self.rx_thread = rx.RxThread(self.RX_FILE_PATH, self.RX_TMP_FILE_PATH)
        self.tx_thread = tx.TxThread(self.TX_FIFO_PATH, parent)
        self.led_thread = led.LedThread(self.LD_FIFO_PATH, parent)
        self.rx_thread.start()
        self.tx_thread.start()
        self.led_thread.start()

    def uart_read(self):
        """Read up to BUFFER_SIZE bytes from the target's TX pipe.

        NOTE(review): self.tx_pipe is never assigned in this class --
        presumably it is set externally before the first read; confirm.
        """
        import os  # bug fix: 'os' was used here but never imported
        return os.read(self.tx_pipe, self.BUFFER_SIZE)

    def uart_write(self, data):
        """Append *data* to the RX temp file while holding the RX lock."""
        # Parenthesized print works on both Python 2 and 3 (the original
        # used a Python-2-only print statement).
        print(">>> %s" % data)
        rx.rxlock.acquire()
        try:
            # Unbuffered append so the reader thread sees data immediately.
            rx_file = open(self.RX_TMP_FILE_PATH, 'a+', 0)
            rx_file.write(data)
            rx_file.close()
        finally:
            rx.rxlock.release()
| {
"content_hash": "831b0036a8f0664ce48b784d0f57973b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 33.794117647058826,
"alnum_prop": 0.5648389904264578,
"repo_name": "minth/riscv-security-tutorial",
"id": "1e43e50f283e262b693d17d224f8415be42a5b45",
"size": "2321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/tutgui/target.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37413"
},
{
"name": "C",
"bytes": "1002361"
},
{
"name": "C++",
"bytes": "71370"
},
{
"name": "Makefile",
"bytes": "37205"
},
{
"name": "Python",
"bytes": "11187"
}
],
"symlink_target": ""
} |
from base import *

# Django settings overrides for the testing environment.  DEBUG stays off
# so test runs exercise production-like behaviour.
DEBUG = False
TEMPLATE_DEBUG = DEBUG

# MySQL database; HOST/USER/PASSWORD are intentionally blank here and are
# expected to be supplied per environment.  CONN_MAX_AGE keeps connections
# open for up to 10 minutes.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'testing_{{cookiecutter.project_name}}',
        'HOST': '',
        'USER': '',
        'PASSWORD': '',
        'CONN_MAX_AGE': 600,
    }
}

# Cache compiled templates; filesystem loader takes precedence over
# app-directory templates.
TEMPLATE_LOADERS = (
    (
        'django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )
    ),
)

# Local memcached instance; the key prefix isolates this environment.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
        'KEY_PREFIX': 'testing_key_prefix'
    }
}

ALLOWED_HOSTS = ['.{{cookiecutter.domain_name}}', '{{cookiecutter.domain_name}}.', '*.uhura.de']  # subdomains and FQDN
ROOT_URLCONF = '{{cookiecutter.repo_name}}.urls' | {
"content_hash": "2c3ba745c0d37d2a7c8b65a2b7a3c2fa",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 118,
"avg_line_length": 23,
"alnum_prop": 0.5709382151029748,
"repo_name": "uhuramedia/cookiecutter-django",
"id": "a92bccd6e07101e555717e46b3eb7ea6bd412641",
"size": "874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5140"
},
{
"name": "CSS",
"bytes": "61"
},
{
"name": "HTML",
"bytes": "2570"
},
{
"name": "Makefile",
"bytes": "5652"
},
{
"name": "Python",
"bytes": "19343"
}
],
"symlink_target": ""
} |
from canari.maltego.utils import debug
from canari.framework import configure
from canari.maltego.entities import URL, Phrase
from common.entities import Actor, Case, CoursesOfAction, Incident, Indicator, TTP
from canari.maltego.message import Label, UIMessage
from common.client import search, encode_to_utf8, lower, ThreatCentralError
__author__ = 'Bart Otten'
__copyright__ = '(c) Copyright [2016] Hewlett Packard Enterprise Development LP'
__credits__ = []
__license__ = 'Apache 2.0'
__version__ = '1'
__maintainer__ = 'Bart Otten'
__email__ = 'tc-support@hpe.com'
__status__ = 'Development'
__all__ = [
'dotransform'
]
@configure(
    label='Search URL in Threat Central',
    description='Searches URL in Threat Central',
    uuids=['threatcentral.v2.URLToThreatCentral'],
    inputs=[('Threat Central', URL)],
    debug=False,
    remote=False
)
def dotransform(request, response, config):
    """Maltego transform: search a URL in Threat Central and attach the
    matching entities (actor, case, indicator, ...) to the response.
    """
    # Prefer the structured 'url' field; fall back to the raw entity value.
    try:
        url = request.fields['url']
    except KeyError:
        url = request.value

    try:
        results = search(url)
    except ThreatCentralError as err:
        response += UIMessage(err.value, type='PartialError')
        return response

    # Result types with a straightforward title -> entity mapping.
    simple_entities = {
        'case': Case,
        'coursesofactions': CoursesOfAction,
        'indicator': Indicator,
        'incident': Incident,
        'ttp': TTP,
    }
    try:
        for result in results:
            rtype = lower(result.get('type'))
            weight = int(result.get('tcScore')) if result.get('tcScore') else 1
            title = encode_to_utf8(result.get('title'))
            if rtype == 'actor':
                # Actor entities may carry an empty title; fall back to
                # the resource name in that case.
                resource_name = encode_to_utf8(
                    result.get('resource', dict()).get('name'))
                e = Actor(title if result.get('title') else resource_name,
                          weight=weight)
                e.name = resource_name
                e.actor = resource_name
            elif rtype in simple_entities:
                e = simple_entities[rtype](title, weight=weight)
            else:
                # Unknown result types degrade to a plain phrase.
                e = Phrase(title, weight=weight)
                debug(rtype)
            e.title = title
            e.resourceId = result.get('id')
            if result.get('description'):
                e += Label('Description', '<br/>'.join(
                    encode_to_utf8(result.get('description',
                                              '')).split('\n')))
            response += e
    except AttributeError as err:
        response += UIMessage('Error: {}'.format(err), type='PartialError')
    except ThreatCentralError as err:
        response += UIMessage(err.value, type='PartialError')
    except TypeError:
        return response
    return response
| {
"content_hash": "ae59ce939622dd56c2a745153f6ee384",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 108,
"avg_line_length": 38.97849462365591,
"alnum_prop": 0.5401379310344827,
"repo_name": "ThreatCentral/blackberries",
"id": "bf5e24306efa994444ddc01669698463d3f550d2",
"size": "4228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ThreatCentral/transforms/URLToThreatCentral.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "339767"
}
],
"symlink_target": ""
} |
"""
Fake Manhattan device (65 qubit).
"""
import os
from qiskit.providers.fake_provider import fake_pulse_backend, fake_backend
class FakeManhattanV2(fake_backend.FakeBackendV2):
    """A fake Manhattan backend exposed through the BackendV2 interface.

    Configuration, properties and pulse defaults are loaded from the JSON
    snapshot files that live next to this module.
    """

    # Directory containing the backend's JSON snapshot files.
    dirname = os.path.dirname(__file__)
    conf_filename = "conf_manhattan.json"
    props_filename = "props_manhattan.json"
    defs_filename = "defs_manhattan.json"
    backend_name = "fake_manhattan_v2"
class FakeManhattan(fake_pulse_backend.FakePulseBackend):
    """A fake Manhattan backend using the legacy pulse-backend interface.

    Uses the same JSON snapshot files as FakeManhattanV2 but registers
    under the name "fake_manhattan".
    """

    # Directory containing the backend's JSON snapshot files.
    dirname = os.path.dirname(__file__)
    conf_filename = "conf_manhattan.json"
    props_filename = "props_manhattan.json"
    defs_filename = "defs_manhattan.json"
    backend_name = "fake_manhattan"
| {
"content_hash": "fa3149f3d1e50c1e6f499a4d3e87232e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 27.96153846153846,
"alnum_prop": 0.7015130674002751,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "13e39360ad17b25a599e2569aeb2306c1f1cb907",
"size": "1205",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/providers/fake_provider/backends/manhattan/fake_manhattan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter donor profile/request char fields to allow blank values
    (lengths as declared below)."""

    dependencies = [
        ('donor', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='address',
            field=models.CharField(blank=True, max_length=150),
        ),
        migrations.AlterField(
            model_name='profile',
            name='informationNumber',
            field=models.CharField(blank=True, max_length=75),
        ),
        migrations.AlterField(
            model_name='profile',
            name='phone',
            field=models.CharField(blank=True, max_length=75),
        ),
        migrations.AlterField(
            model_name='request',
            name='email',
            field=models.CharField(blank=True, max_length=75),
        ),
        migrations.AlterField(
            model_name='request',
            name='phone',
            field=models.CharField(blank=True, max_length=75),
        ),
    ]
| {
"content_hash": "b5e4422cee7d5f411a9bea28a07292b8",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 63,
"avg_line_length": 27.68421052631579,
"alnum_prop": 0.5475285171102662,
"repo_name": "markeasterling/donormatch",
"id": "1af3feacc13cab73a4d09e274f949bec8e36e549",
"size": "1123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/donor/migrations/0002_auto_20160913_2109.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "17981"
},
{
"name": "JavaScript",
"bytes": "14635"
},
{
"name": "Python",
"bytes": "20271"
}
],
"symlink_target": ""
} |
import os
import six
import copy
import struct
import signal
import logging
import multiprocessing
import hmac
from hashlib import sha1
from hashlib import sha256
from Crypto.Cipher import AES
try:
from setproctitle import setproctitle
except ImportError:
def setproctitle(title):
pass
from bucky.errors import ConfigError, ProtocolError
from bucky.udpserver import UDPServer
from bucky.helpers import FileMonitor
log = logging.getLogger(__name__)
class CPUConverter(object):
    """Build metric-name parts for samples from collectd's 'cpu' plugin."""

    PRIORITY = -1

    def __call__(self, sample):
        parts = ["cpu"]
        parts.append(sample["plugin_instance"])
        parts.append(sample["type_instance"])
        return parts
class InterfaceConverter(object):
    """Build metric-name parts for collectd 'interface' samples,
    dropping any empty components."""

    PRIORITY = -1

    def __call__(self, sample):
        candidates = [
            "interface",
            sample.get("plugin_instance", ""),
            sample.get("type_instance", ""),
            sample["type"],
            sample["value_name"],
        ]
        return filter(None, candidates)
class MemoryConverter(object):
    """Build metric-name parts for collectd 'memory' plugin samples."""

    PRIORITY = -1

    def __call__(self, sample):
        return ["memory"] + [sample["type_instance"]]
class DefaultConverter(object):
    """Fallback converter: join plugin/instance/type fields into name
    parts, skipping blanks and the redundant literal 'value'."""

    PRIORITY = -1

    def __call__(self, sample):
        parts = [sample["plugin"].strip()]
        instance = sample.get("plugin_instance")
        if instance:
            parts.append(instance.strip())
        stype = sample.get("type", "").strip()
        if stype and stype != "value":
            parts.append(stype)
        type_instance = sample.get("type_instance", "").strip()
        if type_instance:
            parts.append(type_instance)
        vname = sample.get("value_name").strip()
        if vname and vname != "value":
            parts.append(vname)
        return parts
# Registry of built-in converters keyed by collectd plugin name;
# "_default" handles any plugin without a dedicated converter.
DEFAULT_CONVERTERS = {
    "cpu": CPUConverter(),
    "interface": InterfaceConverter(),
    "memory": MemoryConverter(),
    "_default": DefaultConverter(),
}
class CollectDTypes(object):
    """Registry of collectd data-set definitions loaded from types.db.

    Maps each type name to a list of (value_name, value_type) pairs and
    records the valid (min, max) range of every value.
    """

    def __init__(self, types_dbs=None):
        """Load type definitions.

        :param types_dbs: optional list of types.db paths; when empty,
            well-known default locations are searched.
        :raises ConfigError: if no types.db file can be located.
        """
        self.types = {}
        self.type_ranges = {}
        if not types_dbs:
            # Bug fix: on Python 3 a bare filter object is always truthy,
            # so the emptiness check below never fired and a missing
            # types.db was silently ignored; materialize with list().
            types_dbs = list(filter(os.path.exists, [
                "/usr/share/collectd/types.db",
                "/usr/local/share/collectd/types.db",
                "./tests/data/types.db",
            ]))
        if not types_dbs:
            raise ConfigError("Unable to locate types.db")
        self.types_dbs = types_dbs
        self._load_types()

    def get(self, name):
        """Return the value spec for *name*; raise ProtocolError if unknown."""
        t = self.types.get(name)
        if t is None:
            raise ProtocolError("Invalid type name: %s" % name)
        return t

    def _load_types(self):
        # Parse every configured types.db, skipping comment and blank lines.
        for types_db in self.types_dbs:
            with open(types_db) as handle:
                for line in handle:
                    if line.lstrip()[:1] == "#":
                        continue
                    if not line.strip():
                        continue
                    self._add_type_line(line)
            log.info("Loaded collectd types from %s", types_db)

    def _add_type_line(self, line):
        # A types.db line looks like:
        #   name  vname:vtype:min:max, vname:vtype:min:max, ...
        # where vtype is COUNTER/GAUGE/DERIVE/ABSOLUTE and "U" marks an
        # unbounded min or max.
        types = {
            "COUNTER": 0,
            "GAUGE": 1,
            "DERIVE": 2,
            "ABSOLUTE": 3
        }
        name, spec = line.split(None, 1)
        self.types[name] = []
        self.type_ranges[name] = {}
        vals = spec.split(", ")
        for val in vals:
            vname, vtype, minv, maxv = val.strip().split(":")
            vtype = types.get(vtype)
            if vtype is None:
                raise ValueError("Invalid value type: %s" % vtype)
            minv = None if minv == "U" else float(minv)
            maxv = None if maxv == "U" else float(maxv)
            self.types[name].append((vname, vtype))
            self.type_ranges[name][vname] = (minv, maxv)
class CollectDParser(object):
    """Parser for the collectd binary network protocol.

    Splits raw UDP payloads into typed "parts" and yields one sample dict
    per value, resolving value names/types via a CollectDTypes registry.
    """

    def __init__(self, types_dbs=[], counter_eq_derive=False):
        # counter_eq_derive: tolerate COUNTER<->DERIVE mismatches between
        # the packet and types.db (assume the server's types.db is right).
        self.types = CollectDTypes(types_dbs=types_dbs)
        self.counter_eq_derive = counter_eq_derive

    def parse(self, data):
        """Yield sample dicts parsed from one raw packet."""
        for sample in self.parse_samples(data):
            yield sample

    def parse_samples(self, data):
        # Part-type -> field decoder; 0x0006 (values) is handled inline
        # because one values part fans out into multiple samples.
        types = {
            0x0000: self._parse_string("host"),
            0x0001: self._parse_time("time"),
            0x0008: self._parse_time_hires("time"),
            0x0002: self._parse_string("plugin"),
            0x0003: self._parse_string("plugin_instance"),
            0x0004: self._parse_string("type"),
            0x0005: self._parse_string("type_instance"),
            0x0006: None,  # handle specially
            0x0007: self._parse_time("interval"),
            0x0009: self._parse_time_hires("interval")
        }
        sample = {}
        for (ptype, data) in self.parse_data(data):
            if ptype not in types:
                log.debug("Ignoring part type: 0x%02x", ptype)
                continue
            if ptype != 0x0006:
                # Metadata parts accumulate into the running sample context.
                types[ptype](sample, data)
                continue
            # A values part emits one sample per value, inheriting the
            # metadata collected so far (hence the deepcopy).
            for vname, vtype, val in self.parse_values(sample["type"], data):
                sample["value_name"] = vname
                sample["value_type"] = vtype
                sample["value"] = val
                yield copy.deepcopy(sample)

    def parse_data(self, data):
        """Yield (part_type, payload) pairs from the packet."""
        # Recognized part types, including signed (0x0200) and
        # encrypted (0x0210) envelopes.
        types = set([
            0x0000, 0x0001, 0x0002, 0x0003, 0x0004,
            0x0005, 0x0006, 0x0007, 0x0008, 0x0009,
            0x0100, 0x0101, 0x0200, 0x0210
        ])
        while len(data) > 0:
            if len(data) < 4:
                raise ProtocolError("Truncated header.")
            # Each part starts with big-endian uint16 type and length.
            (part_type, part_len) = struct.unpack("!HH", data[:4])
            data = data[4:]
            if part_type not in types:
                raise ProtocolError("Invalid part type: 0x%02x" % part_type)
            part_len -= 4  # includes four header bytes we just parsed
            if len(data) < part_len:
                raise ProtocolError("Truncated value.")
            part_data, data = data[:part_len], data[part_len:]
            yield (part_type, part_data)

    def parse_values(self, stype, data):
        """Yield (name, type, value) triples from a 0x0006 values part."""
        # Wire format per value type: COUNTER/ABSOLUTE are unsigned 64-bit
        # big-endian, DERIVE signed 64-bit big-endian, GAUGE (1) is a
        # little-endian double.
        types = {0: "!Q", 1: "<d", 2: "!q", 3: "!Q"}
        (nvals,) = struct.unpack("!H", data[:2])
        data = data[2:]
        # Layout: nvals one-byte type codes followed by nvals 8-byte values.
        if len(data) != 9 * nvals:
            raise ProtocolError("Invalid value structure length.")
        vtypes = self.types.get(stype)
        if nvals != len(vtypes):
            raise ProtocolError("Values different than types.db info.")
        for i in range(nvals):
            if six.PY3:
                vtype = data[i]
            else:
                (vtype,) = struct.unpack("B", data[i])
            if vtype != vtypes[i][1]:
                if self.counter_eq_derive and \
                   (vtype, vtypes[i][1]) in ((0, 2), (2, 0)):
                    # if counter vs derive don't break, assume server is right
                    log.debug("Type mismatch (counter/derive) for %s/%s",
                              stype, vtypes[i][0])
                else:
                    raise ProtocolError("Type mismatch with types.db")
        data = data[nvals:]
        for i in range(nvals):
            vdata, data = data[:8], data[8:]
            (val,) = struct.unpack(types[vtypes[i][1]], vdata)
            yield vtypes[i][0], vtypes[i][1], val

    def _parse_string(self, name):
        # Closure storing a NUL-terminated string field on the sample.
        def _parser(sample, data):
            if six.PY3:
                data = data.decode()
            if data[-1] != '\0':
                raise ProtocolError("Invalid string detected.")
            sample[name] = data[:-1]
        return _parser

    def _parse_time(self, name):
        # Closure storing a whole-second epoch timestamp field.
        def _parser(sample, data):
            if len(data) != 8:
                raise ProtocolError("Invalid time data length.")
            (val,) = struct.unpack("!Q", data)
            sample[name] = float(val)
        return _parser

    def _parse_time_hires(self, name):
        # Closure storing a high-resolution (units of 2**-30 s) timestamp.
        def _parser(sample, data):
            if len(data) != 8:
                raise ProtocolError("Invalid hires time data length.")
            (val,) = struct.unpack("!Q", data)
            sample[name] = val * (2 ** -30)
        return _parser
class CollectDCrypto(object):
    """Authentication/decryption layer for collectd network packets.

    Supports the network plugin's signed (HMAC-SHA256) and encrypted
    (AES-256 in OFB mode) part types, with credentials loaded from
    collectd's auth file.
    """

    def __init__(self, cfg):
        # Normalize the configured security level to 0 (none), 1 (sign)
        # or 2 (encrypt).
        sec_level = cfg.collectd_security_level
        if sec_level in ("sign", "SIGN", "Sign", 1):
            self.sec_level = 1
        elif sec_level in ("encrypt", "ENCRYPT", "Encrypt", 2):
            self.sec_level = 2
        else:
            self.sec_level = 0
        self.auth_file = cfg.collectd_auth_file
        self.auth_db = {}  # username -> password, from the auth file
        self.cfg_mon = None
        if self.auth_file:
            self.load_auth_file()
            # Watch the auth file so edits are picked up without restart.
            self.cfg_mon = FileMonitor(self.auth_file)
        if self.sec_level:
            if not self.auth_file:
                raise ConfigError("Collectd security level configured but no "
                                  "auth file specified in configuration")
            if not self.auth_db:
                log.warning("Collectd security level configured but no "
                            "user/passwd entries loaded from auth file")

    def load_auth_file(self):
        """(Re)load 'user: password' entries from the auth file."""
        try:
            f = open(self.auth_file)
        except IOError as exc:
            raise ConfigError("Unable to load collectd's auth file: %r" % exc)
        self.auth_db.clear()
        for line in f:
            line = line.strip()
            if not line or line[0] == "#":
                continue
            user, passwd = line.split(":", 1)
            user = user.strip()
            passwd = passwd.strip()
            if not user or not passwd:
                log.warning("Found line with missing user or password")
                continue
            if user in self.auth_db:
                log.warning("Found multiple entries for single user")
            self.auth_db[user] = passwd
        f.close()
        log.info("Loaded collectd's auth file from %s", self.auth_file)

    def parse(self, data):
        """Return the plaintext payload of *data*, enforcing sec level.

        Raises ProtocolError when the packet's protection is weaker than
        configured, or when verification/decryption fails.
        """
        if len(data) < 4:
            raise ProtocolError("Truncated header.")
        part_type, part_len = struct.unpack("!HH", data[:4])
        # 0x0200 = signed part, 0x0210 = encrypted part.
        sec_level = {0x0200: 1, 0x0210: 2}.get(part_type, 0)
        if sec_level < self.sec_level:
            raise ProtocolError("Packet has lower security level than allowed")
        if not sec_level:
            return data
        if sec_level == 1 and not self.sec_level:
            # Signing not required by config: skip the signature part
            # without verifying it.
            return data[part_len:]
        data = data[4:]
        part_len -= 4
        if len(data) < part_len:
            raise ProtocolError("Truncated part payload.")
        if self.cfg_mon is not None and self.cfg_mon.modified():
            log.info("Collectd authfile modified, reloading")
            self.load_auth_file()
        if sec_level == 1:
            return self.parse_signed(part_len, data)
        if sec_level == 2:
            return self.parse_encrypted(part_len, data)

    def parse_signed(self, part_len, data):
        """Verify an HMAC-SHA256 signed part and return the payload."""
        if part_len <= 32:
            raise ProtocolError("Truncated signed part.")
        # Part layout: 32-byte HMAC, then username; the payload follows
        # the part.  The HMAC covers username + payload.
        sig, data = data[:32], data[32:]
        uname_len = part_len - 32
        uname = data[:uname_len].decode()
        if uname not in self.auth_db:
            raise ProtocolError("Signed packet, unknown user '%s'" % uname)
        password = self.auth_db[uname].encode()
        sig2 = hmac.new(password, msg=data, digestmod=sha256).digest()
        if not self._hashes_match(sig, sig2):
            raise ProtocolError("Bad signature from user '%s'" % uname)
        data = data[uname_len:]
        return data

    def parse_encrypted(self, part_len, data):
        """Decrypt an AES-256-OFB encrypted part and return the payload."""
        if part_len != len(data):
            raise ProtocolError("Enc pkt size disaggrees with header.")
        if len(data) <= 38:
            raise ProtocolError("Truncated encrypted part.")
        # Part layout: 2-byte username length, username, 16-byte IV, then
        # ciphertext of (20-byte SHA1 checksum + payload).
        uname_len, data = struct.unpack("!H", data[:2])[0], data[2:]
        if len(data) <= uname_len + 36:
            raise ProtocolError("Truncated encrypted part.")
        uname, data = data[:uname_len].decode(), data[uname_len:]
        if uname not in self.auth_db:
            raise ProtocolError("Couldn't decrypt, unknown user '%s'" % uname)
        iv, data = data[:16], data[16:]
        password = self.auth_db[uname].encode()
        # AES key is the SHA-256 digest of the password (32 bytes -> AES-256).
        key = sha256(password).digest()
        # OFB is a stream cipher; zero-pad to the 16-byte block size for
        # the cipher API and strip the same count after decryption.
        pad_bytes = 16 - (len(data) % 16)
        data += b'\0' * pad_bytes
        data = AES.new(key, IV=iv, mode=AES.MODE_OFB).decrypt(data)
        data = data[:-pad_bytes]
        tag, data = data[:20], data[20:]
        tag2 = sha1(data).digest()
        if not self._hashes_match(tag, tag2):
            raise ProtocolError("Bad checksum on enc pkt for '%s'" % uname)
        return data

    def _hashes_match(self, a, b):
        """Constant time comparison of bytes for py3, strings for py2"""
        if len(a) != len(b):
            return False
        diff = 0
        if six.PY2:
            a = bytearray(a)
            b = bytearray(b)
        for x, y in zip(a, b):
            diff |= x ^ y
        return not diff
class CollectDConverter(object):
    """Translate parsed collectd samples into metric tuples using
    per-plugin converter callables (see DEFAULT_CONVERTERS)."""

    def __init__(self, cfg):
        self.converters = dict(DEFAULT_CONVERTERS)
        self._load_converters(cfg)

    def convert(self, sample):
        """Convert one sample dict into (host, name, vtype, value, time).

        Returns None when the converter ignores the sample, produces an
        empty name, or raises.
        """
        default = self.converters["_default"]
        handler = self.converters.get(sample["plugin"], default)
        try:
            name_parts = handler(sample)
            if name_parts is None:
                return  # treat None as "ignore sample"
            name = '.'.join(name_parts)
        except Exception:
            # Bug fix: this was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            log.exception("Exception in sample handler  %s (%s):",
                          sample["plugin"], handler)
            return
        host = sample.get("host", "")
        return (
            host,
            name,
            sample["value_type"],
            sample["value"],
            int(sample["time"])
        )

    def _load_converters(self, cfg):
        """Register converters from config and (optionally) entry points."""
        cfg_conv = cfg.collectd_converters
        for conv in cfg_conv:
            self._add_converter(conv, cfg_conv[conv], source="config")
        if not cfg.collectd_use_entry_points:
            return
        import pkg_resources
        group = 'bucky.collectd.converters'
        for ep in pkg_resources.iter_entry_points(group):
            name, klass = ep.name, ep.load()
            self._add_converter(name, klass, source=ep.module_name)

    def _add_converter(self, name, inst, source="unknown"):
        """Register *inst* under *name*; on conflict the higher PRIORITY
        attribute wins (missing PRIORITY counts as 0)."""
        if name not in self.converters:
            log.info("Converter: %s from %s", name, source)
            self.converters[name] = inst
            return
        kpriority = getattr(inst, "PRIORITY", 0)
        ipriority = getattr(self.converters[name], "PRIORITY", 0)
        if kpriority > ipriority:
            log.info("Replacing: %s", name)
            log.info("Converter: %s from %s", name, source)
            self.converters[name] = inst
            return
        log.info("Ignoring: %s (%s) from %s (priority: %s vs %s)",
                 name, inst, source, kpriority, ipriority)
class CollectDHandler(object):
    """Wraps all CollectD parsing functionality in a class"""

    def __init__(self, cfg):
        self.crypto = CollectDCrypto(cfg)
        self.parser = CollectDParser(cfg.collectd_types,
                                     cfg.collectd_counter_eq_derive)
        self.converter = CollectDConverter(cfg)
        # (host, name) -> (value, time) of the previous reading; used to
        # turn counter/derive/absolute values into per-second rates.
        self.prev_samples = {}
        # Last raw sample parsed, retained for error diagnostics only.
        self.last_sample = None

    def parse(self, data):
        """Yield (host, name, value, timestamp) tuples from one packet."""
        try:
            data = self.crypto.parse(data)
        except ProtocolError as e:
            log.error("Protocol error in CollectDCrypto: %s", e)
            return
        try:
            for sample in self.parser.parse(data):
                self.last_sample = sample
                stype = sample["type"]
                vname = sample["value_name"]
                sample = self.converter.convert(sample)
                if sample is None:
                    continue
                host, name, vtype, val, time = sample
                if not name.strip():
                    continue
                val = self.calculate(host, name, vtype, val, time)
                val = self.check_range(stype, vname, val)
                if val is not None:
                    yield host, name, val, time
        except ProtocolError as e:
            log.error("Protocol error: %s", e)
            if self.last_sample is not None:
                log.info("Last sample: %s", self.last_sample)

    def check_range(self, stype, vname, val):
        """Return *val*, or None when it falls outside the types.db
        min/max range for this type/value name."""
        if val is None:
            return
        try:
            vmin, vmax = self.parser.types.type_ranges[stype][vname]
        except KeyError:
            log.error("Couldn't find vmin, vmax in CollectDTypes")
            return val
        if vmin is not None and val < vmin:
            log.debug("Invalid value %s (<%s) for %s", val, vmin, vname)
            log.debug("Last sample: %s", self.last_sample)
            return
        if vmax is not None and val > vmax:
            log.debug("Invalid value %s (>%s) for %s", val, vmax, vname)
            log.debug("Last sample: %s", self.last_sample)
            return
        return val

    def calculate(self, host, name, vtype, val, time):
        """Dispatch on collectd value type; gauges pass through unchanged."""
        handlers = {
            0: self._calc_counter,  # counter
            1: lambda _host, _name, v, _time: v,  # gauge
            2: self._calc_derive,  # derive
            3: self._calc_absolute  # absolute
        }
        if vtype not in handlers:
            log.error("Invalid value type %s for %s", vtype, name)
            log.info("Last sample: %s", self.last_sample)
            return
        return handlers[vtype](host, name, val, time)

    def _calc_counter(self, host, name, val, time):
        """Per-second rate for COUNTER values, handling 32/64-bit wrap.

        Returns None for the first observation of a series or when the
        timestamp does not advance.
        """
        key = (host, name)
        if key not in self.prev_samples:
            self.prev_samples[key] = (val, time)
            return
        pval, ptime = self.prev_samples[key]
        self.prev_samples[key] = (val, time)
        if time <= ptime:
            log.error("Invalid COUNTER update for: %s:%s" % key)
            log.info("Last sample: %s", self.last_sample)
            return
        if val < pval:
            # this is supposed to handle counter wrap around
            # see https://collectd.org/wiki/index.php/Data_source
            log.debug("COUNTER wrap-around for: %s:%s (%s -> %s)",
                      host, name, pval, val)
            if pval < 0x100000000:
                val += 0x100000000  # 2**32
            else:
                val += 0x10000000000000000  # 2**64
        return float(val - pval) / (time - ptime)

    def _calc_derive(self, host, name, val, time):
        """Per-second rate for DERIVE values (signed; no wrap handling)."""
        key = (host, name)
        if key not in self.prev_samples:
            self.prev_samples[key] = (val, time)
            return
        pval, ptime = self.prev_samples[key]
        self.prev_samples[key] = (val, time)
        if time <= ptime:
            log.debug("Invalid DERIVE update for: %s:%s" % key)
            log.debug("Last sample: %s", self.last_sample)
            return
        return float(val - pval) / (time - ptime)

    def _calc_absolute(self, host, name, val, time):
        """Per-second rate for ABSOLUTE values (the raw count divided by
        elapsed time; previous value is ignored)."""
        key = (host, name)
        if key not in self.prev_samples:
            self.prev_samples[key] = (val, time)
            return
        _pval, ptime = self.prev_samples[key]
        self.prev_samples[key] = (val, time)
        if time <= ptime:
            log.error("Invalid ABSOLUTE update for: %s:%s" % key)
            log.info("Last sample: %s", self.last_sample)
            return
        return float(val) / (time - ptime)
class CollectDServer(UDPServer):
    """Single processes CollectDServer"""

    def __init__(self, queue, cfg):
        super(CollectDServer, self).__init__(cfg.collectd_ip,
                                             cfg.collectd_port)
        self.handler = CollectDHandler(cfg)
        self.queue = queue

    def handle(self, data, addr):
        # Parse each UDP datagram in-process and push the resulting
        # samples onto the shared queue; True keeps the server running.
        for sample in self.handler.parse(data):
            self.queue.put(sample)
        return True
class CollectDWorker(multiprocessing.Process):
    """CollectDWorker plugs a CollectDHandler between a pipe and a queue"""

    def __init__(self, pipe, queue, cfg, id_num=-1):
        super(CollectDWorker, self).__init__()
        self.daemon = True
        self.name = "CollectDWorker%d" % id_num
        self.pipe = pipe
        self.queue = queue
        self.cfg = cfg

    def run(self):
        """Consume raw packets from the pipe until the None sentinel."""
        log.info("CollectDWorker up and running")
        setproctitle("bucky: %s" % self.name)
        handler = CollectDHandler(self.cfg)
        while True:
            try:
                data = self.pipe.recv()
            except KeyboardInterrupt:
                # Ignore Ctrl-C in the worker; shutdown is driven by the
                # parent sending the None sentinel through the pipe.
                continue
            if data is None:
                break
            for sample in handler.parse(data):
                self.queue.put(sample)
class CollectDServerMP(UDPServer):
    """Multiprocess CollectD server

    Starts a configurable (cfg.collectd_workers) number of worker processes.
    Incoming packets are routed to workers by hashing the source address,
    so all packets from a given IP address go to the same worker within
    one server run.
    """

    def __init__(self, queue, cfg):
        super(CollectDServerMP, self).__init__(cfg.collectd_ip,
                                               cfg.collectd_port)
        self.daemon = False
        self.queue = queue
        self.cfg = cfg
        self.workers = []

    def run(self):
        """Spawn the worker pool, then enter the UDP receive loop."""
        def sigterm_handler(signum, frame):
            log.info("Received SIGTERM")
            self.close()

        self.workers = []
        for i in range(self.cfg.collectd_workers):
            recv, send = multiprocessing.Pipe()
            worker = CollectDWorker(recv, self.queue, self.cfg, i)
            worker.start()
            self.workers.append((worker, send))
        signal.signal(signal.SIGTERM, sigterm_handler)
        super(CollectDServerMP, self).run()

    def handle(self, data, addr):
        ip_addr, port = addr
        # deterministically map source ip address to worker
        # NOTE(review): hash() of a str is randomized per process on
        # Python 3, so the mapping is only stable within one server run.
        index = hash(ip_addr) % len(self.workers)
        worker, pipe = self.workers[index]
        pipe.send(data)
        # check if all is running
        for worker, pipe in self.workers:
            if not worker.is_alive():
                # Returning a falsy value here (implicit None) signals the
                # server loop to stop, unlike the True below.
                log.error("Worker %s died, stopping server.", worker)
                return
        return True

    def pre_shutdown(self):
        """Send the shutdown sentinel to every worker and reap them."""
        log.info("Shutting down CollectDServer")
        for worker, pipe in self.workers:
            log.info("Stopping worker %s", worker)
            pipe.send(None)  # None is the worker's shutdown sentinel
        for worker, pipe in self.workers:
            worker.join(self.cfg.process_join_timeout)
        # Anything still alive after the join timeout gets terminated.
        for child in multiprocessing.active_children():
            log.error("Child %s didn't die gracefully, terminating", child)
            child.terminate()
            child.join(1)
def getCollectDServer(queue, cfg):
    """Get the appropriate collectd server (multi processed or not)."""
    if cfg.collectd_workers > 1:
        return CollectDServerMP(queue, cfg)
    return CollectDServer(queue, cfg)
| {
"content_hash": "da7f918259b0af6cedfa67a83f6157bf",
"timestamp": "",
"source": "github",
"line_count": 638,
"max_line_length": 93,
"avg_line_length": 35.843260188087775,
"alnum_prop": 0.5405807241560259,
"repo_name": "JoseKilo/bucky",
"id": "d1b555c8ed47e3806809a0ca9a9f4d051882fa75",
"size": "23435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bucky/collectd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123893"
},
{
"name": "Shell",
"bytes": "115"
}
],
"symlink_target": ""
} |
from JumpScale import j
OsisBaseObject=j.core.osis.getOsisBaseObjectClass()
class Group(OsisBaseObject):
    """Osis model for a user group in the grid.

    NOTE(review): the original docstring described "a node in the grid"
    with a netaddr mapping, which appears copy-pasted from another model;
    this class stores group id/domain/roles/users membership data.
    """

    def __init__(self, ddict=None):
        """Initialise from *ddict* when provided, otherwise set defaults.

        The original signature used the mutable default ``ddict={}`` and
        the Python-2-only ``<>`` operator; ``None`` plus a truthiness
        check is equivalent for existing callers and also runs on
        Python 3.
        """
        if ddict:
            self.load(ddict)
        else:
            self.id = ""
            self.domain = ""
            self.gid = j.application.whoAmI.gid
            self.roles = []
            self.active = True
            self.description = ""
            self.lastcheck = 0  # epoch of last time the info updated
            self.guid = ""
            self.users = []

    def getSetGuid(self):
        """
        use osis to define & set unique guid (sometimes also id)
        """
        self.gid = int(self.gid)
        # (dropped the no-op "self.id = self.id" assignment and the stale
        # commented-out struct.pack line from the original)
        self.guid = "%s_%s" % (self.gid, self.id)
        self.lastcheck = j.base.time.getTimeEpoch()
        return self.guid
| {
"content_hash": "fc5e12282cb8e840d246dd2e06673142",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 65,
"avg_line_length": 23.5609756097561,
"alnum_prop": 0.5279503105590062,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "9350e37aa2d030edc213aae32f40cb650a6d2ebb",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/osis/logic/system/group/model.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
"""Support for NZBGet switches."""
from __future__ import annotations
from typing import Callable
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from . import NZBGetEntity
from .const import DATA_COORDINATOR, DOMAIN
from .coordinator import NZBGetDataUpdateCoordinator
async def async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities: Callable[[list[Entity], bool], None],
) -> None:
    """Set up NZBGet sensor based on a config entry."""
    entry_data = hass.data[DOMAIN][entry.entry_id]
    coordinator: NZBGetDataUpdateCoordinator = entry_data[DATA_COORDINATOR]

    # A single switch per config entry: pause/resume of the download queue.
    download_switch = NZBGetDownloadSwitch(
        coordinator, entry.entry_id, entry.data[CONF_NAME]
    )
    async_add_entities([download_switch])
class NZBGetDownloadSwitch(NZBGetEntity, SwitchEntity):
    """Switch entity that pauses/resumes the NZBGet download queue."""

    def __init__(
        self,
        coordinator: NZBGetDataUpdateCoordinator,
        entry_id: str,
        entry_name: str,
    ):
        """Initialize a new NZBGet switch."""
        self._unique_id = f"{entry_id}_download"
        super().__init__(
            coordinator=coordinator,
            entry_id=entry_id,
            name=f"{entry_name} Download",
        )

    @property
    def unique_id(self) -> str:
        """Return the unique ID of the switch."""
        return self._unique_id

    @property
    def is_on(self):
        """Return True while the download queue is not paused."""
        status = self.coordinator.data["status"]
        return not status.get("DownloadPaused", False)

    async def async_turn_on(self, **kwargs) -> None:
        """Set downloads to enabled."""
        resume = self.coordinator.nzbget.resumedownload
        await self.hass.async_add_executor_job(resume)
        await self.coordinator.async_request_refresh()

    async def async_turn_off(self, **kwargs) -> None:
        """Set downloads to paused."""
        pause = self.coordinator.nzbget.pausedownload
        await self.hass.async_add_executor_job(pause)
        await self.coordinator.async_request_refresh()
| {
"content_hash": "ba94c850ee08613b4994abd6c90e9694",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 86,
"avg_line_length": 30.41891891891892,
"alnum_prop": 0.6628165259884495,
"repo_name": "adrienbrault/home-assistant",
"id": "4f0eae17c23dc1383bc8bfbf479b528e297ff40c",
"size": "2251",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/nzbget/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from django.core import exceptions
from django.db import connection, DataError
from django.test.utils import override_settings
from olympia.access.models import Group
from olympia.amo.fields import HttpHttpsOnlyURLField, CIDRField
from olympia.amo.tests import TestCase
class HttpHttpsOnlyURLFieldTestCase(TestCase):
    """Validation tests for HttpHttpsOnlyURLField.

    Only http/https URLs may pass, a missing scheme is normalized to
    http, and URLs pointing at our own DOMAIN are rejected in any form.
    """

    domain = 'example.com'

    def setUp(self):
        super().setUp()
        # DOMAIN is only overridden while the field is built; presumably
        # the field captures the setting at construction time -- confirm
        # in the field implementation.
        with override_settings(DOMAIN=self.domain):
            self.field = HttpHttpsOnlyURLField()

    def test_invalid_scheme_validation_error(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean('javascript://foo.com/')

    def test_invalid_ftp_scheme_validation_error(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean('ftp://foo.com/')

    def test_invalid_ftps_scheme_validation_error(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean('ftps://foo.com/')

    def test_no_scheme_assumes_http(self):
        assert self.field.clean('foo.com') == 'http://foo.com'

    def test_http_scheme(self):
        assert self.field.clean('http://foo.com/') == 'http://foo.com/'

    def test_https_scheme(self):
        assert self.field.clean('https://foo.com/') == 'https://foo.com/'

    def test_catches_invalid_url(self):
        # https://github.com/mozilla/addons-server/issues/1452
        with self.assertRaises(exceptions.ValidationError):
            assert self.field.clean('https://test.[com')

    def test_with_domain_and_no_scheme(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean('%s' % self.domain)

    def test_with_domain_and_http(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean('http://%s' % self.domain)

    def test_with_domain_and_https(self):
        with self.assertRaises(exceptions.ValidationError):
            self.field.clean('https://%s' % self.domain)

    def test_domain_is_escaped_in_regex_validator(self):
        # 'example-com.fr' must NOT be caught by the DOMAIN ban, i.e. the
        # '.' in 'example.com' is escaped in the validator's regex.
        assert self.field.clean('example-com.fr') == 'http://example-com.fr'
class TestPositiveAutoField(TestCase):
    """Tests for PositiveAutoField's generated SQL and database behavior."""

    # Just using Group because it's a known user of PositiveAutoField
    ClassUsingPositiveAutoField = Group

    def test_sql_generated_for_field(self):
        # Non-atomic editor: we only want the generated SQL string, no DDL.
        schema_editor = connection.schema_editor(atomic=False)
        sql, _ = schema_editor.column_sql(
            self.ClassUsingPositiveAutoField,
            self.ClassUsingPositiveAutoField._meta.get_field('id'),
            include_default=False,
        )
        assert sql == 'integer UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY'

    def test_db_field_properties(self):
        table_name = self.ClassUsingPositiveAutoField._meta.db_table
        with connection.cursor() as cursor:
            # NOTE(review): %-interpolating the table name into SQL is only
            # acceptable here because it comes from model metadata, not
            # user input.
            cursor.execute(
                """
                SELECT column_type, column_key, extra
                FROM information_schema.columns
                WHERE table_name='%s' and column_name='id' and
                      table_schema=DATABASE();
                """
                % table_name
            )
            ((column_type, column_key, extra),) = cursor.fetchall()
            # Older MySQL reports a display width ('int(10) unsigned');
            # newer versions drop it ('int unsigned').
            assert column_type == 'int(10) unsigned' or column_type == 'int unsigned'
            assert column_key == 'PRI'
            assert extra == 'auto_increment'

    def test_unsigned_int_limits(self):
        self.ClassUsingPositiveAutoField.objects.create(id=1)
        # Values above the signed 32-bit max must fit (unsigned column)...
        mysql_max_signed_int = 2147483647
        self.ClassUsingPositiveAutoField.objects.create(id=mysql_max_signed_int + 10)
        # ...while negative ids must be rejected by the database.
        with self.assertRaises(DataError):
            self.ClassUsingPositiveAutoField.objects.create(id=-1)
class TestCIDRField(TestCase):
    """Tests for CIDRField's form-field validation."""

    def setUp(self):
        super().setUp()
        self.field = CIDRField().formfield()

    def test_validates_ip6_cidr(self):
        with self.assertRaises(exceptions.ValidationError):
            # Host bit set
            self.field.clean('::1/28')
        # A proper network address must pass.
        self.field.clean('fe80::/28')

    def test_validates_ip4_cidr(self):
        with self.assertRaises(exceptions.ValidationError):
            # Host bit set
            self.field.clean('127.0.0.1/28')
        # A proper network address must pass.
        self.field.clean('127.0.0.0/28')
| {
"content_hash": "7a2e811ead17cd80bda7b4be69e5e65f",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 85,
"avg_line_length": 36.48717948717949,
"alnum_prop": 0.6446474584211759,
"repo_name": "mozilla/olympia",
"id": "0a15983c2baf3c92dabf95e6cfe29f637f09dc8c",
"size": "4269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/amo/tests/test_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3997396"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
from django.conf import settings
def admin_media_prefix(request):
    '''
    Context processor exposing ADMIN_MEDIA_PREFIX to templates.

    Starting from django 1.4, the static files belonging to django admin
    follow the standard conventions, so the prefix is derived from
    STATIC_URL instead of a dedicated setting.
    '''
return {'ADMIN_MEDIA_PREFIX': settings.STATIC_URL + 'admin/' } | {
"content_hash": "90e305b1a069817db241fad8fa6605f3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.7,
"repo_name": "alixedi/django_popcorn",
"id": "237280cacaa3ee473cd1bc820e0bc32a77ba98d9",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "popcorn/context_processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "0"
},
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Python",
"bytes": "39677"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
"""Download modules for built-in datasets.
Download functions accept two arguments:
* `save_directory` : Where to save the downloaded files
* `clear` : If `True`, clear the downloaded files. Defaults to `False`.
"""
from fuel.downloaders import binarized_mnist
from fuel.downloaders import cifar10
from fuel.downloaders import cifar100
from fuel.downloaders import mnist
# Registry of built-in dataset downloaders: (dataset name, fill_subparser)
# pairs; each fill_subparser presumably extends a command-line parser with
# that dataset's download subcommand -- see the downloader modules.
all_downloaders = (
    ('binarized_mnist', binarized_mnist.fill_subparser),
    ('cifar10', cifar10.fill_subparser),
    ('cifar100', cifar100.fill_subparser),
    ('mnist', mnist.fill_subparser))
| {
"content_hash": "58705303654e1518a30dd31e35271e21",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.7452006980802792,
"repo_name": "lamblin/fuel",
"id": "f848b104ad2381cf32408287e92c31d1f86659de",
"size": "573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel/downloaders/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "218851"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import logging
from collections import OrderedDict
import scipy.sparse
import numpy as np
from typing import (
Any,
Dict,
Text,
List,
Tuple,
Callable,
Set,
Optional,
Type,
Union,
)
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.tokenizers.spacy_tokenizer import (
POS_TAG_KEY,
SpacyTokenizer,
)
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.nlu.featurizers.sparse_featurizer.sparse_featurizer import SparseFeaturizer
from rasa.nlu.constants import TOKENS_NAMES
from rasa.shared.constants import DOCS_URL_COMPONENTS
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.constants import TEXT
from rasa.shared.exceptions import InvalidConfigException
import rasa.shared.utils.io
import rasa.utils.io
logger = logging.getLogger(__name__)
END_OF_SENTENCE = "EOS"
BEGIN_OF_SENTENCE = "BOS"
FEATURES = "features"
@DefaultV1Recipe.register(
    DefaultV1Recipe.ComponentType.MESSAGE_FEATURIZER, is_trainable=True
)
class LexicalSyntacticFeaturizer(SparseFeaturizer, GraphComponent):
    """Extracts and encodes lexical syntactic features.

    Given a sequence of tokens, this featurizer produces a sequence of features
    where the `t`-th feature encodes lexical and syntactic information about the `t`-th
    token and it's surrounding tokens.

    In detail: The lexical syntactic features can be specified via a list of
    configurations `[c_0, c_1, ..., c_n]` where each `c_i` is a list of names of
    lexical and syntactic features (e.g. `low`, `suffix2`, `digit`).
    For a given tokenized text, the featurizer will consider a window of size `n`
    around each token and evaluate the given list of configurations as follows:

    - It will extract the features listed in `c_m` where `m = (n-1)/2` if n is even and
      `n/2` from token `t`
    - It will extract the features listed in `c_{m-1}`,`c_{m-2}` ... , from the last,
      second to last, ... token before token `t`, respectively.
    - It will extract the features listed `c_{m+1}`, `c_{m+1}`, ... for the first,
      second, ... token `t`, respectively.

    It will then combine all these features into one feature for position `t`.

    Example:
      If we specify `[['low'], ['upper'], ['prefix2']]`, then for each position `t`
      the `t`-th feature will encode whether the token at position `t` is upper case,
      where the token at position `t-1` is lower case and the first two characters
      of the token at position `t+1`.
    """

    # File name under which the trained feature-to-index mapping is persisted.
    FILENAME_FEATURE_TO_IDX_DICT = "feature_to_idx_dict.pkl"

    # Maps each supported feature name to the extractor applied to a token.
    # NOTE: "suffix5" of the token "is" will be "is". Hence, when combining multiple
    # prefixes, short words will be represented/encoded repeatedly.
    _FUNCTION_DICT: Dict[Text, Callable[[Token], Union[Text, bool, None]]] = {
        "low": lambda token: token.text.islower(),
        "title": lambda token: token.text.istitle(),
        "prefix5": lambda token: token.text[:5],
        "prefix2": lambda token: token.text[:2],
        "suffix5": lambda token: token.text[-5:],
        "suffix3": lambda token: token.text[-3:],
        "suffix2": lambda token: token.text[-2:],
        "suffix1": lambda token: token.text[-1:],
        "pos": lambda token: token.data.get(POS_TAG_KEY, None),
        "pos2": lambda token: token.data.get(POS_TAG_KEY, [])[:2]
        if POS_TAG_KEY in token.data
        else None,
        "upper": lambda token: token.text.isupper(),
        "digit": lambda token: token.text.isdigit(),
    }

    # All configurable feature names: the extractors above plus the two
    # positional markers BOS/EOS.
    SUPPORTED_FEATURES = sorted(
        set(_FUNCTION_DICT.keys()).union([END_OF_SENTENCE, BEGIN_OF_SENTENCE])
    )
@classmethod
def _extract_raw_features_from_token(
cls, feature_name: Text, token: Token, token_position: int, num_tokens: int,
) -> Text:
"""Extracts a raw feature from the token at the given position.
Args:
feature_name: the name of a supported feature
token: the token from which we want to extract the feature
token_position: the position of the token inside the tokenized text
num_tokens: the total number of tokens in the tokenized text
Returns:
the raw feature value as text
"""
if feature_name not in cls.SUPPORTED_FEATURES:
raise InvalidConfigException(
f"Configured feature '{feature_name}' not valid. Please check "
f"'{DOCS_URL_COMPONENTS}' for valid configuration parameters."
)
if feature_name == END_OF_SENTENCE:
return str(token_position == num_tokens - 1)
if feature_name == BEGIN_OF_SENTENCE:
return str(token_position == 0)
return str(cls._FUNCTION_DICT[feature_name](token))
    @classmethod
    def required_components(cls) -> List[Type]:
        """Components that should be included in the pipeline before this component."""
        # Features are computed per token, so a tokenizer must run first.
        return [Tokenizer]
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        """Returns the component's default config."""
        return {
            **SparseFeaturizer.get_default_config(),
            # Default window of size 3: previous token, current token and
            # next token, each with its own list of feature names.
            FEATURES: [
                ["low", "title", "upper"],
                ["BOS", "EOS", "low", "upper", "title", "digit"],
                ["low", "title", "upper"],
            ],
        }
    def __init__(
        self,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        feature_to_idx_dict: Optional[Dict[Tuple[int, Text], Dict[Text, int]]] = None,
    ) -> None:
        """Instantiates a new `LexicalSyntacticFeaturizer` instance.

        Args:
            config: configuration for this component
            model_storage: storage which the component can persist itself to
            resource: this component's location in the model storage
            execution_context: information about the current graph run
            feature_to_idx_dict: trained feature-to-index mapping, if any
        """
        super().__init__(execution_context.node_name, config)
        # graph component
        self._model_storage = model_storage
        self._resource = resource
        self._execution_context = execution_context
        # featurizer specific
        self._feature_config = self._config[FEATURES]
        self._set_feature_to_idx_dict(
            feature_to_idx_dict or {}, check_consistency_with_config=True
        )
@classmethod
def validate_config(cls, config: Dict[Text, Any]) -> None:
"""Validates that the component is configured properly."""
if FEATURES not in config:
return # will be replaced with default
feature_config = config[FEATURES]
message = (
f"Expected configuration of `features` to be a list of lists that "
f"that contain names of lexical and syntactic features "
f"(i.e. {cls.SUPPORTED_FEATURES}). "
f"Received {feature_config} instead. "
)
try:
configured_feature_names = set(
feature_name
for pos_config in feature_config
for feature_name in pos_config
)
except TypeError as e:
raise InvalidConfigException(message) from e
if configured_feature_names.difference(cls.SUPPORTED_FEATURES):
raise InvalidConfigException(message)
    def _set_feature_to_idx_dict(
        self,
        feature_to_idx_dict: Dict[Tuple[int, Text], Dict[Text, int]],
        check_consistency_with_config: bool = False,
    ) -> None:
        """Sets the "feature" to index mapping.

        Here, "feature" denotes the combination of window position, feature name,
        and feature_value.

        Args:
            feature_to_idx_dict: mapping from tuples of window position and feature name
                to a mapping from feature values to indices
            check_consistency_with_config: whether the consistency with the current
                `self.config` should be checked
        """
        self._feature_to_idx_dict = feature_to_idx_dict
        # Total count of distinct (position, name, value) features; this is
        # the width of the sparse vectors produced later.
        self._number_of_features = sum(
            [
                len(feature_values.values())
                for feature_values in self._feature_to_idx_dict.values()
            ]
        )
        if check_consistency_with_config:
            known_features = set(self._feature_to_idx_dict.keys())
            # (position, name) pairs that exist in the loaded mapping but are
            # no longer configured will never be extracted again.
            not_in_config = known_features.difference(
                (
                    (window_idx, feature_name)
                    for window_idx, feature_names in enumerate(self._feature_config)
                    for feature_name in feature_names
                )
            )
            if not_in_config:
                rasa.shared.utils.io.raise_warning(
                    f"A feature to index mapping has been loaded that does not match "
                    f"the configured features. The given mapping configures "
                    f" (position in window, feature_name): {not_in_config}. "
                    f" These are not specified in the given config "
                    f" {self._feature_config}. "
                    f"Continuing with constant values for these features. "
                )
    def train(self, training_data: TrainingData) -> Resource:
        """Trains the featurizer.

        Args:
            training_data: the training data

        Returns:
            the resource from which this trained component can be loaded
        """
        self.warn_if_pos_features_cannot_be_computed(training_data)
        # Build the (position, feature name) -> value -> index vocabulary
        # from all tokenized training examples.
        feature_to_idx_dict = self._create_feature_to_idx_dict(training_data)
        self._set_feature_to_idx_dict(feature_to_idx_dict=feature_to_idx_dict)
        if not self._feature_to_idx_dict:
            rasa.shared.utils.io.raise_warning(
                "No lexical syntactic features could be extracted from the training "
                "data. In order for this component to work you need to define "
                "`features` that can be found in the given training data."
            )
        self.persist()
        return self._resource
def warn_if_pos_features_cannot_be_computed(
self, training_data: TrainingData
) -> None:
"""Warn if part-of-speech features are needed but not given."""
training_example = next(
(
message
for message in training_data.training_examples
if message.get(TOKENS_NAMES[TEXT], [])
),
Message(),
)
tokens_example = training_example.get(TOKENS_NAMES[TEXT], [])
configured_feature_names = set(
feature_name
for pos_config in self._feature_config
for feature_name in pos_config
)
if {"pos", "pos2"}.intersection(
configured_feature_names
) and not tokens_example[0].data.get(POS_TAG_KEY, []):
rasa.shared.utils.io.raise_warning(
f"Expected training data to include tokens with part-of-speech tags"
f"because the given configuration includes part-of-speech features "
f"`pos` and/or `pos2`. "
f"Please add a {SpacyTokenizer.__name__} to your "
f"configuration if you want to use the part-of-speech-features in the"
f"{self.__class__.__name__}. "
f"Continuing without the part-of-speech-features."
)
    def _create_feature_to_idx_dict(
        self, training_data: TrainingData
    ) -> Dict[Tuple[int, Text], Dict[Text, int]]:
        """Create a nested dictionary of all feature values.

        Returns:
            a nested mapping that maps from tuples of positions (in the window) and
            supported feature names to "raw feature to index" mappings, i.e.
            mappings that map the respective raw feature values to unique indices
            (where `unique` means unique with respect to all indices in the
            *nested* mapping)
        """
        # collect all raw feature values seen anywhere in the training data
        feature_vocabulary: Dict[Tuple[int, Text], Set[Text]] = dict()
        for example in training_data.training_examples:
            tokens = example.get(TOKENS_NAMES[TEXT], [])
            sentence_features = self._map_tokens_to_raw_features(tokens)
            for token_features in sentence_features:
                for position_and_feature_name, feature_value in token_features.items():
                    feature_vocabulary.setdefault(position_and_feature_name, set()).add(
                        feature_value
                    )
        # assign a unique index to each feature value
        return self._build_feature_to_index_map(feature_vocabulary)
    def _map_tokens_to_raw_features(
        self, tokens: List[Token]
    ) -> List[Dict[Tuple[int, Text], Text]]:
        """Extracts the raw feature values.

        Args:
            tokens: a tokenized text

        Returns:
            a list of feature dictionaries for each token in the given list
            where each feature dictionary maps a tuple containing
            - a position (in the window) and
            - a supported feature name
            to the corresponding raw feature value
        """
        sentence_features = []

        # in case of an even number we will look at one more word before,
        # e.g. window size 4 will result in a window range of
        # [-2, -1, 0, 1] (0 = current word in sentence)
        window_size = len(self._feature_config)
        half_window_size = window_size // 2
        window_range = range(-half_window_size, half_window_size + window_size % 2)
        assert len(window_range) == window_size

        for anchor in range(len(tokens)):
            token_features: Dict[Tuple[int, Text], Text] = {}
            # window_position indexes into self._feature_config;
            # relative_position is the offset from the anchor token.
            for window_position, relative_position in enumerate(window_range):
                absolute_position = anchor + relative_position
                # skip, if current_idx is pointing to a non-existing token
                if absolute_position < 0 or absolute_position >= len(tokens):
                    continue
                token = tokens[absolute_position]
                for feature_name in self._feature_config[window_position]:
                    token_features[
                        (window_position, feature_name)
                    ] = self._extract_raw_features_from_token(
                        token=token,
                        feature_name=feature_name,
                        token_position=absolute_position,
                        num_tokens=len(tokens),
                    )
            sentence_features.append(token_features)
        return sentence_features
    @staticmethod
    def _build_feature_to_index_map(
        feature_vocabulary: Dict[Tuple[int, Text], Set[Text]]
    ) -> Dict[Tuple[int, Text], Dict[Text, int]]:
        """Creates a nested dictionary for mapping raw features to indices.

        Args:
            feature_vocabulary: a mapping from tuples of positions (in the window) and
                supported feature names to the set of possible feature values

        Returns:
            a nested mapping that maps from tuples of positions (in the window) and
            supported feature names to "raw feature to index" mappings, i.e.
            mappings that map the respective raw feature values to unique indices
            (where `unique` means unique with respect to all indices in the
            *nested* mapping)
        """
        # Note that this will only sort the top level keys - and we keep
        # doing it to ensure consistently with what was done before)
        ordered_feature_vocabulary: OrderedDict[
            Tuple[int, Text], Set[Text]
        ] = OrderedDict(sorted(feature_vocabulary.items()))

        # create the nested mapping; `offset` makes indices globally unique
        # across all (position, feature name) groups
        feature_to_idx_dict: Dict[Tuple[int, Text], Dict[Text, int]] = {}
        offset = 0
        for (
            position_and_feature_name,
            feature_values,
        ) in ordered_feature_vocabulary.items():
            # sorting the values makes index assignment deterministic
            sorted_feature_values = sorted(feature_values)
            feature_to_idx_dict[position_and_feature_name] = {
                feature_value: feature_idx
                for feature_idx, feature_value in enumerate(
                    sorted_feature_values, start=offset
                )
            }
            offset += len(feature_values)
        return feature_to_idx_dict
def process(self, messages: List[Message]) -> List[Message]:
"""Featurizes all given messages in-place.
Args:
messages: messages to be featurized.
Returns:
The same list with the same messages after featurization.
"""
for message in messages:
self._process_message(message)
return messages
def process_training_data(self, training_data: TrainingData) -> TrainingData:
"""Processes the training examples in the given training data in-place.
Args:
training_data: the training data
Returns:
same training data after processing
"""
self.process(training_data.training_examples)
return training_data
    def _process_message(self, message: Message) -> None:
        """Featurizes the given message in-place.

        Args:
            message: a message to be featurized
        """
        if not self._feature_to_idx_dict:
            # Untrained (or training produced no features): skip silently
            # apart from a warning so the pipeline keeps running.
            rasa.shared.utils.io.raise_warning(
                f"The {self.__class__.__name__} {self._identifier} has not been "
                f"trained properly yet. "
                f"Continuing without adding features from this featurizer."
            )
            return
        tokens = message.get(TOKENS_NAMES[TEXT])
        if tokens:
            sentence_features = self._map_tokens_to_raw_features(tokens)
            sparse_matrix = self._map_raw_features_to_indices(sentence_features)
            self.add_features_to_message(
                # FIXME: create sentence feature and make `sentence` non optional
                sequence=sparse_matrix,
                sentence=None,
                attribute=TEXT,
                message=message,
            )
    def _map_raw_features_to_indices(
        self, sentence_features: List[Dict[Tuple[int, Text], Any]]
    ) -> scipy.sparse.coo_matrix:
        """Converts the raw features to one-hot encodings.

        Requires the "feature" to index dictionary, i.e. the featurizer must have
        been trained.

        Args:
            sentence_features: a list of feature dictionaries where the `t`-th feature
                dictionary maps a tuple containing
                - a position (in the window) and
                - a supported feature name
                to the raw feature value extracted from the window around the `t`-th token.

        Returns:
            a sparse matrix where the `i`-th row is a multi-hot vector that encodes the
            raw features extracted from the window around the `i`-th token
        """
        rows = []
        cols = []
        shape = (len(sentence_features), self._number_of_features)
        for token_idx, token_features in enumerate(sentence_features):
            for position_and_feature_name, feature_value in token_features.items():
                mapping = self._feature_to_idx_dict.get(position_and_feature_name)
                if not mapping:
                    # feature (position, name) unknown at training time
                    continue
                # -1 sentinel: value was never seen during training -> skip
                feature_idx = mapping.get(feature_value, -1)
                if feature_idx > -1:
                    rows.append(token_idx)
                    cols.append(feature_idx)
        rows = np.array(rows)
        cols = np.array(cols)
        data = np.ones(len(rows))
        return scipy.sparse.coo_matrix((data, (rows, cols)), shape=shape)
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> LexicalSyntacticFeaturizer:
"""Creates a new untrained component (see parent class for full docstring)."""
return cls(config, model_storage, resource, execution_context)
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
) -> LexicalSyntacticFeaturizer:
"""Loads trained component (see parent class for full docstring)."""
try:
with model_storage.read_from(resource) as model_path:
feature_to_idx_dict = rasa.utils.io.json_unpickle(
model_path / cls.FILENAME_FEATURE_TO_IDX_DICT,
encode_non_string_keys=True,
)
return cls(
config=config,
model_storage=model_storage,
resource=resource,
execution_context=execution_context,
feature_to_idx_dict=feature_to_idx_dict,
)
except ValueError:
logger.debug(
f"Failed to load `{cls.__class__.__name__}` from model storage. "
f"Resource '{resource.name}' doesn't exist."
)
return cls(
config=config,
model_storage=model_storage,
resource=resource,
execution_context=execution_context,
)
    def persist(self) -> None:
        """Persist this model (see parent class for full docstring)."""
        # Nothing to persist for an untrained featurizer.
        if not self._feature_to_idx_dict:
            return None
        with self._model_storage.write_to(self._resource) as model_path:
            rasa.utils.io.json_pickle(
                model_path / self.FILENAME_FEATURE_TO_IDX_DICT,
                self._feature_to_idx_dict,
                # keys are (position, feature_name) tuples, which plain
                # JSON cannot represent directly
                encode_non_string_keys=True,
            )
| {
"content_hash": "3c8bc53128c5380e1eac8679f517f5db",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 88,
"avg_line_length": 40.46111111111111,
"alnum_prop": 0.5976017209025585,
"repo_name": "RasaHQ/rasa_nlu",
"id": "2c3d5e91f107fb2bd9981a869307c9606fa68721",
"size": "21849",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
} |
from c7n.utils import yaml_load
from .common import BaseTest
import logging
from pprint import pformat
logger = logging.getLogger(name="c7n.tests")
class PutMetricsTest(BaseTest):
    """Tests for the ``put-metric`` policy action.

    Fix applied: the deprecated ``logger.warn`` alias was replaced with
    ``logger.warning``.
    """

    # Flip to True to re-record flight data against live AWS accounts.
    record = False

    EXAMPLE_EC2_POLICY = """
        policies:
          - name: track-attached-ebs
            resource: ec2
            comment: |
              Put the count of the number of EBS attached disks to an instance
            #filters:
            #  - Name: tracked-ec2-instance
            actions:
              - type: put-metric
                key: BlockDeviceMappings[].DeviceName
                namespace: Usage Metrics
                metric_name: Attached Disks
                dimensions:
                  - { a: b }
                op: distinct_count
    """

    EXAMPLE_S3_POLICY = """
        policies:
          - name: bucket-count
            resource: s3
            comment: |
              Count all the buckets!
            #filters:
            #  - Name: passthru
            #    type: value
            #    key: Name
            #    value: 0
            actions:
              - type: put-metric
                key: Name
                namespace: Usage Metrics
                metric_name: S3 Buckets
                op: count
    """

    def _get_test_policy(self, name, yaml_doc, record=False):
        """Load the first policy from *yaml_doc* with a record/replay
        session factory named after *name*."""
        if record:
            # `Logger.warn` is a deprecated alias; use `warning`.
            logger.warning("TestPutMetrics is RECORDING")
            session_factory = self.record_flight_data("test_cw_put_metrics_" + name)
        else:
            logger.debug("TestPutMetrics is replaying")
            session_factory = self.replay_flight_data("test_cw_put_metrics_" + name)
        policy = self.load_policy(
            yaml_load(yaml_doc)["policies"][0], session_factory=session_factory
        )
        return policy

    def _test_putmetrics_s3(self):
        """ This test fails when replaying flight data due to an issue with placebo.
        """
        # Leading underscore keeps this out of test discovery (see docstring).
        policy = self._get_test_policy(
            name="s3test", yaml_doc=self.EXAMPLE_S3_POLICY, record=self.record
        )
        resources = policy.run()
        logger.debug(
            "these are the results from the policy, assumed to be resources that were processed"
        )
        logger.debug(pformat(resources))
        self.assertGreaterEqual(
            len(resources), 1, "PutMetricsTest appears to have processed 0 resources."
        )

    def test_putmetrics_ec2(self):
        policy = self._get_test_policy(
            name="ec2test", yaml_doc=self.EXAMPLE_EC2_POLICY, record=self.record
        )
        resources = policy.run()
        logger.debug(
            "these are the results from the policy, assumed to be resources that were processed"
        )
        logger.debug(pformat(resources))
        self.assertGreaterEqual(
            len(resources),
            1,
            "PutMetricsTest appears to have processed 0 resources. "
            "Are there any running ec2 instances?",
        )

    def test_putmetrics_permissions(self):
        from c7n.actions import PutMetric

        # Both the class attribute and the instance accessor must expose
        # the CloudWatch permission.
        self.assertTrue("cloudwatch:PutMetricData" in PutMetric.permissions)
        pma = PutMetric()
        self.assertTrue("cloudwatch:PutMetricData" in pma.get_permissions())

    def test_putmetrics_schema(self):
        import jsonschema
        from c7n.actions import PutMetric

        data = yaml_load(self.EXAMPLE_EC2_POLICY)
        action_schema = PutMetric.schema
        # jsonschema.validate returns None on success.
        res = jsonschema.validate(data["policies"][0]["actions"][0], action_schema)
        self.assertIsNone(res, "PutMetric.schema failed to validate.")
| {
"content_hash": "8c7819c9a565a4846c92d39acd596daf",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 96,
"avg_line_length": 34.822429906542055,
"alnum_prop": 0.5501878690284487,
"repo_name": "capitalone/cloud-custodian",
"id": "737a934e078b420738e987611da5965acb4d88b3",
"size": "3848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_put_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import StringMessage
from models import BooleanMessage
from models import TeeShirtSize
from models import (Profile, ProfileMiniForm, ProfileForm)
from models import (Conference, ConferenceForm, ConferenceForms,
ConferenceQueryForm, ConferenceQueryForms)
from models import (Session, SessionForm, SessionForms)
from models import (Speaker, SpeakerForm, SpeakerForms)
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Memcache key / message template for the "almost sold out" announcement.
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
# Memcache key / message template for the most recent featured speaker.
MEMCACHE_FSPEAKER_KEY = "RECENT_FEATURED_SPEAKER"
FSPEAKER_TPL = ('{0} featured in the following sessions: {1}')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Field defaults applied when a Conference is created without these values.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}
# Field defaults applied when a Session is created without these values.
SESS_DEFAULTS = {
    "highlights": ['Default', 'Highlights'],
    "typeOfSession": "lecture",
}
# Maps query-form operator names to datastore comparison operators.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}
# Maps query-form field names to Conference model property names.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
# ResourceContainers combine a request message body with URL path/query
# parameters for the endpoints methods below.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)
WISHLIST_POST = endpoints.ResourceContainer(
    sessionKey=messages.StringField(1),
)
USER_SESSIONS_POST = endpoints.ResourceContainer(
    date=messages.StringField(1, required=True),
    dateTo=messages.StringField(2),
)
SESS_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
    typeOfSession=messages.StringField(2),
)
SESS_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1),
)
SESS_PUT_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeSessionKey=messages.StringField(1),
)
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
    speaker=messages.StringField(1),
)
SP_GET_REQUEST = endpoints.ResourceContainer(
    websafeSpeakerKey=messages.StringField(1),
)
SP_POST_REQUEST = endpoints.ResourceContainer(
    SpeakerForm,
)
SP_PUT_REQUEST = endpoints.ResourceContainer(
    SpeakerForm,
    websafeSpeakerKey=messages.StringField(1),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID,
API_EXPLORER_CLIENT_ID,
ANDROID_CLIENT_ID,
IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
    def _createConferenceObject(self, request):
        """Create a Conference object from the request form.

        Requires a signed-in user; the user's Profile (created on demand)
        becomes the parent entity of the new Conference.

        Raises:
            endpoints.UnauthorizedException: no signed-in user.
            endpoints.BadRequestException: missing required 'name' field.
        Returns:
            The (possibly default-filled) request form, echoed back.
        """
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        # get/create profile
        prof = self._getProfileFromUser()
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # form-only fields that have no Conference model property
        del data['websafeKey']
        del data['organizerDisplayName']
        # add default values for those missing (both data model & outbound Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])
        # convert dates from strings to Date objects; set month based on start_date
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
        # set seatsAvailable to be same as maxAttendees on creation
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
        # generate Profile Key based on user ID and Conference
        # ID based on Profile key get Conference key from ID
        p_key = prof.key
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id
        # create Conference, then queue a confirmation email to the organizer
        Conference(**data).put()
        taskqueue.add(params={'email': user.email(),
                              'conferenceInfo': repr(request)},
                      url='/tasks/send_confirmation_email')
        return request
@ndb.transactional()
def _updateConferenceObject(self, request):
"""Update conference Object
Only owner must be allowed
"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
                      http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference."""
        return self._createConferenceObject(request)

    @endpoints.method(CONF_POST_REQUEST, ConferenceForm,
                      path='conference/{websafeConferenceKey}',
                      http_method='PUT', name='updateConference')
    def updateConference(self, request):
        """Update conference w/provided fields & return w/updated info."""
        return self._updateConferenceObject(request)

    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
                      path='conference/{websafeConferenceKey}',
                      http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
        # the organizer's Profile is the parent entity of the Conference
        prof = conf.key.parent().get()
        # return ConferenceForm
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))

    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='getConferencesCreated',
                      http_method='POST', name='getConferencesCreated')
    def getConferencesCreated(self, request):
        """Return conferences created by user."""
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        # create ancestor query for all key matches for this user
        confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
        prof = ndb.Key(Profile, user_id).get()
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(
                conf,
                getattr(prof, 'displayName')) for conf in confs]
        )
    def _getQuery(self, request):
        """Return formatted query from the submitted filters.

        Builds a Conference query from request.filters; the datastore
        requires the first sort order to match the inequality property,
        so ordering depends on whether an inequality filter exists.
        """
        q = Conference.query()
        inequality_filter, filters = self._formatFilters(request.filters)
        # If exists, sort on inequality filter first
        if not inequality_filter:
            q = q.order(Conference.name)
        else:
            q = q.order(ndb.GenericProperty(inequality_filter))
            q = q.order(Conference.name)
        for filtr in filters:
            # month and maxAttendees are integer properties; form values are strings
            if filtr["field"] in ["month", "maxAttendees"]:
                filtr["value"] = int(filtr["value"])
            formatted_query = ndb.query.FilterNode(filtr["field"],
                                                   filtr["operator"],
                                                   filtr["value"])
            q = q.filter(formatted_query)
        return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
    @staticmethod
    def _notifyFollowers():
        """Query confs that have followers and open seats.

        Queues one notification email task per follower, then clears each
        conference's followers list. (Executed by the SetNotificationHandler
        cron job.)
        """
        # ndb requires explicit == True for boolean property filters
        confs = Conference.query(ndb.AND(
            Conference.seatsAvailable > 0,
            Conference.hasFollowers == True
        )
        ).fetch()
        for conf in confs:
            for follower in conf.followedBy:
                taskqueue.add(params={'email': follower, 'conference': conf.name},
                              url='/tasks/send_email_2_follower')
            # followers are notified once, then removed
            conf.followedBy = []
            conf.put()

    @endpoints.method(ConferenceQueryForms, ConferenceForms,
                      path='queryConferences',
                      http_method='POST',
                      name='queryConferences')
    def queryConferences(self, request):
        """Query for conferences."""
        conferences = self._getQuery(request)
        # need to fetch organiser displayName from profiles
        # get all keys and use get_multi for speed
        organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
        profiles = ndb.get_multi(organisers)
        # put display names in a dict for easier fetching
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName
        # return individual ConferenceForm object per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])
                   for conf in conferences]
        )

    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                      path='conference/follow/{websafeConferenceKey}',
                      http_method='GET', name='followConference')
    def followConference(self, request):
        """Add user to the followers list of the conf.

        The list is used to notify users when a conf becomes available again.
        Returns:
            True: user added as follower (conference was full).
            False: conference still has open seats, nothing to follow.
        """
        retVal = True
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        email = user.email()
        wsck = request.websafeConferenceKey
        c_key = ndb.Key(urlsafe=wsck)
        if c_key.kind() != "Conference":
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        conf = c_key.get()
        if email in conf.followedBy:
            raise ConflictException(
                "You already follow this conference")
        if conf.seatsAvailable > 0:
            # seats still available: following makes no sense, report False
            retVal = False
        else:
            conf.followedBy.append(email)
            conf.put()
        return BooleanMessage(data=retVal)
# - - - Conference Sessions - - - - - - - - - - - - - - - - - - -
def _createSessionObject(self, request):
""" Create Session Object
If a speaker is specified, check validity and add his key to the session
"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
wsck = request.websafeConferenceKey
del data['websafeKey']
conf_key = ndb.Key(urlsafe=wsck)
conf = conf_key.get()
# check that conference exists
if not conf or conf_key.kind() != 'Conference':
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Create the Session Object from Input
# add default values for those missing (both data model & outbound Message)
for df in SESS_DEFAULTS:
if data[df] in (None, []):
data[df] = SESS_DEFAULTS[df]
setattr(request, df, SESS_DEFAULTS[df])
# convert dates from strings to Date and Time objects respectively
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
c_key = conf.key
s_id = Session.allocate_ids(size=1, parent=c_key)[0]
s_key = ndb.Key(Session, s_id, parent=c_key)
data['key'] = s_key
# check if speaker is provided and exists
if data['speaker']:
speaker = self._getSpeaker(data['speaker'])
data['speaker'] = speaker.key
# abort if no speaker
if not speaker:
raise endpoints.NotFoundException('No speaker found with key: %s' % data['speaker'])
# add the task for featured speaker
taskqueue.add(params={'wsck': wsck, 'speaker': speaker.fullName},
url='/tasks/featured_speaker')
del data['websafeConferenceKey']
Session(**data).put()
return self._copySessionToForm(request)
    @ndb.transactional(xg=True)
    def _updateSessionObject(self, request):
        """Update Session Object. Only conf owner can update.

        Cross-group transaction: touches both the Session and (via the
        featured-speaker task) speaker-related state. If a speaker is
        specified, a featured-speaker task is queued for the conference.
        """
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        # copy SessionForm/ProtoRPC Message into dict
        wssk = request.websafeSessionKey
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        k = ndb.Key(urlsafe=wssk)
        session = k.get()
        # check that the session exists and the key is of the right kind
        if not session or k.kind() != 'Session':
            raise endpoints.NotFoundException(
                'No session found with key: %s' % wssk)
        # session.conference presumably resolves the parent Conference
        # entity -- confirm in models.py
        conf = session.conference
        wsck = conf.key.urlsafe()
        # check that user is owner
        if user_id != conf.organizerUserId:
            raise endpoints.ForbiddenException(
                'Only the owner can update the conference.')
        # check if speaker is provided and exists
        if data['speaker']:
            # abort if the speaker lookup comes back empty
            speaker = self._getSpeaker(data['speaker'])
            if not speaker:
                raise endpoints.NotFoundException('No speaker found with key: %s' % data['speaker'])
            # add the task for featured speaker
            taskqueue.add(params={'wsck': wsck, 'speaker': speaker.fullName},
                          url='/tasks/featured_speaker')
        for field in request.all_fields():
            data = getattr(request, field.name)
            # only copy fields where we get data
            if data not in (None, []):
                # special handling for dates (convert string to Date)
                if field.name == 'startTime':
                    data = datetime.strptime(data, "%H:%M").time()
                if field.name == 'date':
                    data = datetime.strptime(data, "%Y-%m-%d").date()
                if field.name == 'speaker':
                    # `speaker` was bound above: this branch only runs when
                    # the request carried a non-empty speaker value
                    data = speaker.key
                # write to Session object
                setattr(session, field.name, data)
        session.put()
        return self._copySessionToForm(request)
# Helper to Copy relevant fields from Session to SessionForm."""
def _copySessionToForm(self, sess):
"""Copy relevant fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(sess, field.name):
# convert Date to date string; just copy others
if field.name == 'date':
setattr(sf, field.name, str(getattr(sess, field.name)))
elif field.name == 'speaker':
sp_key = getattr(sess, field.name)
if sp_key:
setattr(sf, field.name, str(sp_key))
elif field.name == 'startTime':
setattr(sf, field.name, str(getattr(sess, field.name)))
else:
setattr(sf, field.name, getattr(sess, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, sess.key.urlsafe())
# Checks that all required fields are initialized.
sf.check_initialized()
return sf
    # Given a conference, return all sessions
    @endpoints.method(CONF_GET_REQUEST, SessionForms,
                      path='conference/sessions/{websafeConferenceKey}',
                      http_method='GET', name='getConferenceSessions')
    def getConferenceSessions(self, request):
        """Return all sessions by conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck)
        if conf.kind() != "Conference":
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        # `sessions` is presumably a query property on the Conference model
        # over its child sessions -- confirm in models.py
        sessions = conf.get().sessions
        sessions = sessions.order(Session.date, Session.startTime, Session.name)
        # return individual SessionForm object per Session
        return SessionForms(
            items=[self._copySessionToForm(sess)
                   for sess in sessions])
# Given a conference, return all sessions of a specified type
@endpoints.method(SESS_GET_REQUEST, SessionForms,
path='conference/sessions/{websafeConferenceKey}/type/{typeOfSession}',
http_method='GET', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Query sessions for a specified type (by websafeConferenceKey)."""
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck)
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
sessions = Session.query(ancestor=conf)
sessions = sessions.filter(Session.typeOfSession == request.typeOfSession)
sessions = sessions.order(Session.date, Session.startTime, Session.name)
return SessionForms(
items=[self._copySessionToForm(sess)
for sess in sessions]
)
    @endpoints.method(SP_GET_REQUEST, SessionForms,
                      path='speakers/{websafeSpeakerKey}/sessions',
                      http_method='GET', name='getSessionsBySpeaker')
    def getSessionsBySpeaker(self, request):
        """Given a speaker, return all sessions given by this particular
        speaker, across all conferences (by websafeSpeakerKey).
        """
        wsspk = request.websafeSpeakerKey
        sp_key = ndb.Key(urlsafe=wsspk)
        speaker = sp_key.get()
        if not speaker or sp_key.kind() != 'Speaker':
            raise endpoints.NotFoundException(
                'No speaker found by the key: %s' % wsspk)
        # featuredSessions is presumably a query property on the Speaker
        # model -- confirm in models.py
        sessions = speaker.featuredSessions.fetch()
        return SessionForms(
            items=[self._copySessionToForm(sess)
                   for sess in sessions]
        )

    # Update Session Endpoint
    @endpoints.method(SESS_PUT_REQUEST, SessionForm,
                      path='conference/sessions/update/{websafeSessionKey}',
                      http_method='PUT', name='updateSession')
    def updateSession(self, request):
        """Update a session in conference (by websafeConferenceKey, websafeSessionKey)."""
        return self._updateSessionObject(request)

    @endpoints.method(SESS_POST_REQUEST, SessionForm,
                      path='conference/sessions/{websafeConferenceKey}',
                      http_method='POST', name='createSession')
    def createSession(self, request):
        """Create new session in conference (by websafeConferenceKey)."""
        return self._createSessionObject(request)
    # Return all sessions which are not workshops and start before 7 PM (19:00).
    @endpoints.method(message_types.VoidMessage, SessionForms,
                      path='sessions/query',
                      http_method='GET', name='getSessionsProblematicQuery')
    def getSessionsProblematicQuery(self, request):
        """Query sessions with two inequality filters.

        The datastore allows only one inequality property per query, so the
        typeOfSession inequality runs in the datastore while the startTime
        upper bound is applied in memory.
        """
        q = Session.query()
        # get time limits
        time_up = datetime.strptime('19:00', '%H:%M').time()
        # ndb filter one inequality ( typeOfSession)
        q = q.filter(Session.typeOfSession != "workshop")
        # The inequality property must be the first sort order
        q = q.order(Session.typeOfSession)
        q = q.order(Session.date, Session.startTime, Session.name)
        # filter out sessions by time limits
        sessions = [sess for sess in q if sess.startTime and sess.startTime < time_up]
        return SessionForms(items=[self._copySessionToForm(sess)
                                   for sess in sessions])

    @endpoints.method(USER_SESSIONS_POST, SessionForms,
                      path='sessions/schedule',
                      http_method='GET', name='getUserSessionsSchedule')
    def getUserSessionsSchedule(self, request):
        """Query sessions given a date for conferences the user has registered.

        With only `date`, returns that day's sessions; with `dateTo` as well,
        returns sessions in the inclusive date range. Results are limited to
        conferences in the user's conferenceKeysToAttend list.
        """
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        p_key = ndb.Key(Profile, user_id)
        profile = p_key.get()
        c_keys = [ndb.Key(urlsafe=wsck) for wsck in profile.conferenceKeysToAttend]
        confs = ndb.get_multi(c_keys)
        if not confs:
            raise endpoints.NotFoundException('You haven\'t registered in any conference')
        q = Session.query()
        date = datetime.strptime(request.date[:10], "%Y-%m-%d").date()
        # if given 2 dates search in date range, else only for that specific day
        if request.dateTo:
            dateTo = datetime.strptime(request.dateTo[:10], "%Y-%m-%d").date()
            q = q.filter(Session.date >= date)
            q = q.filter(Session.date <= dateTo)
        else:
            q = q.filter(Session.date == date)
        q = q.order(Session.date, Session.startTime, Session.name)
        # keep only sessions whose parent conference the user attends
        sessions = [sess for sess in q if sess.key.parent() in c_keys]
        return SessionForms(
            items=[self._copySessionToForm(sess)
                   for sess in sessions]
        )
        # confs = [conf for conf in confs if conf.startDate <= date and conf.endDate >= date]
    # - - - Speaker - - - - - - - - - - - - - - - - - - -
    # helper used on create session
    def _getSpeaker(self, wsspk):
        """Resolve a websafe Speaker key to a Speaker entity.

        Args:
            wsspk: urlsafe key string expected to name a Speaker.
        Returns:
            The Speaker entity, or None when the key is a valid Speaker key
            but no entity exists -- callers must handle the None case.
        Raises:
            endpoints.NotFoundException: the key is not of kind Speaker.
        """
        k = ndb.Key(urlsafe=wsspk)
        sp = k.get()
        # check if key provided is a Speaker Key
        if k.kind() != 'Speaker':
            raise endpoints.NotFoundException("No speaker with key %s" % wsspk)
        # return Speaker
        return sp

    # used from PUT and POST speaker endpoints
    def _createSpeakerObject(self, request):
        """Create a Speaker entity from a SpeakerForm request.

        Raises:
            endpoints.UnauthorizedException: no signed-in user.
            endpoints.BadRequestException: missing required fullName.
        Returns:
            SpeakerForm echoing the request.
        """
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        if not request.fullName:
            raise endpoints.BadRequestException("Speaker's 'fullName' field required")
        # copy SpeakerForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # form-only field with no model property
        del data['websafeKey']
        # allocate a root-level (parentless) Speaker id
        sp_id = Speaker.allocate_ids(size=1)[0]
        sp_key = ndb.Key(Speaker, sp_id)
        data['key'] = sp_key
        # store the Speaker and echo the request back as a form
        Speaker(**data).put()
        return self._copySpeakerToForm(request)
def _copySpeakerToForm(self, speaker):
"""Copy relevant fields from Session to SessionForm."""
sf = SpeakerForm()
for field in sf.all_fields():
if hasattr(speaker, field.name):
setattr(sf, field.name, getattr(speaker, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, speaker.key.urlsafe())
sf.check_initialized()
return sf
@staticmethod
def _cacheFeaturedSpeaker(wsck, speakers_name):
"""Create Featured Speaker & assign to memcache; used by
getFeaturedSpeaker().
"""
# get conf entity by key
key = ndb.Key(urlsafe=wsck)
if key.kind() == 'Conference':
sp = Speaker.query(Speaker.fullName == speakers_name).get()
# query for seesions of specific conf and speaker
q = Session.query(ancestor=key)
q = Session.query(Session.speaker == sp.key)
sessions = q.fetch()
if len(sessions) > 1:
sessions_names = ', '.join([sess.name for sess in sessions])
# create a message for display
fspeaker = FSPEAKER_TPL.format(speakers_name, sessions_names)
memcache.set(MEMCACHE_FSPEAKER_KEY, fspeaker)
    # get featured speaker from memcache.
    @endpoints.method(message_types.VoidMessage, StringMessage,
                      path='featured-speaker',
                      http_method='GET', name='getFeaturedSpeaker')
    def getFeaturedSpeaker(self, request):
        """Get most recent speaker featured in more than one sessions."""
        # empty string when no featured speaker has been cached
        return StringMessage(data=memcache.get(MEMCACHE_FSPEAKER_KEY) or "")

    # get speaker from datastore.
    @endpoints.method(SP_GET_REQUEST, SpeakerForm,
                      path='speakers/{websafeSpeakerKey}',
                      http_method='GET', name='getSpeaker')
    def getSpeaker(self, request):
        """Get speaker (by websafeSpeakerKey)."""
        wsspk = request.websafeSpeakerKey
        sp_key = ndb.Key(urlsafe=wsspk)
        # reject keys of the wrong kind before hitting the datastore
        if sp_key.kind() != 'Speaker':
            raise endpoints.NotFoundException('No speaker by key :%s' % wsspk)
        speaker = sp_key.get()
        if not speaker:
            raise endpoints.NotFoundException('No speaker by key :%s' % wsspk)
        return self._copySpeakerToForm(speaker)

    # get all speakers from datastore.
    @endpoints.method(message_types.VoidMessage, SpeakerForms,
                      path='speakers',
                      http_method='GET', name='getSpeakers')
    def getSpeakers(self, request):
        """Get all speakers."""
        speakers = Speaker.query().fetch()
        if not speakers:
            raise endpoints.NotFoundException('No speakers found')
        return SpeakerForms(items=[self._copySpeakerToForm(sp)
                                   for sp in speakers])

    @endpoints.method(SP_POST_REQUEST, SpeakerForm,
                      path='speakers',
                      http_method='POST', name='createSpeaker')
    def createSpeaker(self, request):
        """Create a new speaker."""
        return self._createSpeakerObject(request)

    @endpoints.method(SP_PUT_REQUEST, SpeakerForm,
                      path='speakers/{websafeSpeakerKey}',
                      http_method='PUT', name='updateSpeaker')
    def updateSpeaker(self, request):
        """Update a speaker (by websafeSpeakerKey).

        NOTE(review): this delegates to _createSpeakerObject, which allocates
        a brand-new Speaker id and never reads websafeSpeakerKey -- so a PUT
        currently creates a duplicate speaker rather than updating in place.
        Confirm intended behavior before relying on this endpoint.
        """
        return self._createSpeakerObject(request)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
    def _getProfileFromUser(self):
        """Return user Profile from datastore, creating new one if non-existent.

        Raises:
            endpoints.UnauthorizedException: no signed-in user.
        """
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        # get Profile from datastore; the user id is the Profile key id
        user_id = getUserId(user)
        p_key = ndb.Key(Profile, user_id)
        profile = p_key.get()
        # create new Profile if not there
        if not profile:
            profile = Profile(
                key=p_key,
                displayName=user.nickname(),
                mainEmail=user.email(),
                teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
            )
            profile.put()
        return profile      # return Profile

    def _doProfile(self, save_request=None):
        """Get user Profile and return to user, possibly updating it first.

        Args:
            save_request: optional ProfileMiniForm; when given, its
                displayName / teeShirtSize values are written to the Profile.
        """
        # get user Profile
        prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifyable fields
        if save_request:
            for field in ('displayName', 'teeShirtSize'):
                if hasattr(save_request, field):
                    val = getattr(save_request, field)
                    if val:
                        # both fields are persisted as strings
                        setattr(prof, field, str(val))
                        # if field == 'teeShirtSize':
                        #    setattr(prof, field, str(val).upper())
                        # else:
                        #    setattr(prof, field, val)
                        prof.put()
        # return ProfileForm
        return self._copyProfileToForm(prof)

    @endpoints.method(message_types.VoidMessage, ProfileForm,
                      path='profile', http_method='GET', name='getProfile')
    def getProfile(self, request):
        """Return user profile."""
        return self._doProfile()

    @endpoints.method(ProfileMiniForm, ProfileForm,
                      path='profile', http_method='POST', name='saveProfile')
    def saveProfile(self, request):
        """Update & return user profile."""
        return self._doProfile(request)
    # - - - Announcements - - - - - - - - - - - - - - - - - - - -
    @staticmethod
    def _cacheAnnouncement():
        """Create Announcement & assign to memcache; used by
        memcache cron job & putAnnouncement().

        Returns:
            The announcement string, or "" when nothing is nearly sold out.
        """
        # conferences with 1-5 seats left count as "nearly sold out"
        confs = Conference.query(ndb.AND(
            Conference.seatsAvailable <= 5,
            Conference.seatsAvailable > 0)
        ).fetch(projection=[Conference.name])
        if confs:
            # If there are almost sold out conferences,
            # format announcement and set it in memcache
            announcement = ANNOUNCEMENT_TPL % (
                ', '.join(conf.name for conf in confs))
            memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
        else:
            # If there are no sold out conferences,
            # delete the memcache announcements entry
            announcement = ""
            memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
        return announcement

    @endpoints.method(message_types.VoidMessage, StringMessage,
                      path='conference/announcement/get',
                      http_method='GET', name='getAnnouncement')
    def getAnnouncement(self, request):
        """Return Announcement from memcache."""
        return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
    # - - - User Wishlist - - - - - - - - - - - - - - - - - - - -
    def _appendToWishlist(self, request, add=True):
        """Add or delete a Session in the user's wishlist.

        Args:
            request: carries sessionKey (urlsafe Session key).
            add: True to add, False to remove.
        Returns:
            BooleanMessage -- True on success; False when removing a session
            that was not in the wishlist.
        Raises:
            endpoints.NotFoundException: bad key kind or missing session.
            ConflictException: adding a session already wishlisted.
        """
        retval = None
        prof = self._getProfileFromUser()    # get user Profile
        # check if session exists given sessionKey
        # get session; check that it exists
        s_key = request.sessionKey
        key = ndb.Key(urlsafe=s_key)
        if key.kind() != "Session":
            raise endpoints.NotFoundException(
                'No session found with key: %s' % s_key)
        session = key.get()
        if not session:
            raise endpoints.NotFoundException(
                'No session found with key: %s' % s_key)
        # add
        if add:
            # check if user already wishlisted this session otherwise add
            if s_key in prof.sessionWishlist:
                raise ConflictException(
                    "You have already added this session to the wishlist")
            # store the urlsafe key string on the profile
            prof.sessionWishlist.append(s_key)
            retval = True
        # delete
        else:
            # removing is a no-op (False) when the session was never wishlisted
            if s_key in prof.sessionWishlist:
                prof.sessionWishlist.remove(s_key)
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        prof.put()
        return BooleanMessage(data=retval)

    @endpoints.method(WISHLIST_POST, BooleanMessage,
                      path='wishlist/{sessionKey}',
                      http_method='POST', name='addSessionToWishlist')
    def addSessionToWishlist(self, request):
        """Add session to user's wishlist."""
        return self._appendToWishlist(request)

    @endpoints.method(WISHLIST_POST, BooleanMessage,
                      path='wishlist/{sessionKey}',
                      http_method='DELETE', name='deleteSessionInWishlist')
    def deleteSessionInWishlist(self, request):
        """Remove session from user's wishlist."""
        return self._appendToWishlist(request, add=False)

    @endpoints.method(message_types.VoidMessage, SessionForms,
                      path='wishlist',
                      http_method='GET', name='getSessionsInWishlist')
    def getSessionsInWishlist(self, request):
        """Get list of sessions in the user's wishlist."""
        prof = self._getProfileFromUser()    # get user Profile
        sess_keys = [ndb.Key(urlsafe=s_key) for s_key in prof.sessionWishlist]
        sessions = ndb.get_multi(sess_keys)
        # return set of SessionForm
        return SessionForms(items=[self._copySessionToForm(sess) for sess in sessions])
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
        path='conferences/attending',
        http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
    """Get list of conferences that user has registered for."""
    prof = self._getProfileFromUser()  # get user Profile
    # resolve websafe keys into Conference entities in one batch fetch
    conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
    conferences = ndb.get_multi(conf_keys)

    # get organizers
    organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
    profiles = ndb.get_multi(organisers)

    # put display names in a dict for easier fetching
    names = {}
    for profile in profiles:
        names[profile.key.id()] = profile.displayName

    # return set of ConferenceForm objects per Conference
    # NOTE(review): names[conf.organizerUserId] would KeyError if an
    # organizer Profile is missing -- presumably guaranteed elsewhere; verify.
    return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])
                                  for conf in conferences])
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
        path='conference/{websafeConferenceKey}',
        http_method='POST', name='registerForConference')
def registerForConference(self, request):
    """Register user for selected conference."""
    # delegates to the transactional helper with reg=True (default)
    return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
        path='conference/{websafeConferenceKey}',
        http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
    """Unregister user for selected conference."""
    # delegates to the transactional helper with reg=False
    return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
        path='filterPlayground',
        http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
    """Filter Playground: demo endpoint exercising hard-coded query filters."""
    q = Conference.query()
    # field = "city"
    # operator = "="
    # value = "London"
    # f = ndb.query.FilterNode(field, operator, value)
    # q = q.filter(f)
    q = q.filter(Conference.city == "London")
    q = q.filter(Conference.topics == "Medical Innovations")
    q = q.filter(Conference.month == 6)

    # organizer display name intentionally left blank for this demo
    return ConferenceForms(
        items=[self._copyConferenceToForm(conf, "") for conf in q]
    )
# Expose the API server object the App Engine runtime looks for.
api = endpoints.api_server([ConferenceApi])  # register API
__authors__ = 'wesc+api@google.com (Wesley Chun), cooxlee@gmail.com (Koox00)'
| {
"content_hash": "d48716f681e39f7fa8ecb973818f031a",
"timestamp": "",
"source": "github",
"line_count": 1142,
"max_line_length": 112,
"avg_line_length": 38.98949211908932,
"alnum_prop": 0.5934959349593496,
"repo_name": "koox00/scalable-py",
"id": "10b5c3263d68f8b22e99866f420c229b2a9fd062",
"size": "44549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32773"
},
{
"name": "Python",
"bytes": "55943"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
__version__ = '0.1.0'
# TODO: If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
    """Mopidy extension descriptor for the serial-port frontend."""

    dist_name = 'Mopidy-SerialPort'
    ext_name = 'serialport'
    version = __version__

    def get_default_config(self):
        """Load the default configuration bundled next to this module."""
        default_conf_path = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(default_conf_path)

    def get_config_schema(self):
        """Describe every config key this extension accepts."""
        schema = super(Extension, self).get_config_schema()
        schema['port'] = config.String()
        schema['baud'] = config.Integer()
        schema['channels'] = config.List()
        # the three volume settings share the same integer type
        for volume_key in ('min_volume', 'max_volume', 'volume_step'):
            schema[volume_key] = config.Integer()
        schema['enable_noise'] = config.Boolean()
        return schema

    def setup(self, registry):
        """Register the frontend actor with Mopidy's extension registry."""
        from .frontend import SerialPortFrontend
        registry.add('frontend', SerialPortFrontend)
| {
"content_hash": "d62b00ffaa90e4782e58d53d6bb55324",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 27.692307692307693,
"alnum_prop": 0.6416666666666667,
"repo_name": "prayerslayer/mopidy-serialport",
"id": "86ac67a0d5ec90c526d9c8ccafa449e5263b6281",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mopidy_serialport/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6926"
}
],
"symlink_target": ""
} |
import sys
import logging
import datetime
from modularodm import Q
from dateutil.relativedelta import relativedelta
from scripts import utils as scripts_utils
from website.app import init_app
from website.oauth.models import ExternalAccount
from website.addons.base.exceptions import AddonError
from website.addons.box.utils import refresh_oauth_key
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_targets(delta):
    """Return Box ExternalAccounts whose tokens expire within `delta` from now."""
    return ExternalAccount.find(
        Q('expires_at', 'lt', datetime.datetime.utcnow() + delta) &
        Q('provider', 'eq', 'box')
    )
def main(delta, dry_run):
    """Refresh OAuth keys on every Box account expiring within `delta`.

    Args:
        delta: relativedelta window; accounts expiring inside it are refreshed.
        dry_run: when True, only log which records would be refreshed.
    """
    for record in get_targets(delta):
        logger.info(
            'Refreshing tokens on record {0}; expires at {1}'.format(
                record._id,
                record.expires_at.strftime('%c')
            )
        )
        if not dry_run:
            try:
                refresh_oauth_key(record, force=True)
            except AddonError as ex:
                # str(ex) rather than ex.message: BaseException.message is
                # deprecated since Python 2.6 and removed in Python 3.
                logger.error(str(ex))
if __name__ == '__main__':
    init_app(set_backends=True, routes=False)
    dry_run = 'dry' in sys.argv
    # NOTE(review): days is read from sys.argv[2], which assumes the
    # expected invocation is `script.py dry <days>`; with a bare
    # `script.py <days>` the value is ignored -- confirm intended CLI shape.
    try:
        days = int(sys.argv[2])
    except (IndexError, ValueError, TypeError):
        days = 7  # refresh tokens that expire this week
    delta = relativedelta(days=days)
    # Log to file
    if not dry_run:
        scripts_utils.add_file_logger(logger, __file__)
    main(delta, dry_run=dry_run)
| {
"content_hash": "216336fbca643c5be77a06d7d3d1f2e8",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 69,
"avg_line_length": 28.352941176470587,
"alnum_prop": 0.6355463347164592,
"repo_name": "njantrania/osf.io",
"id": "e366a4b6e57124c37d33839ee976e40f1138a41e",
"size": "1487",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/refresh_box_tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119424"
},
{
"name": "HTML",
"bytes": "31299"
},
{
"name": "JavaScript",
"bytes": "1175450"
},
{
"name": "Mako",
"bytes": "537851"
},
{
"name": "Python",
"bytes": "3844872"
},
{
"name": "Shell",
"bytes": "1927"
}
],
"symlink_target": ""
} |
from ....testing import assert_equal
from ..preprocess import AutoTcorrelate
def test_AutoTcorrelate_inputs():
    """Auto-generated check: AutoTcorrelate's input spec traits carry the
    expected metadata (argstr, mandatory, xor, etc.)."""
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    eta2=dict(argstr='-eta2',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    copyfile=False,
    mandatory=True,
    position=-1,
    ),
    mask=dict(argstr='-mask %s',
    ),
    mask_only_targets=dict(argstr='-mask_only_targets',
    xor=[u'mask_source'],
    ),
    mask_source=dict(argstr='-mask_source %s',
    xor=[u'mask_only_targets'],
    ),
    out_file=dict(argstr='-prefix %s',
    name_source='in_file',
    name_template='%s_similarity_matrix.1D',
    ),
    outputtype=dict(),
    polort=dict(argstr='-polort %d',
    ),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = AutoTcorrelate.input_spec()

    # yield one assertion per (trait, metadata key) pair (nose-style test)
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_AutoTcorrelate_outputs():
    """Auto-generated check: AutoTcorrelate's output spec exposes out_file."""
    output_map = dict(out_file=dict(),
    )
    outputs = AutoTcorrelate.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "7f8e17049ff05e9fbcb4294b057484d4",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 26.50943396226415,
"alnum_prop": 0.6199288256227758,
"repo_name": "carolFrohlich/nipype",
"id": "31216252a474ce21c2656970a3f26698737a92e1",
"size": "1459",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2320"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5451077"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
from sqlalchemy.testing import eq_, is_, assert_raises, \
assert_raises_message, expect_warnings
import decimal
import datetime
import os
from sqlalchemy import (
Unicode, MetaData, PickleType, Boolean, TypeDecorator, Integer,
Interval, Float, Numeric, Text, CHAR, String, distinct, select, bindparam,
and_, func, Date, LargeBinary, literal, cast, text, Enum,
type_coerce, VARCHAR, Time, DateTime, BigInteger, SmallInteger, BOOLEAN,
BLOB, NCHAR, NVARCHAR, CLOB, TIME, DATE, DATETIME, TIMESTAMP, SMALLINT,
INTEGER, DECIMAL, NUMERIC, FLOAT, REAL)
from sqlalchemy.sql import ddl
from sqlalchemy import inspection
from sqlalchemy import exc, types, util, dialects
for name in dialects.__all__:
__import__("sqlalchemy.dialects.%s" % name)
from sqlalchemy.sql import operators, column, table
from sqlalchemy.schema import CheckConstraint, AddConstraint
from sqlalchemy.engine import default
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy import testing
from sqlalchemy.testing import AssertsCompiledSQL, AssertsExecutionResults, \
engines, pickleable
from sqlalchemy.testing.util import picklers
from sqlalchemy.testing.util import round_decimal
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
class AdaptTest(fixtures.TestBase):
    """Exercises type adaptation (TypeEngine.adapt) and rendering across
    every bundled dialect and every TypeEngine subclass."""

    def _all_dialect_modules(self):
        # every public sqlalchemy.dialects.* module
        return [
            getattr(dialects, d)
            for d in dialects.__all__
            if not d.startswith('_')
        ]

    def _all_dialects(self):
        return [d.base.dialect() for d in
                self._all_dialect_modules()]

    def _types_for_mod(self, mod):
        # yield each TypeEngine subclass defined (or re-exported) in `mod`
        for key in dir(mod):
            typ = getattr(mod, key)
            if not isinstance(typ, type) or \
                    not issubclass(typ, types.TypeEngine):
                continue
            yield typ

    def _all_types(self):
        # generic types first, then every dialect-specific type
        for typ in self._types_for_mod(types):
            yield typ
        for dialect in self._all_dialect_modules():
            for typ in self._types_for_mod(dialect):
                yield typ

    def test_uppercase_importable(self):
        import sqlalchemy as sa
        # every UPPERCASE generic type must be importable from the top level
        for typ in self._types_for_mod(types):
            if typ.__name__ == typ.__name__.upper():
                assert getattr(sa, typ.__name__) is typ
                assert typ.__name__ in types.__all__

    def test_uppercase_rendering(self):
        """Test that uppercase types from types.py always render as their
        type.

        As of SQLA 0.6, using an uppercase type means you want specifically
        that type. If the database in use doesn't support that DDL, it (the DB
        backend) should raise an error - it means you should be using a
        lowercased (genericized) type.
        """

        for dialect in self._all_dialects():
            # each entry: (type, expected DDL string or tuple of variants)
            for type_, expected in (
                (REAL, "REAL"),
                (FLOAT, "FLOAT"),
                (NUMERIC, "NUMERIC"),
                (DECIMAL, "DECIMAL"),
                (INTEGER, "INTEGER"),
                (SMALLINT, "SMALLINT"),
                (TIMESTAMP, ("TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE")),
                (DATETIME, "DATETIME"),
                (DATE, "DATE"),
                (TIME, ("TIME", "TIME WITHOUT TIME ZONE")),
                (CLOB, "CLOB"),
                (VARCHAR(10), ("VARCHAR(10)", "VARCHAR(10 CHAR)")),
                (NVARCHAR(10), (
                    "NVARCHAR(10)", "NATIONAL VARCHAR(10)", "NVARCHAR2(10)")),
                (CHAR, "CHAR"),
                (NCHAR, ("NCHAR", "NATIONAL CHAR")),
                (BLOB, ("BLOB", "BLOB SUB_TYPE 0")),
                (BOOLEAN, ("BOOLEAN", "BOOL", "INTEGER"))
            ):
                if isinstance(expected, str):
                    expected = (expected, )

                try:
                    compiled = types.to_instance(type_).\
                        compile(dialect=dialect)
                except NotImplementedError:
                    # dialect has no rendering for this type; that's allowed
                    continue

                assert compiled in expected, \
                    "%r matches none of %r for dialect %s" % \
                    (compiled, expected, dialect.name)

                assert str(types.to_instance(type_)) in expected, \
                    "default str() of type %r not expected, %r" % \
                    (type_, expected)

    @testing.uses_deprecated()
    def test_adapt_method(self):
        """ensure all types have a working adapt() method,
        which creates a distinct copy.

        The distinct copy ensures that when we cache
        the adapted() form of a type against the original
        in a weak key dictionary, a cycle is not formed.

        This test doesn't test type-specific arguments of
        adapt() beyond their defaults.
        """

        def adaptions():
            # yield (is_down_adaption, type, targets): each type is adapted
            # both up to its subclasses and down to its base
            for typ in self._all_types():
                up_adaptions = [typ] + typ.__subclasses__()
                yield False, typ, up_adaptions
                for subcl in typ.__subclasses__():
                    if subcl is not typ and typ is not TypeDecorator and \
                            "sqlalchemy" in subcl.__module__:
                        yield True, subcl, [typ]

        for is_down_adaption, typ, target_adaptions in adaptions():
            if typ in (types.TypeDecorator, types.TypeEngine, types.Variant):
                continue
            elif typ is dialects.postgresql.ARRAY:
                # ARRAY requires an item type argument
                t1 = typ(String)
            else:
                t1 = typ()
            for cls in target_adaptions:
                if not issubclass(typ, types.Enum) and \
                        issubclass(cls, types.Enum):
                    continue
                # print("ADAPT %s -> %s" % (t1.__class__, cls))
                t2 = t1.adapt(cls)
                assert t1 is not t2

                if is_down_adaption:
                    t2, t1 = t1, t2

                for k in t1.__dict__:
                    if k in ('impl', '_is_oracle_number', '_create_events'):
                        continue
                    # assert each value was copied, or that
                    # the adapted type has a more specific
                    # value than the original (i.e. SQL Server
                    # applies precision=24 for REAL)
                    assert \
                        getattr(t2, k) == t1.__dict__[k] or \
                        t1.__dict__[k] is None

    def test_python_type(self):
        eq_(types.Integer().python_type, int)
        eq_(types.Numeric().python_type, decimal.Decimal)
        eq_(types.Numeric(asdecimal=False).python_type, float)
        eq_(types.LargeBinary().python_type, util.binary_type)
        eq_(types.Float().python_type, float)
        eq_(types.Interval().python_type, datetime.timedelta)
        eq_(types.Date().python_type, datetime.date)
        eq_(types.DateTime().python_type, datetime.datetime)
        eq_(types.String().python_type, str)
        eq_(types.Unicode().python_type, util.text_type)
        eq_(types.String(convert_unicode=True).python_type, util.text_type)

        # TypeEngine itself has no python_type
        assert_raises(
            NotImplementedError,
            lambda: types.TypeEngine().python_type
        )

    @testing.uses_deprecated()
    def test_repr(self):
        # repr() must not raise for any constructible type
        for typ in self._all_types():
            if typ in (types.TypeDecorator, types.TypeEngine, types.Variant):
                continue
            elif typ is dialects.postgresql.ARRAY:
                t1 = typ(String)
            else:
                t1 = typ()
            repr(t1)

    def test_adapt_constructor_copy_override_kw(self):
        """test that adapt() can accept kw args that override
        the state of the original object.

        This essentially is testing the behavior of util.constructor_copy().
        """
        t1 = String(length=50, convert_unicode=False)
        t2 = t1.adapt(Text, convert_unicode=True)
        eq_(
            t2.length, 50
        )
        eq_(
            t2.convert_unicode, True
        )
class TypeAffinityTest(fixtures.TestBase):
    """Checks _type_affinity resolution and affinity comparison rules."""

    def test_type_affinity(self):
        # each concrete type maps to its generic affinity class
        for type_, affin in [
            (String(), String),
            (VARCHAR(), String),
            (Date(), Date),
            (LargeBinary(), types._Binary)
        ]:
            eq_(type_._type_affinity, affin)

        # pairs compare as affinity-equal (or not)
        for t1, t2, comp in [
            (Integer(), SmallInteger(), True),
            (Integer(), String(), False),
            (Integer(), Integer(), True),
            (Text(), String(), True),
            (Text(), Unicode(), True),
            (LargeBinary(), Integer(), False),
            (LargeBinary(), PickleType(), True),
            (PickleType(), LargeBinary(), True),
            (PickleType(), PickleType(), True),
        ]:
            eq_(t1._compare_type_affinity(t2), comp, "%s %s" % (t1, t2))

    def test_decorator_doesnt_cache(self):
        from sqlalchemy.dialects import postgresql

        class MyType(TypeDecorator):
            impl = CHAR

            def load_dialect_impl(self, dialect):
                if dialect.name == 'postgresql':
                    return dialect.type_descriptor(postgresql.UUID())
                else:
                    return dialect.type_descriptor(CHAR(32))

        t1 = MyType()
        d = postgresql.dialect()
        # affinity from the generic impl must not be clobbered by the
        # dialect-specific impl resolved afterwards
        assert t1._type_affinity is String
        assert t1.dialect_impl(d)._type_affinity is postgresql.UUID
class PickleTypesTest(fixtures.TestBase):
    """All column types must survive a pickle round trip, alone and
    attached to a Table/MetaData."""

    def test_pickle_types(self):
        for loads, dumps in picklers():
            # NOTE(review): 'Dat' and 'Int' names repeat in this list --
            # presumably intentional since each Column goes into a fresh
            # MetaData below; confirm.
            column_types = [
                Column('Boo', Boolean()),
                Column('Str', String()),
                Column('Tex', Text()),
                Column('Uni', Unicode()),
                Column('Int', Integer()),
                Column('Sma', SmallInteger()),
                Column('Big', BigInteger()),
                Column('Num', Numeric()),
                Column('Flo', Float()),
                Column('Dat', DateTime()),
                Column('Dat', Date()),
                Column('Tim', Time()),
                Column('Lar', LargeBinary()),
                Column('Pic', PickleType()),
                Column('Int', Interval()),
                Column('Enu', Enum('x', 'y', 'z', name="somename")),
            ]
            for column_type in column_types:
                meta = MetaData()
                Table('foo', meta, column_type)
                loads(dumps(column_type))
                loads(dumps(meta))
class UserDefinedTest(fixtures.TablesTest, AssertsCompiledSQL):

    """tests user-defined types."""

    def test_processing(self):
        # round-trip rows through the custom bind/result processors defined
        # in define_tables(); BIND_IN/BIND_OUT markers prove both ran
        users = self.tables.users
        users.insert().execute(
            user_id=2, goofy='jack', goofy2='jack', goofy4=util.u('jack'),
            goofy7=util.u('jack'), goofy8=12, goofy9=12)
        users.insert().execute(
            user_id=3, goofy='lala', goofy2='lala', goofy4=util.u('lala'),
            goofy7=util.u('lala'), goofy8=15, goofy9=15)
        users.insert().execute(
            user_id=4, goofy='fred', goofy2='fred', goofy4=util.u('fred'),
            goofy7=util.u('fred'), goofy8=9, goofy9=9)

        l = users.select().order_by(users.c.user_id).execute().fetchall()
        # ints are multiplied by 10 on bind and 10 (or 15 for the subclass)
        # on result, hence 12 -> 1200 / 1800 etc.
        for assertstr, assertint, assertint2, row in zip(
            [
                "BIND_INjackBIND_OUT", "BIND_INlalaBIND_OUT",
                "BIND_INfredBIND_OUT"],
            [1200, 1500, 900],
            [1800, 2250, 1350],
            l
        ):
            for col in list(row)[1:5]:
                eq_(col, assertstr)
            eq_(row[5], assertint)
            eq_(row[6], assertint2)
            for col in row[3], row[4]:
                assert isinstance(col, util.text_type)

    def test_typedecorator_literal_render(self):
        class MyType(types.TypeDecorator):
            impl = String

            def process_literal_param(self, value, dialect):
                return "HI->%s<-THERE" % value

        self.assert_compile(
            select([literal("test", MyType)]),
            "SELECT 'HI->test<-THERE' AS anon_1",
            dialect='default',
            literal_binds=True
        )

    def test_kw_colspec(self):
        # get_col_spec may accept **kw and receive the owning column
        # as 'type_expression'
        class MyType(types.UserDefinedType):
            def get_col_spec(self, **kw):
                return "FOOB %s" % kw['type_expression'].name

        class MyOtherType(types.UserDefinedType):
            def get_col_spec(self):
                return "BAR"

        self.assert_compile(
            ddl.CreateColumn(Column('bar', MyType)),
            "bar FOOB bar"
        )

        self.assert_compile(
            ddl.CreateColumn(Column('bar', MyOtherType)),
            "bar BAR"
        )

    def test_typedecorator_literal_render_fallback_bound(self):
        # fall back to process_bind_param for literal
        # value rendering.
        class MyType(types.TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                return "HI->%s<-THERE" % value

        self.assert_compile(
            select([literal("test", MyType)]),
            "SELECT 'HI->test<-THERE' AS anon_1",
            dialect='default',
            literal_binds=True
        )

    def test_typedecorator_impl(self):
        # the decorator's dialect impl wraps the same class the raw type
        # would resolve to, and compiles to the same DDL
        for impl_, exp, kw in [
            (Float, "FLOAT", {}),
            (Float, "FLOAT(2)", {'precision': 2}),
            (Float(2), "FLOAT(2)", {'precision': 4}),
            (Numeric(19, 2), "NUMERIC(19, 2)", {}),
        ]:
            for dialect_ in (
                    dialects.postgresql, dialects.mssql, dialects.mysql):
                dialect_ = dialect_.dialect()

                raw_impl = types.to_instance(impl_, **kw)

                class MyType(types.TypeDecorator):
                    impl = impl_

                dec_type = MyType(**kw)

                eq_(dec_type.impl.__class__, raw_impl.__class__)

                raw_dialect_impl = raw_impl.dialect_impl(dialect_)
                dec_dialect_impl = dec_type.dialect_impl(dialect_)
                eq_(dec_dialect_impl.__class__, MyType)
                eq_(
                    raw_dialect_impl.__class__,
                    dec_dialect_impl.impl.__class__)

                self.assert_compile(
                    MyType(**kw),
                    exp,
                    dialect=dialect_
                )

    def test_user_defined_typedec_impl(self):
        class MyType(types.TypeDecorator):
            impl = Float

            def load_dialect_impl(self, dialect):
                if dialect.name == 'sqlite':
                    return String(50)
                else:
                    return super(MyType, self).load_dialect_impl(dialect)

        sl = dialects.sqlite.dialect()
        pg = dialects.postgresql.dialect()
        t = MyType()
        self.assert_compile(t, "VARCHAR(50)", dialect=sl)
        self.assert_compile(t, "FLOAT", dialect=pg)
        eq_(
            t.dialect_impl(dialect=sl).impl.__class__,
            String().dialect_impl(dialect=sl).__class__
        )
        eq_(
            t.dialect_impl(dialect=pg).impl.__class__,
            Float().dialect_impl(pg).__class__
        )

    def test_type_decorator_repr(self):
        class MyType(TypeDecorator):
            impl = VARCHAR

        eq_(repr(MyType(45)), "MyType(length=45)")

    def test_user_defined_typedec_impl_bind(self):
        class TypeOne(types.TypeEngine):
            def bind_processor(self, dialect):
                def go(value):
                    return value + " ONE"
                return go

        class TypeTwo(types.TypeEngine):
            def bind_processor(self, dialect):
                def go(value):
                    return value + " TWO"
                return go

        class MyType(types.TypeDecorator):
            impl = TypeOne

            def load_dialect_impl(self, dialect):
                if dialect.name == 'sqlite':
                    return TypeOne()
                else:
                    return TypeTwo()

            def process_bind_param(self, value, dialect):
                return "MYTYPE " + value

        sl = dialects.sqlite.dialect()
        pg = dialects.postgresql.dialect()
        t = MyType()
        # decorator's processor runs first, then the dialect-selected impl's
        eq_(
            t._cached_bind_processor(sl)('foo'),
            "MYTYPE foo ONE"
        )
        eq_(
            t._cached_bind_processor(pg)('foo'),
            "MYTYPE foo TWO"
        )

    def test_user_defined_dialect_specific_args(self):
        class MyType(types.UserDefinedType):
            def __init__(self, foo='foo', **kwargs):
                super(MyType, self).__init__()
                self.foo = foo
                self.dialect_specific_args = kwargs

            def adapt(self, cls):
                return cls(foo=self.foo, **self.dialect_specific_args)

        t = MyType(bar='bar')
        a = t.dialect_impl(testing.db.dialect)
        eq_(a.foo, 'foo')
        eq_(a.dialect_specific_args['bar'], 'bar')

    @classmethod
    def define_tables(cls, metadata):
        # each of these custom types marks values on the way in/out so
        # test_processing can verify which processors ran
        class MyType(types.UserDefinedType):
            def get_col_spec(self):
                return "VARCHAR(100)"

            def bind_processor(self, dialect):
                def process(value):
                    return "BIND_IN" + value
                return process

            def result_processor(self, dialect, coltype):
                def process(value):
                    return value + "BIND_OUT"
                return process

            def adapt(self, typeobj):
                return typeobj()

        class MyDecoratedType(types.TypeDecorator):
            impl = String

            def bind_processor(self, dialect):
                impl_processor = super(MyDecoratedType, self).\
                    bind_processor(dialect) or (lambda value: value)

                def process(value):
                    return "BIND_IN" + impl_processor(value)
                return process

            def result_processor(self, dialect, coltype):
                impl_processor = super(MyDecoratedType, self).\
                    result_processor(dialect, coltype) or (lambda value: value)

                def process(value):
                    return impl_processor(value) + "BIND_OUT"
                return process

            def copy(self):
                return MyDecoratedType()

        class MyNewUnicodeType(types.TypeDecorator):
            impl = Unicode

            def process_bind_param(self, value, dialect):
                return "BIND_IN" + value

            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"

            def copy(self):
                return MyNewUnicodeType(self.impl.length)

        class MyNewIntType(types.TypeDecorator):
            impl = Integer

            def process_bind_param(self, value, dialect):
                return value * 10

            def process_result_value(self, value, dialect):
                return value * 10

            def copy(self):
                return MyNewIntType()

        class MyNewIntSubClass(MyNewIntType):
            def process_result_value(self, value, dialect):
                return value * 15

            def copy(self):
                return MyNewIntSubClass()

        class MyUnicodeType(types.TypeDecorator):
            impl = Unicode

            def bind_processor(self, dialect):
                impl_processor = super(MyUnicodeType, self).\
                    bind_processor(dialect) or (lambda value: value)

                def process(value):
                    return "BIND_IN" + impl_processor(value)
                return process

            def result_processor(self, dialect, coltype):
                impl_processor = super(MyUnicodeType, self).\
                    result_processor(dialect, coltype) or (lambda value: value)

                def process(value):
                    return impl_processor(value) + "BIND_OUT"
                return process

            def copy(self):
                return MyUnicodeType(self.impl.length)

        Table(
            'users', metadata,
            Column('user_id', Integer, primary_key=True),
            # totally custom type
            Column('goofy', MyType, nullable=False),

            # decorated type with an argument, so its a String
            Column('goofy2', MyDecoratedType(50), nullable=False),

            Column('goofy4', MyUnicodeType(50), nullable=False),
            Column('goofy7', MyNewUnicodeType(50), nullable=False),
            Column('goofy8', MyNewIntType, nullable=False),
            Column('goofy9', MyNewIntSubClass, nullable=False),
        )
class TypeCoerceCastTest(fixtures.TablesTest):
    """Runs the same scenarios through both cast() and type_coerce(),
    verifying custom-type processors apply (or don't) in each case."""

    @classmethod
    def define_tables(cls, metadata):
        class MyType(types.TypeDecorator):
            impl = String(50)

            def process_bind_param(self, value, dialect):
                return "BIND_IN" + str(value)

            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"

        cls.MyType = MyType

        Table('t', metadata, Column('data', String(50)))

    @testing.fails_on(
        "oracle", "oracle doesn't like CAST in the VALUES of an INSERT")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_insert_round_trip_cast(self):
        self._test_insert_round_trip(cast)

    def test_insert_round_trip_type_coerce(self):
        self._test_insert_round_trip(type_coerce)

    def _test_insert_round_trip(self, coerce_fn):
        MyType = self.MyType
        t = self.tables.t

        t.insert().values(data=coerce_fn('d1', MyType)).execute()

        # both bind and result processors fire
        eq_(
            select([coerce_fn(t.c.data, MyType)]).execute().fetchall(),
            [('BIND_INd1BIND_OUT', )]
        )

    @testing.fails_on(
        "oracle", "ORA-00906: missing left parenthesis - "
        "seems to be CAST(:param AS type)")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_coerce_from_nulltype_cast(self):
        self._test_coerce_from_nulltype(cast)

    def test_coerce_from_nulltype_type_coerce(self):
        self._test_coerce_from_nulltype(type_coerce)

    def _test_coerce_from_nulltype(self, coerce_fn):
        MyType = self.MyType

        # test coerce from nulltype - e.g. use an object that
        # doesn't match to a known type
        class MyObj(object):
            def __str__(self):
                return "THISISMYOBJ"

        t = self.tables.t

        t.insert().values(data=coerce_fn(MyObj(), MyType)).execute()

        eq_(
            select([coerce_fn(t.c.data, MyType)]).execute().fetchall(),
            [('BIND_INTHISISMYOBJBIND_OUT',)]
        )

    @testing.fails_on(
        "oracle", "oracle doesn't like CAST in the VALUES of an INSERT")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_vs_non_coerced_cast(self):
        self._test_vs_non_coerced(cast)

    def test_vs_non_coerced_type_coerce(self):
        self._test_vs_non_coerced(type_coerce)

    def _test_vs_non_coerced(self, coerce_fn):
        MyType = self.MyType
        t = self.tables.t

        t.insert().values(data=coerce_fn('d1', MyType)).execute()

        # the raw column skips the result processor; the coerced one runs it
        eq_(
            select(
                [t.c.data, coerce_fn(t.c.data, MyType)]).execute().fetchall(),
            [('BIND_INd1', 'BIND_INd1BIND_OUT')]
        )

    @testing.fails_on(
        "oracle", "oracle doesn't like CAST in the VALUES of an INSERT")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_vs_non_coerced_alias_cast(self):
        self._test_vs_non_coerced_alias(cast)

    def test_vs_non_coerced_alias_type_coerce(self):
        self._test_vs_non_coerced_alias(type_coerce)

    def _test_vs_non_coerced_alias(self, coerce_fn):
        MyType = self.MyType
        t = self.tables.t

        t.insert().values(data=coerce_fn('d1', MyType)).execute()

        # coercion survives wrapping in an alias
        eq_(
            select([t.c.data, coerce_fn(t.c.data, MyType)]).
            alias().select().execute().fetchall(),
            [('BIND_INd1', 'BIND_INd1BIND_OUT')]
        )

    @testing.fails_on(
        "oracle", "oracle doesn't like CAST in the VALUES of an INSERT")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_vs_non_coerced_where_cast(self):
        self._test_vs_non_coerced_where(cast)

    def test_vs_non_coerced_where_type_coerce(self):
        self._test_vs_non_coerced_where(type_coerce)

    def _test_vs_non_coerced_where(self, coerce_fn):
        MyType = self.MyType

        t = self.tables.t
        t.insert().values(data=coerce_fn('d1', MyType)).execute()

        # coerce on left side
        eq_(
            select([t.c.data, coerce_fn(t.c.data, MyType)]).
            where(coerce_fn(t.c.data, MyType) == 'd1').execute().fetchall(),
            [('BIND_INd1', 'BIND_INd1BIND_OUT')]
        )

        # coerce on right side
        eq_(
            select([t.c.data, coerce_fn(t.c.data, MyType)]).
            where(t.c.data == coerce_fn('d1', MyType)).execute().fetchall(),
            [('BIND_INd1', 'BIND_INd1BIND_OUT')]
        )

    @testing.fails_on(
        "oracle", "oracle doesn't like CAST in the VALUES of an INSERT")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_coerce_none_cast(self):
        self._test_coerce_none(cast)

    def test_coerce_none_type_coerce(self):
        self._test_coerce_none(type_coerce)

    def _test_coerce_none(self, coerce_fn):
        MyType = self.MyType

        t = self.tables.t
        t.insert().values(data=coerce_fn('d1', MyType)).execute()
        # comparisons against a coerced None match nothing
        eq_(
            select([t.c.data, coerce_fn(t.c.data, MyType)]).
            where(t.c.data == coerce_fn(None, MyType)).execute().fetchall(),
            []
        )

        eq_(
            select([t.c.data, coerce_fn(t.c.data, MyType)]).
            where(coerce_fn(t.c.data, MyType) == None).  # noqa
            execute().fetchall(),
            []
        )

    @testing.fails_on(
        "oracle", "oracle doesn't like CAST in the VALUES of an INSERT")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_resolve_clause_element_cast(self):
        self._test_resolve_clause_element(cast)

    def test_resolve_clause_element_type_coerce(self):
        self._test_resolve_clause_element(type_coerce)

    def _test_resolve_clause_element(self, coerce_fn):
        MyType = self.MyType

        t = self.tables.t
        t.insert().values(data=coerce_fn('d1', MyType)).execute()

        # objects exposing __clause_element__ resolve to the column they wrap
        class MyFoob(object):
            def __clause_element__(self):
                return t.c.data

        eq_(
            testing.db.execute(
                select([t.c.data, coerce_fn(MyFoob(), MyType)])
            ).fetchall(),
            [('BIND_INd1', 'BIND_INd1BIND_OUT')]
        )

    @testing.fails_on(
        "oracle", "ORA-00906: missing left parenthesis - "
        "seems to be CAST(:param AS type)")
    @testing.fails_on(
        "mysql", "mysql dialect warns on skipped CAST")
    def test_cast_existing_typed(self):
        MyType = self.MyType
        coerce_fn = cast

        # when cast() is given an already typed value,
        # the type does not take effect on the value itself.
        eq_(
            testing.db.scalar(
                select([coerce_fn(literal('d1'), MyType)])
            ),
            'd1BIND_OUT'
        )

    def test_type_coerce_existing_typed(self):
        MyType = self.MyType
        coerce_fn = type_coerce
        t = self.tables.t

        # type_coerce does upgrade the given expression to the
        # given type.

        t.insert().values(data=coerce_fn(literal('d1'), MyType)).execute()

        eq_(
            select([coerce_fn(t.c.data, MyType)]).execute().fetchall(),
            [('BIND_INd1BIND_OUT', )])
class VariantTest(fixtures.TestBase, AssertsCompiledSQL):
    """Exercises TypeEngine.with_variant(): per-dialect type selection for
    compilation and bind processing."""

    def setup(self):
        class UTypeOne(types.UserDefinedType):
            def get_col_spec(self):
                return "UTYPEONE"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UONE"
                return process

        class UTypeTwo(types.UserDefinedType):
            def get_col_spec(self):
                return "UTYPETWO"

            def bind_processor(self, dialect):
                def process(value):
                    return value + "UTWO"
                return process

        class UTypeThree(types.UserDefinedType):
            def get_col_spec(self):
                return "UTYPETHREE"

        self.UTypeOne = UTypeOne
        self.UTypeTwo = UTypeTwo
        self.UTypeThree = UTypeThree
        # variant: UTypeOne everywhere except postgresql -> UTypeTwo
        self.variant = self.UTypeOne().with_variant(
            self.UTypeTwo(), 'postgresql')
        # composite adds a mysql -> UTypeThree mapping on top
        self.composite = self.variant.with_variant(self.UTypeThree(), 'mysql')

    def test_illegal_dupe(self):
        v = self.UTypeOne().with_variant(
            self.UTypeTwo(), 'postgresql'
        )
        # mapping the same dialect twice is rejected
        assert_raises_message(
            exc.ArgumentError,
            "Dialect 'postgresql' is already present "
            "in the mapping for this Variant",
            lambda: v.with_variant(self.UTypeThree(), 'postgresql')
        )

    def test_compile(self):
        self.assert_compile(
            self.variant,
            "UTYPEONE",
            use_default_dialect=True
        )
        self.assert_compile(
            self.variant,
            "UTYPEONE",
            dialect=dialects.mysql.dialect()
        )
        self.assert_compile(
            self.variant,
            "UTYPETWO",
            dialect=dialects.postgresql.dialect()
        )

    def test_to_instance(self):
        # a variant given as a class is instantiated on use
        self.assert_compile(
            self.UTypeOne().with_variant(self.UTypeTwo, "postgresql"),
            "UTYPETWO",
            dialect=dialects.postgresql.dialect()
        )

    def test_compile_composite(self):
        self.assert_compile(
            self.composite,
            "UTYPEONE",
            use_default_dialect=True
        )
        self.assert_compile(
            self.composite,
            "UTYPETHREE",
            dialect=dialects.mysql.dialect()
        )
        self.assert_compile(
            self.composite,
            "UTYPETWO",
            dialect=dialects.postgresql.dialect()
        )

    def test_bind_process(self):
        eq_(
            self.variant._cached_bind_processor(
                dialects.mysql.dialect())('foo'),
            'fooUONE'
        )
        eq_(
            self.variant._cached_bind_processor(
                default.DefaultDialect())('foo'),
            'fooUONE'
        )
        eq_(
            self.variant._cached_bind_processor(
                dialects.postgresql.dialect())('foo'),
            'fooUTWO'
        )

    def test_bind_process_composite(self):
        # UTypeThree defines no bind_processor, so mysql gets None
        assert self.composite._cached_bind_processor(
            dialects.mysql.dialect()) is None
        eq_(
            self.composite._cached_bind_processor(
                default.DefaultDialect())('foo'),
            'fooUONE'
        )
        eq_(
            self.composite._cached_bind_processor(
                dialects.postgresql.dialect())('foo'),
            'fooUTWO'
        )
class UnicodeTest(fixtures.TestBase):

    """Exercise the Unicode and related types.

    Note: unicode round trip tests are now in
    sqlalchemy/testing/suite/test_types.py.

    """
    __backend__ = True

    def test_native_unicode(self):
        """assert expected values for 'native unicode' mode"""

        if testing.against('mssql+pyodbc'):
            eq_(
                testing.db.dialect.returns_unicode_strings,
                'conditional'
            )

        elif testing.against('mssql+mxodbc'):
            eq_(
                testing.db.dialect.returns_unicode_strings,
                'conditional'
            )

        elif testing.against('mssql+pymssql'):
            eq_(
                testing.db.dialect.returns_unicode_strings,
                ('charset' in testing.db.url.query)
            )

        # NOTE(review): 'mysql+pymssql' below looks like a typo for
        # 'mysql+pymysql' -- confirm before relying on this branch.
        elif testing.against('mysql+cymysql', 'mysql+pymssql'):
            eq_(
                testing.db.dialect.returns_unicode_strings,
                True if util.py3k else False
            )
        elif testing.against('oracle+cx_oracle'):
            eq_(
                testing.db.dialect.returns_unicode_strings,
                True if util.py3k else "conditional"
            )
        elif testing.against("mysql+mysqldb"):
            eq_(
                testing.db.dialect.returns_unicode_strings,
                True if util.py3k or util.asbool(
                    testing.db.url.query.get("use_unicode")
                )
                else False
            )
        else:
            expected = (testing.db.name, testing.db.driver) in \
                (
                    ('postgresql', 'psycopg2'),
                    ('postgresql', 'psycopg2cffi'),
                    ('postgresql', 'pypostgresql'),
                    ('postgresql', 'pg8000'),
                    ('postgresql', 'zxjdbc'),
                    ('mysql', 'pymysql'),
                    ('mysql', 'oursql'),
                    ('mysql', 'zxjdbc'),
                    ('mysql', 'mysqlconnector'),
                    ('sqlite', 'pysqlite'),
                    ('oracle', 'zxjdbc'),
                )

            eq_(
                testing.db.dialect.returns_unicode_strings,
                expected
            )

    # shared non-ASCII sample used by the tests below
    data = util.u(
        "Alors vous imaginez ma surprise, au lever du jour, quand "
        "une drôle de petite voix m’a réveillé. "
        "Elle disait: « S’il vous plaît… dessine-moi un mouton! »")

    def test_unicode_warnings_typelevel_native_unicode(self):

        unicodedata = self.data
        u = Unicode()
        dialect = default.DefaultDialect()
        dialect.supports_unicode_binds = True
        uni = u.dialect_impl(dialect).bind_processor(dialect)
        # non-unicode input warns; unicode passes through
        if util.py3k:
            assert_raises(exc.SAWarning, uni, b'x')
            assert isinstance(uni(unicodedata), str)
        else:
            assert_raises(exc.SAWarning, uni, 'x')
            assert isinstance(uni(unicodedata), unicode)  # noqa

    def test_unicode_warnings_typelevel_sqla_unicode(self):
        unicodedata = self.data
        u = Unicode()
        dialect = default.DefaultDialect()
        dialect.supports_unicode_binds = False
        uni = u.dialect_impl(dialect).bind_processor(dialect)
        assert_raises(exc.SAWarning, uni, util.b('x'))
        # without native unicode support SQLA encodes to bytes itself
        assert isinstance(uni(unicodedata), util.binary_type)

        eq_(uni(unicodedata), unicodedata.encode('utf-8'))

    def test_unicode_warnings_totally_wrong_type(self):
        u = Unicode()
        dialect = default.DefaultDialect()
        dialect.supports_unicode_binds = False
        uni = u.dialect_impl(dialect).bind_processor(dialect)
        with expect_warnings(
                "Unicode type received non-unicode bind param value 5."):
            # non-string values warn but pass through unchanged
            eq_(uni(5), 5)

    def test_unicode_warnings_dialectlevel(self):

        unicodedata = self.data

        dialect = default.DefaultDialect(convert_unicode=True)
        dialect.supports_unicode_binds = False

        s = String()
        uni = s.dialect_impl(dialect).bind_processor(dialect)

        # dialect-level convert_unicode does not warn on bytes
        uni(util.b('x'))
        assert isinstance(uni(unicodedata), util.binary_type)

        eq_(uni(unicodedata), unicodedata.encode('utf-8'))

    def test_ignoring_unicode_error(self):
        """checks String(unicode_error='ignore') is passed to
        underlying codec."""

        unicodedata = self.data

        type_ = String(248, convert_unicode='force', unicode_error='ignore')
        dialect = default.DefaultDialect(encoding='ascii')
        proc = type_.result_processor(dialect, 10)

        utfdata = unicodedata.encode('utf8')
        eq_(
            proc(utfdata),
            unicodedata.encode('ascii', 'ignore').decode()
        )
# Module-level fixtures populated by EnumTest.setup_class().
enum_table = non_native_enum_table = metadata = None
class EnumTest(AssertsCompiledSQL, fixtures.TestBase):
    """Round trips, CHECK constraints and DDL for native and
    non-native (VARCHAR + CHECK) Enum."""

    @classmethod
    def setup_class(cls):
        global enum_table, non_native_enum_table, metadata
        metadata = MetaData(testing.db)
        enum_table = Table(
            'enum_table', metadata, Column("id", Integer, primary_key=True),
            Column('someenum', Enum('one', 'two', 'three', name='myenum'))
        )
        non_native_enum_table = Table(
            'non_native_enum_table', metadata,
            Column("id", Integer, primary_key=True),
            Column('someenum', Enum('one', 'two', 'three', native_enum=False)),
        )
        metadata.create_all()

    def teardown(self):
        # per-test cleanup: empty both tables, keep the schema
        enum_table.delete().execute()
        non_native_enum_table.delete().execute()

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    @testing.fails_on(
        'postgresql+zxjdbc',
        'zxjdbc fails on ENUM: column "XXX" is of type XXX '
        'but expression is of type character varying')
    def test_round_trip(self):
        enum_table.insert().execute([
            {'id': 1, 'someenum': 'two'},
            {'id': 2, 'someenum': 'two'},
            {'id': 3, 'someenum': 'one'},
        ])
        eq_(
            enum_table.select().order_by(enum_table.c.id).execute().fetchall(),
            [
                (1, 'two'),
                (2, 'two'),
                (3, 'one'),
            ]
        )

    def test_non_native_round_trip(self):
        non_native_enum_table.insert().execute([
            {'id': 1, 'someenum': 'two'},
            {'id': 2, 'someenum': 'two'},
            {'id': 3, 'someenum': 'one'},
        ])
        eq_(
            non_native_enum_table.select().
            order_by(non_native_enum_table.c.id).execute().fetchall(),
            [
                (1, 'two'),
                (2, 'two'),
                (3, 'one'),
            ]
        )

    def test_adapt(self):
        # adapt() to the PG-specific ENUM must carry flags through
        from sqlalchemy.dialects.postgresql import ENUM
        e1 = Enum('one', 'two', 'three', native_enum=False)
        eq_(e1.adapt(ENUM).native_enum, False)
        e1 = Enum('one', 'two', 'three', native_enum=True)
        eq_(e1.adapt(ENUM).native_enum, True)
        e1 = Enum('one', 'two', 'three', name='foo', schema='bar')
        eq_(e1.adapt(ENUM).name, 'foo')
        eq_(e1.adapt(ENUM).schema, 'bar')

    @testing.provide_metadata
    def test_create_metadata_bound_no_crash(self):
        # Enum associated directly with MetaData must survive create_all
        m1 = self.metadata
        Enum('a', 'b', 'c', metadata=m1, name='ncenum')
        m1.create_all(testing.db)

    @testing.crashes(
        'mysql', 'Inconsistent behavior across various OS/drivers')
    def test_constraint(self):
        # a value outside the enum is rejected at the DB level
        assert_raises(
            exc.DBAPIError, enum_table.insert().execute,
            {'id': 4, 'someenum': 'four'})

    def test_non_native_constraint_custom_type(self):
        class Foob(object):
            def __init__(self, name):
                self.name = name

        class MyEnum(types.SchemaType, TypeDecorator):
            def __init__(self, values):
                self.impl = Enum(
                    *[v.name for v in values], name="myenum",
                    native_enum=False)

            def _set_table(self, table, column):
                self.impl._set_table(table, column)

            # future method
            def process_literal_param(self, value, dialect):
                return value.name

            def process_bind_param(self, value, dialect):
                return value.name

        m = MetaData()
        t1 = Table('t', m, Column('x', MyEnum([Foob('a'), Foob('b')])))
        const = [
            c for c in t1.constraints if isinstance(c, CheckConstraint)][0]
        self.assert_compile(
            AddConstraint(const),
            "ALTER TABLE t ADD CONSTRAINT myenum CHECK (x IN ('a', 'b'))",
            dialect="default"
        )

    @testing.fails_on(
        'mysql',
        "the CHECK constraint doesn't raise an exception for unknown reason")
    def test_non_native_constraint(self):
        assert_raises(
            exc.DBAPIError, non_native_enum_table.insert().execute,
            {'id': 4, 'someenum': 'four'}
        )

    def test_mock_engine_no_prob(self):
        """ensure no 'checkfirst' queries are run when enums
        are created with checkfirst=False"""
        e = engines.mock_engine()
        t = Table('t1', MetaData(), Column('x', Enum("x", "y", name="pge")))
        t.create(e, checkfirst=False)
        # basically looking for the start of
        # the constraint, or the ENUM def itself,
        # depending on backend.
        assert "('x'," in e.print_sql()

    def test_repr(self):
        e = Enum(
            "x", "y", name="somename", convert_unicode=True, quote=True,
            inherit_schema=True, native_enum=False)
        eq_(
            repr(e),
            "Enum('x', 'y', name='somename', "
            "inherit_schema=True, native_enum=False)")
# Module-level fixtures populated by BinaryTest.setup_class().
binary_table = MyPickleType = metadata = None
class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
    """Round trips for LargeBinary / PickleType, including a
    TypeDecorator that mutates values on the way in and out."""

    @classmethod
    def setup_class(cls):
        global binary_table, MyPickleType, metadata

        class MyPickleType(types.TypeDecorator):
            impl = PickleType

            def process_bind_param(self, value, dialect):
                # mutate on the way in so the round trip is observable
                if value:
                    value.stuff = 'this is modified stuff'
                return value

            def process_result_value(self, value, dialect):
                # and again on the way out
                if value:
                    value.stuff = 'this is the right stuff'
                return value

        metadata = MetaData(testing.db)
        binary_table = Table(
            'binary_table', metadata,
            Column(
                'primary_id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('data', LargeBinary),
            Column('data_slice', LargeBinary(100)),
            Column('misc', String(30)),
            Column('pickled', PickleType),
            Column('mypickle', MyPickleType)
        )
        metadata.create_all()

    @engines.close_first
    def teardown(self):
        binary_table.delete().execute()

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def test_round_trip(self):
        testobj1 = pickleable.Foo('im foo 1')
        testobj2 = pickleable.Foo('im foo 2')
        testobj3 = pickleable.Foo('im foo 3')
        stream1 = self.load_stream('binary_data_one.dat')
        stream2 = self.load_stream('binary_data_two.dat')
        binary_table.insert().execute(
            primary_id=1, misc='binary_data_one.dat', data=stream1,
            data_slice=stream1[0:100], pickled=testobj1, mypickle=testobj3)
        binary_table.insert().execute(
            primary_id=2, misc='binary_data_two.dat', data=stream2,
            data_slice=stream2[0:99], pickled=testobj2)
        binary_table.insert().execute(
            primary_id=3, misc='binary_data_two.dat', data=None,
            data_slice=stream2[0:99], pickled=None)
        # verify both a Core select() and raw text() with an explicit typemap
        for stmt in (
            binary_table.select(order_by=binary_table.c.primary_id),
            text(
                "select * from binary_table order by binary_table.primary_id",
                typemap={
                    'pickled': PickleType, 'mypickle': MyPickleType,
                    'data': LargeBinary, 'data_slice': LargeBinary},
                bind=testing.db)
        ):
            l = stmt.execute().fetchall()
            eq_(stream1, l[0]['data'])
            eq_(stream1[0:100], l[0]['data_slice'])
            eq_(stream2, l[1]['data'])
            eq_(testobj1, l[0]['pickled'])
            eq_(testobj2, l[1]['pickled'])
            eq_(testobj3.moredata, l[0]['mypickle'].moredata)
            eq_(l[0]['mypickle'].stuff, 'this is the right stuff')

    @testing.requires.binary_comparisons
    def test_comparison(self):
        """test that type coercion occurs on comparison for binary"""
        expr = binary_table.c.data == 'foo'
        assert isinstance(expr.right.type, LargeBinary)
        data = os.urandom(32)
        binary_table.insert().execute(data=data)
        eq_(
            binary_table.select().where(binary_table.c.data == data).alias().
            count().scalar(), 1)

    @testing.requires.binary_literals
    def test_literal_roundtrip(self):
        compiled = select([cast(literal(util.b("foo")), LargeBinary)]).compile(
            dialect=testing.db.dialect, compile_kwargs={"literal_binds": True})
        result = testing.db.execute(compiled)
        eq_(result.scalar(), util.b("foo"))

    def test_bind_processor_no_dbapi(self):
        b = LargeBinary()
        eq_(b.bind_processor(default.DefaultDialect()), None)

    def load_stream(self, name):
        # fixture files live one directory above this test module
        f = os.path.join(os.path.dirname(__file__), "..", name)
        with open(f, mode='rb') as o:
            return o.read()
# Module-level fixtures populated by ExpressionTest.setup_class().
test_table = meta = MyCustomType = MyTypeDec = None
class ExpressionTest(
        fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
    """Type coercion and propagation through SQL expression constructs:
    bind params, literals, operators and custom/decorated types."""
    __dialect__ = 'default'

    @classmethod
    def setup_class(cls):
        global test_table, meta, MyCustomType, MyTypeDec

        class MyCustomType(types.UserDefinedType):
            # scales values by 10 on the way in, back down on the way out
            def get_col_spec(self):
                return "INT"

            def bind_processor(self, dialect):
                def process(value):
                    return value * 10
                return process

            def result_processor(self, dialect, coltype):
                def process(value):
                    return value / 10
                return process

        class MyOldCustomType(MyCustomType):
            # legacy hook: swaps the + and - operators
            def adapt_operator(self, op):
                return {
                    operators.add: operators.sub,
                    operators.sub: operators.add}.get(op, op)

        class MyTypeDec(types.TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                return "BIND_IN" + str(value)

            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"

        meta = MetaData(testing.db)
        test_table = Table(
            'test', meta,
            Column('id', Integer, primary_key=True),
            Column('data', String(30)),
            Column('atimestamp', Date),
            Column('avalue', MyCustomType),
            Column('bvalue', MyTypeDec(50)),
        )
        meta.create_all()
        # single seed row shared by the execution tests
        test_table.insert().execute({
            'id': 1, 'data': 'somedata',
            'atimestamp': datetime.date(2007, 10, 15), 'avalue': 25,
            'bvalue': 'foo'})

    @classmethod
    def teardown_class(cls):
        meta.drop_all()

    def test_control(self):
        # stored value is scaled by 10 by MyCustomType's bind processor
        assert testing.db.execute("select avalue from test").scalar() == 250
        eq_(
            test_table.select().execute().fetchall(),
            [(1, 'somedata', datetime.date(2007, 10, 15), 25,
              'BIND_INfooBIND_OUT')]
        )

    def test_bind_adapt(self):
        # test an untyped bind gets the left side's type
        expr = test_table.c.atimestamp == bindparam("thedate")
        eq_(expr.right.type._type_affinity, Date)
        eq_(
            testing.db.execute(
                select([
                    test_table.c.id, test_table.c.data,
                    test_table.c.atimestamp]).where(expr),
                {"thedate": datetime.date(2007, 10, 15)}).fetchall(), [
                (1, 'somedata', datetime.date(2007, 10, 15))]
        )
        expr = test_table.c.avalue == bindparam("somevalue")
        eq_(expr.right.type._type_affinity, MyCustomType)
        eq_(
            testing.db.execute(
                test_table.select().where(expr), {'somevalue': 25}
            ).fetchall(), [(
                1, 'somedata', datetime.date(2007, 10, 15), 25,
                'BIND_INfooBIND_OUT')]
        )
        expr = test_table.c.bvalue == bindparam("somevalue")
        eq_(expr.right.type._type_affinity, String)
        eq_(
            testing.db.execute(
                test_table.select().where(expr), {"somevalue": "foo"}
            ).fetchall(), [(
                1, 'somedata', datetime.date(2007, 10, 15), 25,
                'BIND_INfooBIND_OUT')]
        )

    def test_bind_adapt_update(self):
        # the bindparam itself stays NullType; the compiled statement's
        # bind picks up the target column's type
        bp = bindparam("somevalue")
        stmt = test_table.update().values(avalue=bp)
        compiled = stmt.compile()
        eq_(bp.type._type_affinity, types.NullType)
        eq_(compiled.binds['somevalue'].type._type_affinity, MyCustomType)

    def test_bind_adapt_insert(self):
        bp = bindparam("somevalue")
        stmt = test_table.insert().values(avalue=bp)
        compiled = stmt.compile()
        eq_(bp.type._type_affinity, types.NullType)
        eq_(compiled.binds['somevalue'].type._type_affinity, MyCustomType)

    def test_bind_adapt_expression(self):
        bp = bindparam("somevalue")
        stmt = test_table.c.avalue == bp
        eq_(bp.type._type_affinity, types.NullType)
        eq_(stmt.right.type._type_affinity, MyCustomType)

    def test_literal_adapt(self):
        # literals get typed based on the types dictionary, unless
        # compatible with the left side type
        expr = column('foo', String) == 5
        eq_(expr.right.type._type_affinity, Integer)
        expr = column('foo', String) == "asdf"
        eq_(expr.right.type._type_affinity, String)
        expr = column('foo', CHAR) == 5
        eq_(expr.right.type._type_affinity, Integer)
        expr = column('foo', CHAR) == "asdf"
        eq_(expr.right.type.__class__, CHAR)

    def test_typedec_operator_adapt(self):
        # string concat against a TypeDecorator column propagates the
        # decorator to both the expression and the right-hand literal
        expr = test_table.c.bvalue + "hi"
        assert expr.type.__class__ is MyTypeDec
        assert expr.right.type.__class__ is MyTypeDec
        eq_(
            testing.db.execute(select([expr.label('foo')])).scalar(),
            "BIND_INfooBIND_INhiBIND_OUT"
        )

    def test_typedec_is_adapt(self):
        class CoerceNothing(TypeDecorator):
            coerce_to_is_types = ()
            impl = Integer

        class CoerceBool(TypeDecorator):
            coerce_to_is_types = (bool, )
            impl = Boolean

        class CoerceNone(TypeDecorator):
            coerce_to_is_types = (type(None),)
            impl = Integer

        c1 = column('x', CoerceNothing())
        c2 = column('x', CoerceBool())
        c3 = column('x', CoerceNone())
        # coerce_to_is_types determines whether == None / == True
        # compiles to IS rather than an equality bind
        self.assert_compile(
            and_(c1 == None, c2 == None, c3 == None),  # noqa
            "x = :x_1 AND x = :x_2 AND x IS NULL"
        )
        self.assert_compile(
            and_(c1 == True, c2 == True, c3 == True),  # noqa
            "x = :x_1 AND x = true AND x = :x_2",
            dialect=default.DefaultDialect(supports_native_boolean=True)
        )
        self.assert_compile(
            and_(c1 == 3, c2 == 3, c3 == 3),
            "x = :x_1 AND x = :x_2 AND x = :x_3",
            dialect=default.DefaultDialect(supports_native_boolean=True)
        )
        self.assert_compile(
            and_(c1.is_(True), c2.is_(True), c3.is_(True)),
            "x IS :x_1 AND x IS true AND x IS :x_2",
            dialect=default.DefaultDialect(supports_native_boolean=True)
        )

    def test_typedec_righthand_coercion(self):
        class MyTypeDec(types.TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                return "BIND_IN" + str(value)

            def process_result_value(self, value, dialect):
                return value + "BIND_OUT"

        tab = table('test', column('bvalue', MyTypeDec))
        expr = tab.c.bvalue + 6
        self.assert_compile(
            expr,
            "test.bvalue || :bvalue_1",
            use_default_dialect=True
        )
        assert expr.type.__class__ is MyTypeDec
        eq_(
            testing.db.execute(select([expr.label('foo')])).scalar(),
            "BIND_INfooBIND_IN6BIND_OUT"
        )

    def test_bind_typing(self):
        from sqlalchemy.sql import column

        class MyFoobarType(types.UserDefinedType):
            pass

        class Foo(object):
            pass

        # unknown type + integer, right hand bind
        # coerces to given type
        expr = column("foo", MyFoobarType) + 5
        assert expr.right.type._type_affinity is MyFoobarType
        # untyped bind - it gets assigned MyFoobarType
        bp = bindparam("foo")
        expr = column("foo", MyFoobarType) + bp
        assert bp.type._type_affinity is types.NullType
        assert expr.right.type._type_affinity is MyFoobarType
        expr = column("foo", MyFoobarType) + bindparam("foo", type_=Integer)
        assert expr.right.type._type_affinity is types.Integer
        # unknown type + unknown, right hand bind
        # coerces to the left
        expr = column("foo", MyFoobarType) + Foo()
        assert expr.right.type._type_affinity is MyFoobarType
        # including for non-commutative ops
        expr = column("foo", MyFoobarType) - Foo()
        assert expr.right.type._type_affinity is MyFoobarType
        expr = column("foo", MyFoobarType) - datetime.date(2010, 8, 25)
        assert expr.right.type._type_affinity is MyFoobarType

    def test_date_coercion(self):
        from sqlalchemy.sql import column
        expr = column('bar', types.NULLTYPE) - column('foo', types.TIMESTAMP)
        eq_(expr.type._type_affinity, types.NullType)
        # date-returning functions minus a TIMESTAMP yield Interval
        expr = func.sysdate() - column('foo', types.TIMESTAMP)
        eq_(expr.type._type_affinity, types.Interval)
        expr = func.current_date() - column('foo', types.TIMESTAMP)
        eq_(expr.type._type_affinity, types.Interval)

    def test_numerics_coercion(self):
        from sqlalchemy.sql import column
        import operator
        # Numeric op Integer (in either order) stays Numeric
        for op in (operator.add, operator.mul, operator.truediv, operator.sub):
            for other in (Numeric(10, 2), Integer):
                expr = op(
                    column('bar', types.Numeric(10, 2)),
                    column('foo', other)
                )
                assert isinstance(expr.type, types.Numeric)
                expr = op(
                    column('foo', other),
                    column('bar', types.Numeric(10, 2))
                )
                assert isinstance(expr.type, types.Numeric)

    def test_null_comparison(self):
        eq_(
            str(column('a', types.NullType()) + column('b', types.NullType())),
            "a + b"
        )

    def test_expression_typing(self):
        expr = column('bar', Integer) - 3
        eq_(expr.type._type_affinity, Integer)
        expr = bindparam('bar') + bindparam('foo')
        eq_(expr.type, types.NULLTYPE)

    def test_distinct(self):
        s = select([distinct(test_table.c.avalue)])
        eq_(testing.db.execute(s).scalar(), 25)
        s = select([test_table.c.avalue.distinct()])
        eq_(testing.db.execute(s).scalar(), 25)
        # distinct() preserves the column's type
        assert distinct(test_table.c.data).type == test_table.c.data.type
        assert test_table.c.data.distinct().type == test_table.c.data.type

    def test_detect_coercion_of_builtins(self):
        @inspection._self_inspects
        class SomeSQLAThing(object):
            def __repr__(self):
                return "some_sqla_thing()"

        class SomeOtherThing(object):
            pass

        # self-inspecting SQLA-like objects are rejected as literals...
        assert_raises_message(
            exc.ArgumentError,
            r"Object some_sqla_thing\(\) is not legal as a SQL literal value",
            lambda: column('a', String) == SomeSQLAThing()
        )
        # ...while plain objects fall through to NULLTYPE binds
        is_(
            bindparam('x', SomeOtherThing()).type,
            types.NULLTYPE
        )

    def test_detect_coercion_not_fooled_by_mock(self):
        m1 = mock.Mock()
        is_(
            bindparam('x', m1).type,
            types.NULLTYPE
        )
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
    """DDL string output for generic and dialect-specific types on the
    default dialect."""
    __dialect__ = 'default'

    @testing.requires.unbounded_varchar
    def test_string_plain(self):
        self.assert_compile(String(), "VARCHAR")

    def test_string_length(self):
        self.assert_compile(String(50), "VARCHAR(50)")

    def test_string_collation(self):
        self.assert_compile(
            String(50, collation="FOO"), 'VARCHAR(50) COLLATE "FOO"')

    def test_char_plain(self):
        self.assert_compile(CHAR(), "CHAR")

    def test_char_length(self):
        self.assert_compile(CHAR(50), "CHAR(50)")

    def test_char_collation(self):
        self.assert_compile(
            CHAR(50, collation="FOO"), 'CHAR(50) COLLATE "FOO"')

    def test_text_plain(self):
        self.assert_compile(Text(), "TEXT")

    def test_text_length(self):
        self.assert_compile(Text(50), "TEXT(50)")

    def test_text_collation(self):
        self.assert_compile(
            Text(collation="FOO"), 'TEXT COLLATE "FOO"')

    def test_default_compile_pg_inet(self):
        # dialect-specific types compile on the default dialect too
        self.assert_compile(
            dialects.postgresql.INET(), "INET", allow_dialect_select=True)

    def test_default_compile_pg_float(self):
        self.assert_compile(
            dialects.postgresql.FLOAT(), "FLOAT", allow_dialect_select=True)

    def test_default_compile_mysql_integer(self):
        self.assert_compile(
            dialects.mysql.INTEGER(display_width=5), "INTEGER(5)",
            allow_dialect_select=True)

    def test_numeric_plain(self):
        self.assert_compile(types.NUMERIC(), 'NUMERIC')

    def test_numeric_precision(self):
        self.assert_compile(types.NUMERIC(2), 'NUMERIC(2)')

    def test_numeric_scale(self):
        self.assert_compile(types.NUMERIC(2, 4), 'NUMERIC(2, 4)')

    def test_decimal_plain(self):
        self.assert_compile(types.DECIMAL(), 'DECIMAL')

    def test_decimal_precision(self):
        self.assert_compile(types.DECIMAL(2), 'DECIMAL(2)')

    def test_decimal_scale(self):
        self.assert_compile(types.DECIMAL(2, 4), 'DECIMAL(2, 4)')

    def test_kwarg_legacy_typecompiler(self):
        from sqlalchemy.sql import compiler

        class SomeTypeCompiler(compiler.GenericTypeCompiler):
            # transparently decorated w/ kw decorator
            def visit_VARCHAR(self, type_):
                return "MYVARCHAR"

            # not affected
            def visit_INTEGER(self, type_, **kw):
                return "MYINTEGER %s" % kw['type_expression'].name

        dialect = default.DefaultDialect()
        dialect.type_compiler = SomeTypeCompiler(dialect)
        self.assert_compile(
            ddl.CreateColumn(Column('bar', VARCHAR(50))),
            "bar MYVARCHAR",
            dialect=dialect
        )
        self.assert_compile(
            ddl.CreateColumn(Column('bar', INTEGER)),
            "bar MYINTEGER bar",
            dialect=dialect
        )
class TestKWArgPassThru(AssertsCompiledSQL, fixtures.TestBase):
    __backend__ = True

    def test_user_defined(self):
        """test that dialects pass the column through on DDL."""

        class MyType(types.UserDefinedType):
            def get_col_spec(self, **kw):
                # the type_expression kwarg carries the owning Column
                return "FOOB %s" % kw['type_expression'].name

        m = MetaData()
        t = Table('t', m, Column('bar', MyType))
        self.assert_compile(
            ddl.CreateColumn(t.c.bar),
            "bar FOOB bar"
        )
class NumericRawSQLTest(fixtures.TestBase):
    """Test what DBAPIs and dialects return without any typing
    information supplied at the SQLA level.
    """

    def _fixture(self, metadata, type_, data):
        # Create a one-column table and insert a single value; the tests
        # then read it back via raw textual SQL so no result typing applies.
        # (Parameter renamed from ``type`` to avoid shadowing the builtin.)
        t = Table('t', metadata, Column("val", type_))
        metadata.create_all()
        t.insert().execute(val=data)

    @testing.fails_on('sqlite', "Doesn't provide Decimal results natively")
    @testing.provide_metadata
    def test_decimal_fp(self):
        metadata = self.metadata
        self._fixture(metadata, Numeric(10, 5), decimal.Decimal("45.5"))
        val = testing.db.execute("select val from t").scalar()
        assert isinstance(val, decimal.Decimal)
        eq_(val, decimal.Decimal("45.5"))

    @testing.fails_on('sqlite', "Doesn't provide Decimal results natively")
    @testing.provide_metadata
    def test_decimal_int(self):
        metadata = self.metadata
        self._fixture(metadata, Numeric(10, 5), decimal.Decimal("45"))
        val = testing.db.execute("select val from t").scalar()
        assert isinstance(val, decimal.Decimal)
        eq_(val, decimal.Decimal("45"))

    @testing.provide_metadata
    def test_ints(self):
        metadata = self.metadata
        self._fixture(metadata, Integer, 45)
        val = testing.db.execute("select val from t").scalar()
        assert isinstance(val, util.int_types)
        eq_(val, 45)

    @testing.provide_metadata
    def test_float(self):
        metadata = self.metadata
        self._fixture(metadata, Float, 46.583)
        val = testing.db.execute("select val from t").scalar()
        assert isinstance(val, float)
        # some DBAPIs have unusual float handling
        if testing.against('oracle+cx_oracle', 'mysql+oursql', 'firebird'):
            eq_(round_decimal(val, 3), 46.583)
        else:
            eq_(val, 46.583)
# Module-level fixtures populated by IntervalTest.setup_class().
interval_table = metadata = None
class IntervalTest(fixtures.TestBase, AssertsExecutionResults):
    """Round trip and NULL handling for the Interval type, in both native
    and emulated (native=False) modes."""

    @classmethod
    def setup_class(cls):
        global interval_table, metadata
        metadata = MetaData(testing.db)
        interval_table = Table(
            "intervaltable", metadata,
            Column(
                "id", Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column("native_interval", Interval()),
            Column(
                "native_interval_args",
                Interval(day_precision=3, second_precision=6)),
            Column(
                "non_native_interval", Interval(native=False)),
        )
        metadata.create_all()

    @engines.close_first
    def teardown(self):
        interval_table.delete().execute()

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def test_non_native_adapt(self):
        # native=False must survive dialect adaptation and compile as DATETIME
        interval = Interval(native=False)
        adapted = interval.dialect_impl(testing.db.dialect)
        assert isinstance(adapted, Interval)
        assert adapted.native is False
        eq_(str(adapted), "DATETIME")

    @testing.fails_on(
        "postgresql+zxjdbc",
        "Not yet known how to pass values of the INTERVAL type")
    @testing.fails_on(
        "oracle+zxjdbc",
        "Not yet known how to pass values of the INTERVAL type")
    def test_roundtrip(self):
        small_delta = datetime.timedelta(days=15, seconds=5874)
        delta = datetime.timedelta(414)
        interval_table.insert().execute(
            native_interval=small_delta, native_interval_args=delta,
            non_native_interval=delta)
        row = interval_table.select().execute().first()
        eq_(row['native_interval'], small_delta)
        eq_(row['native_interval_args'], delta)
        eq_(row['non_native_interval'], delta)

    @testing.fails_on(
        "oracle+zxjdbc",
        "Not yet known how to pass values of the INTERVAL type")
    def test_null(self):
        # BUG FIX: the kwarg was misspelled ``native_inverval``, which
        # silently left the column unset instead of binding an explicit
        # NULL, so the test never exercised a NULL bind for that column.
        interval_table.insert().execute(
            id=1, native_interval=None, non_native_interval=None)
        row = interval_table.select().execute().first()
        eq_(row['native_interval'], None)
        eq_(row['native_interval_args'], None)
        eq_(row['non_native_interval'], None)
class BooleanTest(
        fixtures.TablesTest, AssertsExecutionResults, AssertsCompiledSQL):
    """test edge cases for booleans.  Note that the main boolean test suite
    is now in testing/suite/test_types.py
    """
    @classmethod
    def define_tables(cls, metadata):
        Table(
            'boolean_table', metadata,
            Column('id', Integer, primary_key=True, autoincrement=False),
            Column('value', Boolean),
            Column('unconstrained_value', Boolean(create_constraint=False)),
        )

    @testing.fails_on(
        'mysql',
        "The CHECK clause is parsed but ignored by all storage engines.")
    @testing.fails_on(
        'mssql', "FIXME: MS-SQL 2005 doesn't honor CHECK ?!?")
    @testing.skip_if(lambda: testing.db.dialect.supports_native_boolean)
    def test_constraint(self):
        # a non-boolean int must violate the generated CHECK constraint
        assert_raises(
            (exc.IntegrityError, exc.ProgrammingError),
            testing.db.execute,
            "insert into boolean_table (id, value) values(1, 5)")

    @testing.skip_if(lambda: testing.db.dialect.supports_native_boolean)
    def test_unconstrained(self):
        # create_constraint=False: arbitrary ints are accepted
        testing.db.execute(
            "insert into boolean_table (id, unconstrained_value)"
            "values (1, 5)")

    def test_non_native_constraint_custom_type(self):
        class Foob(object):
            def __init__(self, value):
                self.value = value

        class MyBool(types.SchemaType, TypeDecorator):
            impl = Boolean()

            def _set_table(self, table, column):
                self.impl._set_table(table, column)

            # future method
            def process_literal_param(self, value, dialect):
                return value.value

            def process_bind_param(self, value, dialect):
                return value.value

        m = MetaData()
        t1 = Table('t', m, Column('x', MyBool()))
        const = [
            c for c in t1.constraints if isinstance(c, CheckConstraint)][0]
        self.assert_compile(
            AddConstraint(const),
            "ALTER TABLE t ADD CHECK (x IN (0, 1))",
            dialect="sqlite"
        )
class PickleTest(fixtures.TestBase):
    """Exercise PickleType.copy_value / compare_values round trips."""

    def test_eq_comparison(self):
        pt = PickleType()
        samples = [
            {'1': '2'},
            pickleable.Bar(5, 6),
            pickleable.OldSchool(10, 11),
        ]
        for sample in samples:
            assert pt.compare_values(pt.copy_value(sample), sample)
        # objects whose comparison blows up propagate the error
        assert_raises(
            NotImplementedError, pt.compare_values,
            pickleable.BrokenComparable('foo'),
            pickleable.BrokenComparable('foo'))

    def test_nonmutable_comparison(self):
        pt = PickleType()
        for sample in (
                {'1': '2'},
                pickleable.Bar(5, 6),
                pickleable.OldSchool(10, 11)):
            assert pt.compare_values(pt.copy_value(sample), sample)
# Module-level fixture populated by CallableTest.setup_class().
meta = None
class CallableTest(fixtures.TestBase):
    """A callable (e.g. functools.partial) may stand in for a type object
    in a Column definition."""

    @classmethod
    def setup_class(cls):
        global meta
        meta = MetaData(testing.db)

    @classmethod
    def teardown_class(cls):
        meta.drop_all()

    def test_callable_as_arg(self):
        type_factory = util.partial(Unicode)
        thing_table = Table('thing', meta, Column('name', type_factory(20)))
        assert isinstance(thing_table.c.name.type, Unicode)
        thing_table.create()

    def test_callable_as_kwarg(self):
        type_factory = util.partial(Unicode)
        thang_table = Table(
            'thang', meta,
            Column('name', type_=type_factory(20), primary_key=True))
        assert isinstance(thang_table.c.name.type, Unicode)
        thang_table.create()
| {
"content_hash": "ef13e35f6ff5f05302d8db0f11c757d3",
"timestamp": "",
"source": "github",
"line_count": 2028,
"max_line_length": 79,
"avg_line_length": 33.08481262327416,
"alnum_prop": 0.5491981638249672,
"repo_name": "EvaSDK/sqlalchemy",
"id": "d562c83ce6f7c9123f7510a7056a4a0e2a8e9616",
"size": "67124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/sql/test_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "Python",
"bytes": "8682711"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import uuid
from datetime import datetime
from random import random, randint
import pytz
from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
from copy import copy
class BaseObject(BaseModel):
    """Common response-marshalling helpers for the ECS model objects.

    Subclasses store attributes in snake_case; ``response_object`` exposes
    them camelCased, as the ECS API returns them.
    """

    def camelCase(self, key):
        """Convert a snake_case key to camelCase (first word kept as-is)."""
        words = []
        for i, word in enumerate(key.split('_')):
            if i > 0:
                words.append(word.title())
            else:
                words.append(word)
        return ''.join(words)

    def gen_response_object(self):
        """Return a shallow copy of ``__dict__`` with keys camelCased."""
        response_object = copy(self.__dict__)
        # BUG FIX: iterate over a snapshot of the items. Deleting keys from
        # the dict while iterating .items() directly raises
        # "RuntimeError: dictionary changed size during iteration" on Py3.
        for key, value in list(response_object.items()):
            if '_' in key:
                response_object[self.camelCase(key)] = value
                del response_object[key]
        return response_object

    @property
    def response_object(self):
        """The API-shaped (camelCased) view of this object."""
        return self.gen_response_object()
class Cluster(BaseObject):
    """In-memory model of an ECS cluster (account/region are hardcoded
    into the generated ARN)."""

    def __init__(self, cluster_name):
        self.active_services_count = 0
        self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format(
            cluster_name)
        self.name = cluster_name
        self.pending_tasks_count = 0
        self.registered_container_instances_count = 0
        self.running_tasks_count = 0
        self.status = 'ACTIVE'

    @property
    def physical_resource_id(self):
        # CloudFormation identifies a cluster by its name
        return self.name

    @property
    def response_object(self):
        # the API uses clusterArn/clusterName rather than arn/name
        response_object = self.gen_response_object()
        response_object['clusterArn'] = self.arn
        response_object['clusterName'] = self.name
        del response_object['arn'], response_object['name']
        return response_object

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        ecs_backend = ecs_backends[region_name]
        return ecs_backend.create_cluster(
            # ClusterName is optional in CloudFormation, thus create a random
            # name if necessary
            cluster_name=properties.get(
                'ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
        )

    @classmethod
    def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
        # a ClusterName change requires replacement: delete then recreate
        properties = cloudformation_json['Properties']
        if original_resource.name != properties['ClusterName']:
            ecs_backend = ecs_backends[region_name]
            ecs_backend.delete_cluster(original_resource.arn)
            return ecs_backend.create_cluster(
                # ClusterName is optional in CloudFormation, thus create a
                # random name if necessary
                cluster_name=properties.get(
                    'ClusterName', 'ecscluster{0}'.format(int(random() * 10 ** 6))),
            )
        else:
            # no-op when nothing changed between old and new resources
            return original_resource
class TaskDefinition(BaseObject):
    """In-memory model of one revision of an ECS task definition."""

    def __init__(self, family, revision, container_definitions, volumes=None):
        self.family = family
        self.revision = revision
        # the ARN embeds family and revision; account/region are hardcoded
        self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(
            family, revision)
        self.container_definitions = container_definitions
        if volumes is None:
            self.volumes = []
        else:
            self.volumes = volumes

    @property
    def response_object(self):
        # the API uses taskDefinitionArn rather than arn
        response_object = self.gen_response_object()
        response_object['taskDefinitionArn'] = response_object['arn']
        del response_object['arn']
        return response_object

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        # Family is optional in CloudFormation; generate a random one
        family = properties.get(
            'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
        container_definitions = properties['ContainerDefinitions']
        volumes = properties.get('Volumes')
        ecs_backend = ecs_backends[region_name]
        return ecs_backend.register_task_definition(
            family=family, container_definitions=container_definitions, volumes=volumes)

    @classmethod
    def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        family = properties.get(
            'Family', 'task-definition-{0}'.format(int(random() * 10 ** 6)))
        container_definitions = properties['ContainerDefinitions']
        volumes = properties.get('Volumes')
        # any content change requires replacement: deregister + re-register
        if (original_resource.family != family or
                original_resource.container_definitions != container_definitions or
                original_resource.volumes != volumes):
            # currently TaskRoleArn isn't stored at TaskDefinition
            # instances
            ecs_backend = ecs_backends[region_name]
            ecs_backend.deregister_task_definition(original_resource.arn)
            return ecs_backend.register_task_definition(
                family=family, container_definitions=container_definitions, volumes=volumes)
        else:
            # no-op when nothing changed between old and new resources
            return original_resource
class Task(BaseObject):
    """In-memory model of a running ECS task."""

    def __init__(self, cluster, task_definition, container_instance_arn,
                 resource_requirements, overrides=None, started_by=''):
        # BUG FIX: ``overrides`` previously defaulted to a shared mutable
        # ``{}``; every Task created without overrides aliased the same
        # dict, so mutating one task's overrides leaked into all others.
        # Using None as sentinel keeps the call signature compatible.
        self.cluster_arn = cluster.arn
        # a fresh uuid per task keeps ARNs unique
        self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(
            str(uuid.uuid1()))
        self.container_instance_arn = container_instance_arn
        self.last_status = 'RUNNING'
        self.desired_status = 'RUNNING'
        self.task_definition_arn = task_definition.arn
        self.overrides = overrides if overrides is not None else {}
        self.containers = []
        self.started_by = started_by
        self.stopped_reason = ''
        self.resource_requirements = resource_requirements

    @property
    def response_object(self):
        response_object = self.gen_response_object()
        return response_object
class Service(BaseObject):
def __init__(self, cluster, service_name, task_definition, desired_count):
    """Create a service in *cluster* running *desired_count* copies of
    *task_definition*, with a single initial PRIMARY deployment."""
    self.cluster_arn = cluster.arn
    self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(
        service_name)
    self.name = service_name
    self.status = 'ACTIVE'
    self.running_count = 0
    self.task_definition = task_definition.arn
    self.desired_count = desired_count
    self.events = []
    self.deployments = [
        {
            'createdAt': datetime.now(pytz.utc),
            'desiredCount': self.desired_count,
            # random numeric id in the ecs-svc/ namespace
            'id': 'ecs-svc/{}'.format(randint(0, 32**12)),
            'pendingCount': self.desired_count,
            'runningCount': 0,
            'status': 'PRIMARY',
            'taskDefinition': task_definition.arn,
            'updatedAt': datetime.now(pytz.utc),
        }
    ]
    self.load_balancers = []
    self.pending_count = 0
@property
def physical_resource_id(self):
return self.arn
@property
def response_object(self):
response_object = self.gen_response_object()
del response_object['name'], response_object['arn']
response_object['serviceName'] = self.name
response_object['serviceArn'] = self.arn
for deployment in response_object['deployments']:
if isinstance(deployment['createdAt'], datetime):
deployment['createdAt'] = deployment['createdAt'].isoformat()
if isinstance(deployment['updatedAt'], datetime):
deployment['updatedAt'] = deployment['updatedAt'].isoformat()
return response_object
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if isinstance(properties['Cluster'], Cluster):
cluster = properties['Cluster'].name
else:
cluster = properties['Cluster']
if isinstance(properties['TaskDefinition'], TaskDefinition):
task_definition = properties['TaskDefinition'].family
else:
task_definition = properties['TaskDefinition']
service_name = '{0}Service{1}'.format(cluster, int(random() * 10 ** 6))
desired_count = properties['DesiredCount']
# TODO: LoadBalancers
# TODO: Role
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_service(
cluster, service_name, task_definition, desired_count)
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
if isinstance(properties['Cluster'], Cluster):
cluster_name = properties['Cluster'].name
else:
cluster_name = properties['Cluster']
if isinstance(properties['TaskDefinition'], TaskDefinition):
task_definition = properties['TaskDefinition'].family
else:
task_definition = properties['TaskDefinition']
desired_count = properties['DesiredCount']
ecs_backend = ecs_backends[region_name]
service_name = original_resource.name
if original_resource.cluster_arn != Cluster(cluster_name).arn:
# TODO: LoadBalancers
# TODO: Role
ecs_backend.delete_service(cluster_name, service_name)
new_service_name = '{0}Service{1}'.format(
cluster_name, int(random() * 10 ** 6))
return ecs_backend.create_service(
cluster_name, new_service_name, task_definition, desired_count)
else:
return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count)
class ContainerInstance(BaseObject):
    """Mock of an EC2 instance registered into an ECS cluster.

    Starts with a fixed pool of CPU/memory/port resources;
    ``remaining_resources`` is mutated as tasks are placed and stopped,
    while ``registered_resources`` records the original capacity.
    """

    def __init__(self, ec2_instance_id):
        def fresh_resources():
            # Build a brand-new list each call so that mutating
            # ``remaining_resources`` can never leak into
            # ``registered_resources``.
            return [
                {'doubleValue': 0.0, 'integerValue': 4096, 'longValue': 0,
                 'name': 'CPU', 'type': 'INTEGER'},
                {'doubleValue': 0.0, 'integerValue': 7482, 'longValue': 0,
                 'name': 'MEMORY', 'type': 'INTEGER'},
                {'doubleValue': 0.0, 'integerValue': 0, 'longValue': 0,
                 'name': 'PORTS',
                 'stringSetValue': ['22', '2376', '2375', '51678', '51679'],
                 'type': 'STRINGSET'},
                {'doubleValue': 0.0, 'integerValue': 0, 'longValue': 0,
                 'name': 'PORTS_UDP', 'stringSetValue': [],
                 'type': 'STRINGSET'},
            ]

        self.ec2_instance_id = ec2_instance_id
        self.agent_connected = True
        self.status = 'ACTIVE'
        self.registered_resources = fresh_resources()
        self.remaining_resources = fresh_resources()
        self.container_instance_arn = \
            "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(
                str(uuid.uuid1()))
        self.pending_task_count = 0
        self.running_task_count = 0
        self.version_info = {
            'agentVersion': "1.0.0",
            'agentHash': '4023248',
            'dockerVersion': 'DockerVersion: 1.5.0',
        }

    @property
    def response_object(self):
        """Return the camelCased dict form used in API responses."""
        return self.gen_response_object()
class ContainerInstanceFailure(BaseObject):
    """Failure record returned when a container instance lookup misses."""

    def __init__(self, reason, container_instance_id):
        self.reason = reason
        self.arn = ("arn:aws:ecs:us-east-1:012345678910:"
                    "container-instance/{0}".format(container_instance_id))

    @property
    def response_object(self):
        """Return the failure as a response dict with reason and arn."""
        failure = self.gen_response_object()
        failure['reason'] = self.reason
        failure['arn'] = self.arn
        return failure
class EC2ContainerServiceBackend(BaseBackend):
    """In-memory mock of the Amazon ECS API.

    All state lives in plain dicts keyed by cluster/family/service names;
    ARNs are fabricated with a fixed account id in ``us-east-1``.
    """

    def __init__(self):
        # cluster name -> Cluster
        self.clusters = {}
        # family -> list of TaskDefinition revisions (revision N at index N-1)
        self.task_definitions = {}
        # cluster name -> {task ARN -> Task}
        self.tasks = {}
        # '<cluster>:<service>' -> Service
        self.services = {}
        # cluster name -> {container instance id -> ContainerInstance}
        self.container_instances = {}

    def describe_task_definition(self, task_definition_str):
        """Resolve a task definition from a family name, 'family:revision'
        string or full ARN. Without an explicit revision the latest
        revision is returned. Raises Exception when unknown.
        """
        task_definition_name = task_definition_str.split('/')[-1]
        if ':' in task_definition_name:
            family, revision = task_definition_name.split(':')
            revision = int(revision)
        else:
            family = task_definition_name
            # No revision given: default to the newest one.
            revision = len(self.task_definitions.get(family, []))
        if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
            return self.task_definitions[family][revision - 1]
        elif family in self.task_definitions and revision == -1:
            # 'family:-1' explicitly requests the newest revision.
            return self.task_definitions[family][revision]
        else:
            raise Exception(
                "{0} is not a task_definition".format(task_definition_name))

    def create_cluster(self, cluster_name):
        """Create (or replace) a cluster with the given name."""
        cluster = Cluster(cluster_name)
        self.clusters[cluster_name] = cluster
        return cluster

    def list_clusters(self):
        """
        Return all cluster ARNs. maxSize and pagination not implemented.
        """
        return [cluster.arn for cluster in self.clusters.values()]

    def describe_clusters(self, list_clusters_name=None):
        """Describe the named clusters (or the 'default' cluster when no
        names are given). Raises Exception for an unknown cluster name."""
        list_clusters = []
        if list_clusters_name is None:
            if 'default' in self.clusters:
                list_clusters.append(self.clusters['default'].response_object)
        else:
            for cluster in list_clusters_name:
                cluster_name = cluster.split('/')[-1]
                if cluster_name in self.clusters:
                    list_clusters.append(
                        self.clusters[cluster_name].response_object)
                else:
                    raise Exception(
                        "{0} is not a cluster".format(cluster_name))
        return list_clusters

    def delete_cluster(self, cluster_str):
        """Remove and return a cluster by name or ARN."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name in self.clusters:
            return self.clusters.pop(cluster_name)
        else:
            raise Exception("{0} is not a cluster".format(cluster_name))

    def register_task_definition(self, family, container_definitions, volumes):
        """Register a new revision of a task definition family."""
        if family in self.task_definitions:
            revision = len(self.task_definitions[family]) + 1
        else:
            self.task_definitions[family] = []
            revision = 1
        task_definition = TaskDefinition(
            family, revision, container_definitions, volumes)
        self.task_definitions[family].append(task_definition)
        return task_definition

    def list_task_definitions(self):
        """
        Return ARNs of all registered task definition revisions.
        Filtering not implemented.
        """
        task_arns = []
        for task_definition_list in self.task_definitions.values():
            task_arns.extend(
                [task_definition.arn for task_definition in task_definition_list])
        return task_arns

    def deregister_task_definition(self, task_definition_str):
        """Remove and return one revision ('family:revision' required).

        NOTE(review): popping from the list shifts the positions of later
        revisions, so subsequent revision lookups may be off by one —
        kept as-is to preserve existing behavior.
        """
        task_definition_name = task_definition_str.split('/')[-1]
        family, revision = task_definition_name.split(':')
        revision = int(revision)
        if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
            return self.task_definitions[family].pop(revision - 1)
        else:
            raise Exception(
                "{0} is not a task_definition".format(task_definition_name))

    def run_task(self, cluster_str, task_definition_str, count, overrides, started_by):
        """Place up to ``count`` tasks on the cluster's ACTIVE container
        instances, greedily filling each instance while it has capacity."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name in self.clusters:
            cluster = self.clusters[cluster_name]
        else:
            raise Exception("{0} is not a cluster".format(cluster_name))
        task_definition = self.describe_task_definition(task_definition_str)
        if cluster_name not in self.tasks:
            self.tasks[cluster_name] = {}
        tasks = []
        container_instances = list(
            self.container_instances.get(cluster_name, {}).keys())
        if not container_instances:
            raise Exception("No instances found in cluster {}".format(cluster_name))
        active_container_instances = [x for x in container_instances if
                                      self.container_instances[cluster_name][x].status == 'ACTIVE']
        resource_requirements = self._calculate_task_resource_requirements(task_definition)
        # TODO: return event about unable to place task if not able to place enough tasks to meet count
        placed_count = 0
        for container_instance in active_container_instances:
            container_instance = self.container_instances[cluster_name][container_instance]
            container_instance_arn = container_instance.container_instance_arn
            try_to_place = True
            while try_to_place:
                can_be_placed, message = self._can_be_placed(container_instance, resource_requirements)
                if can_be_placed:
                    task = Task(cluster, task_definition, container_instance_arn,
                                resource_requirements, overrides or {}, started_by or '')
                    self.update_container_instance_resources(container_instance, resource_requirements)
                    tasks.append(task)
                    self.tasks[cluster_name][task.task_arn] = task
                    placed_count += 1
                    if placed_count == count:
                        return tasks
                else:
                    # Instance is full: move on to the next one.
                    try_to_place = False
        return tasks

    @staticmethod
    def _calculate_task_resource_requirements(task_definition):
        """Sum CPU/memory over all container definitions and collect the
        host ports they map; returns a requirements dict."""
        resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []}
        for container_definition in task_definition.container_definitions:
            resource_requirements["CPU"] += container_definition.get('cpu')
            resource_requirements["MEMORY"] += container_definition.get("memory")
            for port_mapping in container_definition.get("portMappings", []):
                resource_requirements["PORTS"].append(port_mapping.get('hostPort'))
        return resource_requirements

    @staticmethod
    def _can_be_placed(container_instance, task_resource_requirements):
        """
        :param container_instance: The container instance trying to be placed onto
        :param task_resource_requirements: The calculated resource requirements of the task in the form of a dict
        :return: A boolean stating whether the given container instance has enough resources to have the task placed on
        it as well as a description, if it cannot be placed this will describe why.
        """
        # TODO: Implement default and other placement strategies as well as constraints:
        # docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement.html
        remaining_cpu = 0
        remaining_memory = 0
        reserved_ports = []
        for resource in container_instance.remaining_resources:
            if resource.get("name") == "CPU":
                remaining_cpu = resource.get("integerValue")
            elif resource.get("name") == "MEMORY":
                remaining_memory = resource.get("integerValue")
            elif resource.get("name") == "PORTS":
                reserved_ports = resource.get("stringSetValue")
        if task_resource_requirements.get("CPU") > remaining_cpu:
            return False, "Not enough CPU credits"
        if task_resource_requirements.get("MEMORY") > remaining_memory:
            return False, "Not enough memory"
        ports_needed = task_resource_requirements.get("PORTS")
        for port in ports_needed:
            if str(port) in reserved_ports:
                return False, "Port clash"
        return True, "Can be placed"

    def start_task(self, cluster_str, task_definition_str, container_instances, overrides, started_by):
        """Start one task per given container instance, without checking
        resource availability (mirrors the StartTask API)."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name in self.clusters:
            cluster = self.clusters[cluster_name]
        else:
            raise Exception("{0} is not a cluster".format(cluster_name))
        task_definition = self.describe_task_definition(task_definition_str)
        if cluster_name not in self.tasks:
            self.tasks[cluster_name] = {}
        tasks = []
        if not container_instances:
            raise Exception("No container instance list provided")
        container_instance_ids = [x.split('/')[-1]
                                  for x in container_instances]
        resource_requirements = self._calculate_task_resource_requirements(task_definition)
        for container_instance_id in container_instance_ids:
            container_instance = self.container_instances[cluster_name][
                container_instance_id
            ]
            task = Task(cluster, task_definition, container_instance.container_instance_arn,
                        resource_requirements, overrides or {}, started_by or '')
            tasks.append(task)
            self.update_container_instance_resources(container_instance, resource_requirements)
            self.tasks[cluster_name][task.task_arn] = task
        return tasks

    def describe_tasks(self, cluster_str, tasks):
        """Return Task objects matching the given task ids/ARNs.

        NOTE(review): matching is performed across all clusters, not just
        ``cluster_str`` — kept as-is to preserve existing behavior.
        """
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
        if not tasks:
            raise Exception("tasks cannot be empty")
        response = []
        for cluster_tasks in self.tasks.values():
            for task_id, task in cluster_tasks.items():
                if task_id in tasks or task.task_arn in tasks:
                    response.append(task)
        return response

    def list_tasks(self, cluster_str, container_instance, family, started_by, service_name, desiredStatus):
        """Return task ARNs filtered by cluster, container instance and
        started_by. TODO: family, service_name and desiredStatus filters
        are accepted but not applied.
        """
        filtered_tasks = []
        for cluster_tasks in self.tasks.values():
            for task in cluster_tasks.values():
                filtered_tasks.append(task)
        if cluster_str:
            cluster_name = cluster_str.split('/')[-1]
            if cluster_name not in self.clusters:
                raise Exception("{0} is not a cluster".format(cluster_name))
            filtered_tasks = list(
                filter(lambda t: cluster_name in t.cluster_arn, filtered_tasks))
        if container_instance:
            filtered_tasks = list(filter(
                lambda t: container_instance in t.container_instance_arn, filtered_tasks))
        if started_by:
            filtered_tasks = list(
                filter(lambda t: started_by == t.started_by, filtered_tasks))
        return [t.task_arn for t in filtered_tasks]

    def stop_task(self, cluster_str, task_str, reason):
        """Mark a task STOPPED, release its resources back to its
        container instance, and return it."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
        if not task_str:
            raise Exception("A task ID or ARN is required")
        task_id = task_str.split('/')[-1]
        tasks = self.tasks.get(cluster_name, None)
        if not tasks:
            raise Exception(
                "Cluster {} has no registered tasks".format(cluster_name))
        for task in tasks.keys():
            if task.endswith(task_id):
                container_instance_arn = tasks[task].container_instance_arn
                container_instance = self.container_instances[cluster_name][container_instance_arn.split('/')[-1]]
                # Give the task's CPU/memory/ports back to the instance.
                self.update_container_instance_resources(container_instance, tasks[task].resource_requirements,
                                                         removing=True)
                tasks[task].last_status = 'STOPPED'
                tasks[task].desired_status = 'STOPPED'
                tasks[task].stopped_reason = reason
                return tasks[task]
        raise Exception("Could not find task {} on cluster {}".format(
            task_str, cluster_name))

    def create_service(self, cluster_str, service_name, task_definition_str, desired_count):
        """Create a service on an existing cluster; desired_count
        defaults to 0 when None."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name in self.clusters:
            cluster = self.clusters[cluster_name]
        else:
            raise Exception("{0} is not a cluster".format(cluster_name))
        task_definition = self.describe_task_definition(task_definition_str)
        desired_count = desired_count if desired_count is not None else 0
        service = Service(cluster, service_name,
                          task_definition, desired_count)
        cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
        self.services[cluster_service_pair] = service
        return service

    def list_services(self, cluster_str):
        """Return the sorted service ARNs registered on the cluster."""
        cluster_name = cluster_str.split('/')[-1]
        service_arns = []
        for key, value in self.services.items():
            # Keys are '<cluster>:<service>'; a prefix match is required
            # (plain substring containment would also match services of
            # any cluster whose name merely *ends* with cluster_name).
            if key.startswith(cluster_name + ':'):
                service_arns.append(self.services[key].arn)
        return sorted(service_arns)

    def describe_services(self, cluster_str, service_names_or_arns):
        """Return Service objects matching the given names or ARNs
        within the cluster."""
        cluster_name = cluster_str.split('/')[-1]
        result = []
        for existing_service_name, existing_service_obj in sorted(self.services.items()):
            for requested_name_or_arn in service_names_or_arns:
                cluster_service_pair = '{0}:{1}'.format(
                    cluster_name, requested_name_or_arn)
                if cluster_service_pair == existing_service_name or existing_service_obj.arn == requested_name_or_arn:
                    result.append(existing_service_obj)
        return result

    def update_service(self, cluster_str, service_name, task_definition_str, desired_count):
        """Update a service's task definition and/or desired count;
        a None argument leaves that field untouched."""
        cluster_name = cluster_str.split('/')[-1]
        cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
        if cluster_service_pair in self.services:
            if task_definition_str is not None:
                # Validates existence; raises for an unknown definition.
                self.describe_task_definition(task_definition_str)
                self.services[
                    cluster_service_pair].task_definition = task_definition_str
            if desired_count is not None:
                self.services[
                    cluster_service_pair].desired_count = desired_count
            return self.services[cluster_service_pair]
        else:
            raise Exception("cluster {0} or service {1} does not exist".format(
                cluster_name, service_name))

    def delete_service(self, cluster_name, service_name):
        """Delete a service; it must have been scaled to desiredCount=0."""
        cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
        if cluster_service_pair in self.services:
            service = self.services[cluster_service_pair]
            if service.desired_count > 0:
                raise Exception("Service must have desiredCount=0")
            else:
                return self.services.pop(cluster_service_pair)
        else:
            raise Exception("cluster {0} or service {1} does not exist".format(
                cluster_name, service_name))

    def register_container_instance(self, cluster_str, ec2_instance_id):
        """Register an EC2 instance into a cluster and return the new
        ContainerInstance."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
        container_instance = ContainerInstance(ec2_instance_id)
        if not self.container_instances.get(cluster_name):
            self.container_instances[cluster_name] = {}
        container_instance_id = container_instance.container_instance_arn.split(
            '/')[-1]
        self.container_instances[cluster_name][
            container_instance_id] = container_instance
        self.clusters[cluster_name].registered_container_instances_count += 1
        return container_instance

    def list_container_instances(self, cluster_str):
        """Return the sorted container instance ARNs of the cluster."""
        cluster_name = cluster_str.split('/')[-1]
        container_instances_values = self.container_instances.get(
            cluster_name, {}).values()
        container_instances = [
            ci.container_instance_arn for ci in container_instances_values]
        return sorted(container_instances)

    def describe_container_instances(self, cluster_str, list_container_instance_ids):
        """Return (found instances, MISSING failures) for the given ids/ARNs."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
        failures = []
        container_instance_objects = []
        for container_instance_id in list_container_instance_ids:
            container_instance_id = container_instance_id.split('/')[-1]
            container_instance = self.container_instances[
                cluster_name].get(container_instance_id, None)
            if container_instance is not None:
                container_instance_objects.append(container_instance)
            else:
                failures.append(ContainerInstanceFailure(
                    'MISSING', container_instance_id))
        return container_instance_objects, failures

    def update_container_instances_state(self, cluster_str, list_container_instance_ids, status):
        """Set instances to ACTIVE or DRAINING; returns
        (updated instances, MISSING failures)."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
        status = status.upper()
        if status not in ['ACTIVE', 'DRAINING']:
            raise Exception("An error occurred (InvalidParameterException) when calling the UpdateContainerInstancesState operation: \
                            Container instances status should be one of [ACTIVE,DRAINING]")
        failures = []
        container_instance_objects = []
        for container_instance_id in list_container_instance_ids:
            container_instance = self.container_instances[cluster_name].get(container_instance_id, None)
            if container_instance is not None:
                container_instance.status = status
                container_instance_objects.append(container_instance)
            else:
                failures.append(ContainerInstanceFailure('MISSING', container_instance_id))
        return container_instance_objects, failures

    def update_container_instance_resources(self, container_instance, task_resources, removing=False):
        """Reserve (or, with removing=True, release) a task's CPU, memory
        and ports on the instance and adjust its running task count."""
        resource_multiplier = 1
        if removing:
            resource_multiplier = -1
        for resource in container_instance.remaining_resources:
            if resource.get("name") == "CPU":
                resource["integerValue"] -= task_resources.get('CPU') * resource_multiplier
            elif resource.get("name") == "MEMORY":
                resource["integerValue"] -= task_resources.get('MEMORY') * resource_multiplier
            elif resource.get("name") == "PORTS":
                for port in task_resources.get("PORTS"):
                    if removing:
                        resource["stringSetValue"].remove(str(port))
                    else:
                        resource["stringSetValue"].append(str(port))
        container_instance.running_task_count += resource_multiplier

    def deregister_container_instance(self, cluster_str, container_instance_str, force):
        """Deregister an instance; refuses when tasks are running unless
        force=True, in which case the instance is parked under the
        synthetic 'orphaned' cluster."""
        failures = []
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
        container_instance_id = container_instance_str.split('/')[-1]
        container_instance = self.container_instances[cluster_name].get(container_instance_id)
        if container_instance is None:
            # BUG FIX: the placeholder was previously emitted literally
            # because .format() was never called on the message.
            raise Exception(
                "{0} is not a container id in the cluster".format(
                    container_instance_id))
        if not force and container_instance.running_task_count > 0:
            raise Exception("Found running tasks on the instance.")
        # Currently assume that people might want to do something based around deregistered instances
        # with tasks left running on them - but nothing if no tasks were running already
        elif force and container_instance.running_task_count > 0:
            if not self.container_instances.get('orphaned'):
                self.container_instances['orphaned'] = {}
            self.container_instances['orphaned'][container_instance_id] = container_instance
        del(self.container_instances[cluster_name][container_instance_id])
        self._respond_to_cluster_state_update(cluster_str)
        return container_instance, failures

    def _respond_to_cluster_state_update(self, cluster_str):
        """Hook called after cluster membership changes; currently only
        validates the cluster name (placeholder for future behavior)."""
        cluster_name = cluster_str.split('/')[-1]
        if cluster_name not in self.clusters:
            raise Exception("{0} is not a cluster".format(cluster_name))
# One independent ECS backend per region known to the EC2 mock.
ecs_backends = {region: EC2ContainerServiceBackend()
                for region in ec2_backends}
| {
"content_hash": "6f632dfaaf99e3287376b4d365a27b22",
"timestamp": "",
"source": "github",
"line_count": 772,
"max_line_length": 134,
"avg_line_length": 43.770725388601036,
"alnum_prop": 0.6056642301204462,
"repo_name": "kefo/moto",
"id": "bc847b32e70f1f9f7912cc157294393f85ee4c2d",
"size": "33791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/ecs/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "Python",
"bytes": "2996908"
},
{
"name": "Ruby",
"bytes": "188"
}
],
"symlink_target": ""
} |
"""Breadcrumb module for Zinnia"""
import re
from functools import wraps
from datetime import datetime
from django.utils.dateformat import format
from django.utils.timezone import is_aware
from django.utils.timezone import localtime
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
class Crumb(object):
    """A single breadcrumb entry: a display name plus an optional URL
    (crumbs without a URL render as plain text)."""

    def __init__(self, name, url=None):
        self.name, self.url = name, url
def year_crumb(creation_date):
    """Build the breadcrumb linking to the year archive of the date."""
    year = creation_date.strftime('%Y')
    url = reverse('zinnia:entry_archive_year', args=[year])
    return Crumb(year, url)
def month_crumb(creation_date):
    """Build the breadcrumb linking to the month archive of the date,
    labelled with the capitalized month name."""
    url_args = [creation_date.strftime('%Y'), creation_date.strftime('%m')]
    month_text = format(creation_date, 'F').capitalize()
    return Crumb(month_text,
                 reverse('zinnia:entry_archive_month', args=url_args))
def day_crumb(creation_date):
    """Build the breadcrumb linking to the day archive of the date."""
    year, month, day = (creation_date.strftime(fmt)
                        for fmt in ('%Y', '%m', '%d'))
    return Crumb(day, reverse('zinnia:entry_archive_day',
                              args=[year, month, day]))
def entry_breadcrumbs(entry):
    """Build the year > month > day > title breadcrumb trail for an
    Entry, localizing its creation date when it is timezone-aware."""
    creation_date = entry.creation_date
    if is_aware(creation_date):
        creation_date = localtime(creation_date)
    crumbs = [build(creation_date)
              for build in (year_crumb, month_crumb, day_crumb)]
    crumbs.append(Crumb(entry.title))
    return crumbs
def ZINNIA_ROOT_URL():
    """Return the URL of the Zinnia archive index.

    A function (formerly a lambda assignment, PEP 8 E731) so the URL is
    resolved lazily, after the URLconf is loaded.
    """
    return reverse('zinnia:entry_archive_index')


# Map a model class name to a callable building its breadcrumb trail.
MODEL_BREADCRUMBS = {'Tag': lambda x: [Crumb(_('Tags'),
                                             reverse('zinnia:tag_list')),
                                       Crumb(x.name)],
                     'Author': lambda x: [Crumb(_('Authors'),
                                                reverse('zinnia:author_list')),
                                          Crumb(str(x))],
                     'Category': lambda x: [Crumb(
                         _('Categories'), reverse('zinnia:category_list'))] +
                     [Crumb(str(anc), anc.get_absolute_url())
                      for anc in x.get_ancestors()] + [Crumb(x.title)],
                     'Entry': entry_breadcrumbs}

# Date archive paths: .../YYYY/MM?/DD?/... (month and day optional).
ARCHIVE_REGEXP = re.compile(
    r'.*(?P<year>\d{4})/(?P<month>\d{2})?/(?P<day>\d{2})?.*')
# Week archive paths: .../YYYY/week/WW...
ARCHIVE_WEEK_REGEXP = re.compile(
    r'.*(?P<year>\d{4})/week/(?P<week>\d+)?.*')
# Trailing pagination component: page/N...
PAGE_REGEXP = re.compile(r'page/(?P<page>\d+).*$')
def handle_page_crumb(func):
    """
    Decorator for handling the current page in the breadcrumbs:
    strips the 'page/N' component from the path before delegating,
    then appends a 'Page N' crumb for any page after the first.
    """
    @wraps(func)
    def wrapper(path, model, page, root_name):
        stripped_path = PAGE_REGEXP.sub('', path)
        breadcrumbs = func(stripped_path, model, root_name)
        # ``page`` truthiness matters: an empty Page is falsy.
        if page and page.number > 1:
            breadcrumbs[-1].url = stripped_path
            breadcrumbs.append(Crumb(_('Page %s') % page.number))
        return breadcrumbs
    return wrapper
@handle_page_crumb
def retrieve_breadcrumbs(path, model_instance, root_name=''):
    """
    Build a semi-hardcoded breadcrumbs
    based of the model's url handled by Zinnia.

    Resolution order: model instance, week archive URL, date archive
    URL, then the last path component as a plain capitalized crumb.
    """
    breadcrumbs = []
    if root_name:
        breadcrumbs.append(Crumb(root_name, ZINNIA_ROOT_URL()))

    if model_instance is not None:
        key = model_instance.__class__.__name__
        if key in MODEL_BREADCRUMBS:
            breadcrumbs.extend(MODEL_BREADCRUMBS[key](model_instance))
        return breadcrumbs

    date_match = ARCHIVE_WEEK_REGEXP.match(path)
    if date_match:
        year, week = date_match.groups()
        breadcrumbs.extend([year_crumb(datetime(int(year), 1, 1)),
                            Crumb(_('Week %s') % week)])
        return breadcrumbs

    date_match = ARCHIVE_REGEXP.match(path)
    if date_match:
        date_dict = date_match.groupdict()
        # Optional components default to 1. A parsed value of 0 is also
        # coerced to 1, matching the historical ``cond and x or y``
        # expression this replaces (which silently fell back to 1 when
        # int(...) evaluated falsy).
        month = int(date_dict['month']) if date_dict['month'] is not None else 0
        day = int(date_dict['day']) if date_dict['day'] is not None else 0
        path_date = datetime(int(date_dict['year']), month or 1, day or 1)
        date_breadcrumbs = [year_crumb(path_date)]
        if date_dict['month']:
            date_breadcrumbs.append(month_crumb(path_date))
        if date_dict['day']:
            date_breadcrumbs.append(day_crumb(path_date))
        breadcrumbs.extend(date_breadcrumbs)
        # The deepest date crumb points at the current page: no link.
        breadcrumbs[-1].url = None
        return breadcrumbs

    url_components = [comp for comp in
                      path.replace(ZINNIA_ROOT_URL(), '', 1).split('/')
                      if comp]
    if url_components:
        breadcrumbs.append(Crumb(_(url_components[-1].capitalize())))
    return breadcrumbs
| {
"content_hash": "9ec41826ed47a472f08af60f62f3b3d1",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 79,
"avg_line_length": 31.961783439490446,
"alnum_prop": 0.5603826225587883,
"repo_name": "1844144/django-blog-zinnia",
"id": "8f4d3c8030334993cc1899f9e462a1445eb02641",
"size": "5018",
"binary": false,
"copies": "1",
"ref": "refs/heads/deep",
"path": "zinnia/breadcrumbs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "77370"
},
{
"name": "HTML",
"bytes": "75068"
},
{
"name": "JavaScript",
"bytes": "235617"
},
{
"name": "Makefile",
"bytes": "1789"
},
{
"name": "Python",
"bytes": "506854"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.