text stringlengths 4 1.02M | meta dict |
|---|---|
from panda3d.core import *
from panda3d.direct import *
import Playground
import random
from direct.fsm import ClassicFSM, State
from direct.actor import Actor
from toontown.toonbase import ToontownGlobals
class MMPlayground(Playground.Playground):
    """Minnie's Melodyland playground; adds an FSM with a piano-top activity state."""

    def __init__(self, loader, parentFSM, doneEvent):
        Playground.Playground.__init__(self, loader, parentFSM, doneEvent)
        # Two-state activity FSM: 'off' <-> 'OnPiano'.
        offState = State.State('off', self.enterOff, self.exitOff, ['OnPiano'])
        pianoState = State.State('OnPiano', self.enterOnPiano, self.exitOnPiano, ['off'])
        self.activityFsm = ClassicFSM.ClassicFSM('Activity', [offState, pianoState], 'off', 'off')
        self.activityFsm.enterInitialState()

    def load(self):
        Playground.Playground.load(self)

    def unload(self):
        del self.activityFsm
        Playground.Playground.unload(self)

    def enter(self, requestStatus):
        Playground.Playground.enter(self, requestStatus)

    def exit(self):
        Playground.Playground.exit(self)

    def handleBookClose(self):
        Playground.Playground.handleBookClose(self)

    def teleportInDone(self):
        Playground.Playground.teleportInDone(self)

    def enterOff(self):
        return None

    def exitOff(self):
        return None

    def enterOnPiano(self):
        # Reparent the local toon onto the piano node while in this state.
        base.localAvatar.b_setParent(ToontownGlobals.SPMinniesPiano)

    def exitOnPiano(self):
        # Restore the toon to the normal scene-graph parent.
        base.localAvatar.b_setParent(ToontownGlobals.SPRender)

    def showPaths(self):
        from toontown.classicchars import CCharPaths
        from toontown.toonbase import TTLocalizer
        self.showPathPoints(CCharPaths.getPaths(TTLocalizer.Minnie))
| {
"content_hash": "22ff6612b39b62e7c3d65f05d63d5277",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 208,
"avg_line_length": 31.78,
"alnum_prop": 0.7123977344241661,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "3c298b668edc428dda8197b4ecbeaa303c795386",
"size": "1589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/safezone/MMPlayground.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
import configs.locs
import os
import db.text
import configs.mload
import random
import utils
class LineDB:
    """Topic-keyed line (quote) database bound to bot commands.

    ``configure`` wires add/get/list/remove hooks into a command module;
    lines live in a per-server ``db.text.DB`` file kept under the user
    database folder.  When ``random`` is true a topic accumulates lines and
    lookups pick one at random; otherwise each topic stores a single line
    that is replaced on every add.

    Fix vs. original: ``remove()`` indexed ``topic[0]`` unconditionally and
    raised IndexError when no topic was given; it now guards the empty case
    (mirroring ``add()``) so the 'No lines found.' fallback is reachable.
    """

    def __init__(self, name, plural, seperprefix,
            seper, add, main, remove, showlist, random, eh='', channel=False,
            aname=None, search=True):
        """Store configuration and ensure the on-disk folder exists.

        Note: the ``random`` parameter intentionally shadows the ``random``
        module, but only inside this method's scope.
        """
        self.dbfolder = configs.locs.userdb + '/' + plural
        os.makedirs(self.dbfolder, exist_ok=True)
        self.name = name
        self.plural = plural
        self.seperprefix = seperprefix
        self.seper = seper
        self.f_add = add
        self.f_main = main
        self.f_remove = remove
        self.f_list = showlist
        self.channel = channel
        self.random = random
        # Used in help strings: "random" pick vs "single" stored line.
        self.randomtext = "random" if random else "single"
        self.aname = aname if aname else self.name
        self.search = search
        self.eh = eh

    def splitline(self, i1, i2=""):
        """Split ``i1`` into ``(topic, rest)``.

        If ``i1`` starts with the separator prefix its first word is the
        topic and the remainder is the line; otherwise the default ``i2``
        becomes the topic and all of ``i1`` is the line.
        """
        r1 = i2
        r2 = i1
        if i1 and i1[0].find(self.seperprefix) == 0:
            r1 = i1.split()[0]
            r2 = ' '.join(i1.split()[1:])
        return (r1, r2)

    def initserver(self, server):
        """Attach this server's line DB, loading it if it already exists."""
        path = "%s/%s.py" % (
            self.dbfolder, server.entry['settings'])
        server.state['%s.db' % self.plural] = db.text.DB(path)
        if os.path.exists(path):
            server.state['%s.db' % self.plural].load()
        server.state['%s.db' % self.plural].save()

    def configure(self, m):
        """Register add/list/get/remove command hooks on module ``m``."""
        m.set_help('Store and retrieve %s.' % self.plural)
        m.add_command_hook('add',
            {
                'rights': [] if self.channel else ['normal'],
                'function': self.f_add,
                'noquote': True,
                'help': 'Add a %s to the database.' % self.name,
                'args': [
                    {
                        'name': '%s' % self.aname,
                        'optional': False,
                        'help': str(
                            'The %s, in this format: [%s%s] <%s>.%s,' % (
                                self.name, self.seperprefix, self.seper, self.name,
                                self.eh)),
                        'end': True,
                    }
                ]
            })
        m.add_command_hook('%stopics' % self.name,
            {
                'function': self.f_list,
                'help': 'View %s topics.' % self.name,
                'args': []
            })
        m.add_command_hook(self.name,
            {
                'function': self.f_main,
                'noquote': True,
                'help': 'Get a %s %s.' % (self.randomtext, self.name),
                'args': [
                    {
                        'name': 'add',
                        'optional': True,
                        'keyvalue': '',
                        'help': 'Alias for %s.add.' % m.name,
                    },
                    {
                        'name': 'remove',
                        'optional': True,
                        'keyvalue': '',
                        'help': 'Alias for %s.remove.' % m.name,
                    },
                    {
                        'name': 'force',
                        'optional': True,
                        'keyvalue': '',
                        'help': '-force option for remove alias.',
                    },
                    {
                        'name': self.aname,
                        'optional': True,
                        'help': str(
                            'The %s, in this format'
                            ' (add/remove will be ignored if -add or -remove'
                            ' is set): [add/remove] [%s%s] <search>.' % (
                                self.name, self.seperprefix, self.seper)),
                        'end': True,
                    }
                ]
            })
        m.add_command_hook('remove',
            {
                'function': self.f_remove,
                'noquote': True,
                'help': 'Remove a %s.' % self.name,
                'rights': [] if self.channel else ['normal'],
                'args': [
                    {
                        'name': 'force',
                        'optional': True,
                        'keyvalue': '',
                        'help': 'Force removal of multiple quotes.',
                    },
                    {
                        'name': self.aname,
                        'optional': True,
                        'help': str(
                            'The %s, in this format: [%s%s] <search>.' % (
                                self.name, self.seperprefix, self.seper)),
                        'end': True,
                    }
                ]
            })
        m.add_command_alias(self.plural, self.name)

    def add(self, fp, args, dt, channel=False):
        """Add a line under a topic; in channel mode the topic is the room."""
        line = args.getlinstr(self.aname)
        topic, line = self.splitline(line,
            dt)
        if topic == "":
            return 'You must specify a topic.'
        elif topic[0] != '#' and channel:
            topic = fp.room()
        if topic and channel and not fp.hasright(topic + ',normal'):
            return 'You need <normal> in the target channel.'
        # In non-random ("single") mode the topic is reset on every add, so
        # it always holds exactly one line.
        if (topic not in fp.server.state['%s.db' % self.plural].db()) or (
                not self.random):
            fp.server.state['%s.db' % self.plural].db()[topic] = []
        if line in fp.server.state['%s.db' % self.plural].db()[topic]:
            return 'That line already exists.'
        fp.server.state['%s.db' % self.plural].db()[topic].append(line)
        fp.server.state['%s.db' % self.plural].save()
        return '"%s" has been added to %s %s' % (line, self.seper, topic)

    def main(self, fp, args, dt):
        """Fetch a line (random in random mode), or dispatch to add/remove."""
        line = args.getlinstr(self.aname, '')
        if 'add' in args.lin:
            return fp.execute('%s.add %s' % (self.plural, line))
        elif 'remove' in args.lin:
            return fp.execute('%s.remove %s%s' % (self.plural,
                "-force " if 'force' in args.lin else '',
                line))
        else:
            # Also accept a leading literal "add"/"remove" word in the text.
            try:
                action = line.split()[0]
                if action == 'add':
                    line = " ".join(line.split()[1:])
                    return fp.execute('%s.add %s' % (self.plural, line))
                elif action == 'remove':
                    line = " ".join(line.split()[1:])
                    return fp.execute('%s.remove %s%s' % (self.plural,
                        "-force " if 'force' in args.lin else '',
                        line))
            except IndexError:
                pass
        if not self.search:
            line = ""
        topic, line = self.splitline(line,
            dt)
        db = fp.server.state['%s.db' % self.plural].db()
        if topic not in db or len(db[topic]) == 0:
            if not topic:
                return 'No lines found.'
            return 'There are no lines for %s' % topic
        choices = []
        # NOTE(review): relies on configs.match being importable through the
        # configs package (only configs.locs/configs.mload are imported at
        # the top of this file) -- confirm.
        for q in db[topic]:
            if configs.match.matchnocase(q, line, False):
                choices.append(q)
        if not choices:
            return 'No matching lines found.'
        return random.choice(choices)

    def showlist(self, fp, args):
        """Return a comma-joined listing of all topics."""
        return 'Topics: ' + utils.ltos(
            fp.server.state['%s.db' % self.plural].db())

    def remove(self, fp, args, dt, channel=False):
        """Delete matching line(s) from a topic; -force allows multi-delete."""
        line = args.getlinstr(self.aname, '')
        topic, line = self.splitline(line,
            dt)
        # Guard the empty-topic case before indexing (the original crashed
        # with IndexError on ``topic[0]`` when no topic was available).
        if channel and (not topic or topic[0] != '#'):
            topic = fp.room()
        if channel and not fp.hasright(topic + ',normal'):
            return 'You need <normal> in the target channel.'
        db = fp.server.state['%s.db' % self.plural].db()
        if topic not in db or len(db[topic]) == 0:
            if not topic:
                return 'No lines found.'
            return 'There are no lines for %s' % topic
        choices = []
        for qi in range(len(db[topic])):
            q = db[topic][qi]
            if configs.match.matchnocase(q, line, False):
                choices.append(qi)
        if not choices:
            return 'No matching lines found.'
        if len(choices) > 1 and 'force' not in args.lin:
            return '%d lines selected, use -force to delete.' % len(choices)
        todelete = db[topic][choices[0]]
        db[topic] = utils.remove_indices(db[topic], choices)
        fp.server.state['%s.db' % self.plural].save()
        if len(choices) > 1:
            return 'Deleted %d lines.' % len(choices)
        else:
            return 'Deleted "%s".' % todelete
| {
"content_hash": "38d966ad0930c450d3f2a7ba5cc79e5e",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 76,
"avg_line_length": 37.36725663716814,
"alnum_prop": 0.43931320307874483,
"repo_name": "shacknetisp/fourthevaz",
"id": "da9228b34a9850a3b585eeb7da32b478bddf10e1",
"size": "8469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/default/share/linedb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3159"
},
{
"name": "Python",
"bytes": "252429"
},
{
"name": "Shell",
"bytes": "928"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import os
from subprocess import PIPE, Popen
from stacker.exceptions import ImproperlyConfigured
# Module-level logger named after this module (stacker.hooks.command).
logger = logging.getLogger(__name__)
def _devnull():
return open(os.devnull, 'wb')
def run_command(provider, context, command, capture=False, interactive=False,
                ignore_status=False, quiet=False, stdin=None, env=None,
                **kwargs):
    """Run a custom command as a hook.

    Keyword Arguments:
        command (list or str):
            Command to run.
        capture (bool, optional):
            If enabled, capture the command's stdout and stderr, and return
            them in the hook result. Default: false
        interactive (bool, optional):
            If enabled, allow the command to interact with stdin. Otherwise,
            stdin will be set to the null device. Default: false
        ignore_status (bool, optional):
            Don't fail the hook if the command returns a non-zero status.
            Default: false
        quiet (bool, optional):
            Redirect the command's stdout and stderr to the null device,
            silencing all output. Should not be enabled if `capture` is also
            enabled. Default: false
        stdin (str, optional):
            String to send to the stdin of the command. Implicitly disables
            `interactive`.
        env (dict, optional):
            Dictionary of environment variable overrides for the command
            context. Will be merged with the current environment.
        **kwargs:
            Any other arguments will be forwarded to the `subprocess.Popen`
            function. Interesting ones include: `cwd` and `shell`.

    Examples:
        .. code-block:: yaml

            pre_build:
              - path: stacker.hooks.command.run_command
                required: true
                enabled: true
                data_key: copy_env
                args:
                  command: ['cp', 'environment.template', 'environment']
              - path: stacker.hooks.command.run_command
                required: true
                enabled: true
                data_key: get_git_commit
                args:
                  command: ['git', 'rev-parse', 'HEAD']
                  cwd: ./my-git-repo
                  capture: true
              - path: stacker.hooks.command.run_command
                args:
                  command: 'cd $PROJECT_DIR/project; npm install'
                  env:
                    PROJECT_DIR: ./my-project
                  shell: true
    """
    if quiet and capture:
        raise ImproperlyConfigured(
            __name__ + '.run_command',
            'Cannot enable `quiet` and `capture` options simultaneously')

    # Keep references to any null-device handles we open so they can be
    # closed once the command finishes (the original leaked them).
    null_handles = []

    def _open_null():
        handle = _devnull()
        null_handles.append(handle)
        return handle

    if quiet:
        out_err_type = _open_null()
    elif capture:
        out_err_type = PIPE
    else:
        out_err_type = None

    if interactive:
        in_type = None
    elif stdin:
        in_type = PIPE
    else:
        in_type = _open_null()

    if env:
        full_env = os.environ.copy()
        full_env.update(env)
        env = full_env

    logger.info('Running command: %s', command)

    proc = Popen(command, stdin=in_type, stdout=out_err_type,
                 stderr=out_err_type, env=env, **kwargs)
    try:
        out, err = proc.communicate(stdin)
        status = proc.wait()

        if status == 0 or ignore_status:
            return {
                'returncode': proc.returncode,
                'stdout': out,
                'stderr': err
            }

        # Don't print the command line again if we already did earlier.
        # (`warning` replaces the deprecated `warn` alias.)
        if logger.isEnabledFor(logging.INFO):
            logger.warning('Command failed with returncode %d', status)
        else:
            logger.warning('Command failed with returncode %d: %s', status,
                           command)

        return None
    finally:
        if proc.returncode is None:
            proc.kill()
        # Close any null-device handles opened above.
        for handle in null_handles:
            handle.close()
| {
"content_hash": "03bff50e4e756b89cdd6005b68d3f7cb",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 77,
"avg_line_length": 32.38709677419355,
"alnum_prop": 0.5572709163346613,
"repo_name": "remind101/stacker",
"id": "2539753d8282746ec070fd7c1cd3c7bf0a2bbd0f",
"size": "4016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stacker/hooks/command.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1057"
},
{
"name": "Makefile",
"bytes": "429"
},
{
"name": "Python",
"bytes": "512358"
},
{
"name": "Shell",
"bytes": "29734"
}
],
"symlink_target": ""
} |
import numpy as np
import sklearn.datasets as datasets
import sklearn.learning_curve as lc
import sklearn.naive_bayes as nb
import sklearn.svm as svm
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestLearningCurve(tm.TestCase):
    """Checks ModelFrame's learning-curve accessor against plain sklearn."""

    def test_learning_curve(self):
        digits = datasets.load_digits()
        frame = pdml.ModelFrame(digits)
        got = frame.learning_curve.learning_curve(frame.naive_bayes.GaussianNB())
        want = lc.learning_curve(nb.GaussianNB(), digits.data, digits.target)
        # learning_curve returns (train_sizes, train_scores, test_scores).
        self.assertEqual(len(got), 3)
        for ours, theirs in zip(got, want):
            self.assert_numpy_array_almost_equal(ours, theirs)

    def test_validation_curve(self):
        digits = datasets.load_digits()
        frame = pdml.ModelFrame(digits)
        param_range = np.logspace(-2, -1, 2)
        estimator = frame.svm.SVC(random_state=self.random_state)
        got = frame.learning_curve.validation_curve(estimator, 'gamma',
                                                    param_range)
        want = lc.validation_curve(svm.SVC(random_state=self.random_state),
                                   digits.data, digits.target,
                                   'gamma', param_range)
        # validation_curve returns (train_scores, test_scores).
        self.assertEqual(len(got), 2)
        for ours, theirs in zip(got, want):
            self.assert_numpy_array_almost_equal(ours, theirs)
if __name__ == '__main__':
    # Run this module's tests under nose: verbose (-vvs), stop on first
    # failure (-x), drop into pdb on failures; exit=False keeps the
    # interpreter alive after the run.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| {
"content_hash": "2b8b476cafa8aa8267b2f50ab9fde206",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 81,
"avg_line_length": 36.46808510638298,
"alnum_prop": 0.5950991831971996,
"repo_name": "sinhrks/pandas-ml",
"id": "0b05ecfceccf41ba622fd3a466f04fc63738e4e9",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas_ml/skaccessors/test/test_learning_curve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "431477"
},
{
"name": "Shell",
"bytes": "903"
}
],
"symlink_target": ""
} |
class Car(object):
    """A car whose tax rate is derived from its price.

    Per the assignment comments below: price > 10,000 -> 15% tax,
    otherwise 12%.  The original did not compile (missing ``__init__``
    body/colon) and used .015/.012 instead of the specified .15/.12.
    """

    def __init__(self, price, speed, fuel, milage):
        # 'milage' spelling kept from the original signature.
        self.price = price
        self.speed = speed
        self.fuel = fuel
        self.milage = milage
        # Tax follows the price per the assignment spec.
        self.tax = .15 if price > 10000 else .12
        # The assignment asks that the details be shown on construction.
        self.print_all()

    def print_all(self):
        """Print every attribute of the car, one per line."""
        print('Price: ' + str(self.price))
        print('Speed: ' + str(self.speed))
        print('Fuel: ' + str(self.fuel))
        print('Milage: ' + str(self.milage))
        print('Tax: ' + str(self.tax))
# Six sample cars; tax is computed automatically from the price, so the
# original's trailing tax argument (and its invalid bare literals like
# 50mph/full) are replaced with strings and dropped respectively.
car1 = Car(20000, '50mph', 'full', '27mpg')
car2 = Car(40000, '35mph', 'full', '17mpg')
car3 = Car(44000, '30mph', 'full', '15mpg')
car4 = Car(8000, '55mph', 'empty', '37mpg')
car5 = Car(9000, '50mph', 'full', '29mpg')
car6 = Car(10000, '45mph', 'empty', '35mpg')
# Create a class called Car. In the __init__(), allow the user to specify the following attributes: price, speed, fuel, mileage. If the price is greater than 10,000, set the tax to be 15%. Otherwise, set the tax to be 12%.
# Create six different instances of the class Car. In the class have a method called display_all() that returns all the information about the car as a string. In your __init__(), call this display_all() method to display information about the car once the attributes have been defined.
# A sample output would be like this:
# Price: 2000
# Speed: 35mph
# Fuel: Full
# Mileage: 15mpg
# Tax: 0.12
# Price: 2000
# Speed: 5mph
# Fuel: Not Full
# Mileage: 105mpg
# Tax: 0.12
# Price: 2000
# Speed: 15mph
# Fuel: Kind of Full
# Mileage: 95mpg
# Tax: 0.12 | {
"content_hash": "f9f3e388ffec3f660225d089ea552ee0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 285,
"avg_line_length": 27.176470588235293,
"alnum_prop": 0.6746031746031746,
"repo_name": "authman/Python201609",
"id": "8750c05ed257570d1bc99410d5afa7b5fedd9417",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Sparks_Steven/Assignments/OOP - Car/Car.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1231"
},
{
"name": "C",
"bytes": "430679"
},
{
"name": "C++",
"bytes": "21416"
},
{
"name": "CSS",
"bytes": "22689"
},
{
"name": "HTML",
"bytes": "168012"
},
{
"name": "JavaScript",
"bytes": "3734"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "590654"
},
{
"name": "Shell",
"bytes": "9350"
}
],
"symlink_target": ""
} |
from data.tools import Control
from data.states import init, run, halt
# Build the game's state machine (Init -> Run -> Halt), start in 'Init',
# and enter the main loop.  (Renamed the control variable from an
# unprofessional name; behavior is unchanged.)
control = Control()
control.setup_states({'Init': init.Init(),
                      'Run': run.Run(),
                      'Halt': halt.Halt()
                      }, 'Init')
control.main()
| {
"content_hash": "e7a3fcdecfc228f0f073c11cef71dcb7",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.5203252032520326,
"repo_name": "rabitdash/practice",
"id": "bd4e431479306f58d3156f8c39b5922168cbbb09",
"size": "246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python-pj/2048_OOP/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8563"
},
{
"name": "C++",
"bytes": "42312"
},
{
"name": "HTML",
"bytes": "208"
},
{
"name": "Java",
"bytes": "1409"
},
{
"name": "JavaScript",
"bytes": "835"
},
{
"name": "Lua",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "699"
},
{
"name": "Python",
"bytes": "152045"
},
{
"name": "Shell",
"bytes": "2102"
}
],
"symlink_target": ""
} |
# Standard library
import ConfigParser
import io
import json
import os
import re
import sys
import tempfile
import types
import unittest
import urllib

# Third-party
import cherrypy
from mako.template import Template

# Project-local
import FileStoreDatabase
import DarunGrimSessions
import DarunGrimDatabase
import SecurityImplications
import MSPatchWeb
import FileStore
from HTMLPages import *
# Name of the optional configuration file read by WebServer.__init__.
config_file = 'DarunGrim3.cfg'
class WebServer(object):
DebugLevel = 0
    def __init__(self):
        """Set up storage paths and analysis helpers.

        Hard-coded defaults are used first, then overridden from the
        "Directories" section of DarunGrim3.cfg when that file exists.

        NOTE(review): self.DatabaseName is only assigned inside the config
        branch but is used unconditionally below -- without a config file
        this raises AttributeError; confirm the config file is mandatory.
        """
        #Something Configurable
        self.BinariesStorageDirectory = r'C:\mat\Projects\Binaries'
        self.MicrosoftBinariesStorageDirectory = self.BinariesStorageDirectory
        self.DGFDirectory = r'C:\mat\Projects\DGFs'
        self.IDAPath = None
        self.PatchTemporaryStore = 'Patches'
        if os.path.exists( config_file ):
            fd = open( config_file )
            config_data = fd.read()
            fd.close()
            # NOTE(review): ConfigParser and io do not appear in this file's
            # import block -- confirm they are provided elsewhere (e.g. via
            # the HTMLPages star import) or add explicit imports.
            config = ConfigParser.RawConfigParser()
            config.readfp(io.BytesIO( config_data ))
            self.BinariesStorageDirectory = os.path.join( os.getcwd(), config.get("Directories", "BinariesStorage") )
            self.MicrosoftBinariesStorageDirectory = self.BinariesStorageDirectory
            self.DGFDirectory = os.path.join( os.getcwd(), config.get("Directories", "DGFDirectory") )
            self.IDAPath = config.get("Directories", "IDAPath")
            self.DatabaseName = config.get("Directories", "DatabaseName")
            self.PatchTemporaryStore = config.get("Directories", "PatchTemporaryStore")
        #Operation
        database = FileStoreDatabase.Database( self.DatabaseName )
        self.PatchTimelineAnalyzer = FileStoreDatabase.Analyzer( database = database )
        self.DarunGrimSessionsInstance = DarunGrimSessions.Manager( self.DatabaseName, self.BinariesStorageDirectory, self.DGFDirectory, self.IDAPath )
        self.PatternAnalyzer = SecurityImplications.PatternAnalyzer()
def index(self):
mytemplate = Template( IndexTemplateText )
database = FileStoreDatabase.Database( self.DatabaseName )
patches = database.GetPatches()
return mytemplate.render()
index.exposed = True
    def ShowFileList(self, company_name = None, filename = None, version_string = None ):
        """Drill-down file browser: companies -> filenames -> versions.

        With no arguments, lists company names; with a company, lists its
        filenames (with version counts); with company+filename, lists the
        per-version file details.  The company+filename+version_string
        combination is a stub and currently returns None.
        """
        filenames = []
        database = FileStoreDatabase.Database( self.DatabaseName )
        if company_name:
            if filename:
                if version_string:
                    #Show info
                    pass
                else:
                    #List version strings
                    file_information_list = []
                    database = FileStoreDatabase.Database( self.DatabaseName )
                    for file_info in database.GetFileByCompanyFileName( company_name, filename ):
                        fullFilename = os.path.join( self.BinariesStorageDirectory, file_info.full_path)
                        # "32bit"/"64bit" label taken from the PE header.
                        archInfo = self.Is32bitExecutable( fullFilename)
                        file_information_list.append( (file_info.filename, file_info.ctime, file_info.mtime, file_info.added_time, file_info.md5, file_info.sha1, file_info.id, file_info.version_string, None, archInfo ) )
                    projects = database.GetProjects()
                    mytemplate = Template( FileListTemplate, input_encoding='utf-8' , output_encoding='utf-8' )
                    return mytemplate.render(
                        company_name = company_name,
                        filename = filename,
                        file_information_list = file_information_list,
                        show_add_to_queue = True,
                        projects = projects
                    )
            else:
                #List filenames
                numVersions = []
                for (filename, ) in database.GetFileNames( company_name ):
                    # One extra query per filename to count its versions.
                    numVersion = len(database.GetFileByCompanyFileName( company_name, filename))
                    filenames.append( filename )
                    numVersions.append(numVersion)
                mytemplate = Template( FileListFileNamesTemplateText, input_encoding='utf-8' , output_encoding='utf-8' )
                return mytemplate.render(
                    company_name = company_name,
                    filenames = filenames,
                    numVersions = numVersions
                )
        else:
            #List company_names
            for (filename, ) in database.GetCompanyNames():
                filenames.append( filename )
            mytemplate = Template( FileListCompanyNamesTemplateText, input_encoding='utf-8' , output_encoding='utf-8' )
            return mytemplate.render( filenames = filenames )
    ShowFileList.exposed = True
def Is32bitExecutable( self, filename):
# determine the executable's base architecture, 32bit / 64bit
# TODO - this function might be located in somewhere else
import pefile
pe = pefile.PE(filename)
_32bitFlag = pefile.IMAGE_CHARACTERISTICS['IMAGE_FILE_32BIT_MACHINE']
if ( _32bitFlag & pe.FILE_HEADER.Machine ) == _32bitFlag:
return "32bit"
return "64bit"
    def FileTree(self, company_name = None, filename = None, version_string = None ):
        """Serve a jsTree-based file browser page.

        The page loads tree nodes lazily via AJAX requests to FileTreeJSON,
        passing company_name/filename/version_string attributes of the node
        being expanded.  The query parameters here are accepted but unused.
        """
        return """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<script type="text/javascript" src="http://static.jstree.com/v.1.0rc2/jquery.js"></script>
<script type="text/javascript" src="http://static.jstree.com/v.1.0rc2/jquery.cookie.js"></script>
<script type="text/javascript" src="http://static.jstree.com/v.1.0rc2/jquery.hotkeys.js"></script>
<script type="text/javascript" src="http://static.jstree.com/v.1.0rc2/jquery.jstree.js"></script>
</head>
<body>
""" + MainMenu + """
<div id="demo1" class="demo"></div>
<script type="text/javascript">
$(function () {
	$("#demo1").jstree({
		"json_data" :
		{
			// I chose an ajax enabled tree - again - as this is most common, and maybe a bit more complex
			// All the options are the same as jQuery's except for `data` which CAN (not should) be a function
			"ajax" : {
				// the URL to fetch the data
				"url" : "FileTreeJSON",
				// this function is executed in the instance's scope (this refers to the tree instance)
				// the parameter is the node being loaded (may be -1, 0, or undefined when loading the root nodes)
				"data" : function (n) {
					// the result is fed to the AJAX request `data` option
					return {
						"company_name" : n.attr ? n.attr("company_name"): "",
						"filename" : n.attr ? n.attr("filename"): "",
						"version_string" : n.attr ? n.attr("version_string"): ""
					};
				}
			}
		}
		,
		"plugins" : [ "themes", "json_data", "checkbox" ]
	});
});
</script>
</body>
</html>"""
    FileTree.exposed = True
    def FileTreeJSON(self, company_name = None, filename = None, version_string = None ):
        """AJAX backend for FileTree: return jsTree JSON for one tree level.

        No arguments -> company nodes; company only -> filename nodes
        (closed, expandable); company+filename -> version-string leaves.
        The fully-qualified case is a stub and returns None.
        """
        names = []
        database = FileStoreDatabase.Database( self.DatabaseName )
        if company_name:
            if filename:
                if version_string:
                    #Show info
                    pass
                else:
                    #List version strings
                    print 'List version strings'
                    #List filenames
                    version_strings = []
                    for (id, name, ) in database.GetVersionStringsWithIDs( company_name, filename ):
                        tree_data = {}
                        tree_data[ "data" ] = name
                        # NOTE(review): the version string is stored under the
                        # "filename" attribute here (and the queried id is
                        # unused) -- looks suspicious; confirm against the
                        # jsTree "data" callback in FileTree.
                        tree_data[ "attr" ] = { "company_name": company_name, "filename": name }
                        version_strings.append( tree_data )
                    version_strings_json = json.dumps( version_strings )
                    return version_strings_json
            else:
                print 'List filenames'
                #List filenames
                file_names = []
                for (name, ) in database.GetFileNames( company_name ):
                    tree_data = {}
                    tree_data[ "data" ] = name
                    tree_data[ "attr" ] = { "company_name": company_name, "filename": name }
                    # "closed" marks the node as lazily expandable.
                    tree_data[ "state" ] = "closed"
                    file_names.append( tree_data )
                file_names_json = json.dumps( file_names )
                return file_names_json
        else:
            company_names = []
            for (name, ) in database.GetCompanyNames():
                tree_data = {}
                tree_data[ "data" ] = name
                tree_data[ "attr" ] = { "company_name": name, "rel": "drive" }
                tree_data[ "state" ] = "closed"
                company_names.append( tree_data )
            company_names_json = json.dumps( company_names )
            return company_names_json
    FileTreeJSON.exposed = True
def ShowFileImport( self, folder = None, move_file = None, overwrite_mode = None ):
mytemplate = Template( FileImportTemplateText )
if folder:
file_store = FileStore.FileProcessor( self.DatabaseName )
copy_file = True
if move_file == 'yes':
copy_file = False
overwrite_mode_val = False
if overwrite_mode and overwrite_mode == 'yes':
overwrite_mode_val = True
file_store.CheckInFiles( folder , self.BinariesStorageDirectory, copy_file = copy_file, overwrite_mode = overwrite_mode_val )
return mytemplate.render( folder = folder )
ShowFileImport.exposed = True
    def ShowFileSearch( self, type = None, search_str = None,sub_type = None, sub_search_str = None, date_type = None, datepicker_from = None, datepicker_to = None ):
        """Search stored files, or render the search form when no query.

        ``type`` selects the primary field (Filename/MD5/SHA1/File Path);
        ``sub_type``/``sub_search_str`` add a secondary filter (company
        name) and ``date_type`` + the two datepicker values add a date
        range.  Note the parameter ``type`` shadows the builtin.
        """
        if type and search_str:
            database = FileStoreDatabase.Database( self.DatabaseName )
            file_infos = []
            if type == 'Filename':
                file_infos = database.GetFileByFileNameWildMatch( search_str, sub_type , sub_search_str.lower(), date_type, datepicker_from, datepicker_to )
            elif type == 'MD5':
                file_infos = database.GetFileByMD5( search_str.lower(), sub_type , sub_search_str.lower(), date_type, datepicker_from, datepicker_to )
            elif type == 'SHA1':
                file_infos = database.GetFileBySHA1( search_str.lower(), sub_type , sub_search_str.lower(), date_type, datepicker_from, datepicker_to )
            elif type == 'File Path':
                file_infos = database.GetFileBySrcFullPathWildMatch( search_str.lower(), sub_type , sub_search_str.lower(), date_type, datepicker_from, datepicker_to )
            file_information_list = []
            for file_info in file_infos:
                file_information_list.append( (file_info.filename, file_info.ctime, file_info.mtime, file_info.added_time, file_info.md5, file_info.sha1, file_info.id, file_info.version_string, None ) )
            projects = database.GetProjects()
            mytemplate = Template( FileListTemplate, input_encoding='utf-8' , output_encoding='utf-8' )
            return mytemplate.render(
                company_name = "",
                filename = "",
                file_information_list = file_information_list,
                show_add_to_queue = True,
                projects = projects
            )
        else:
            # No query yet: render the search form.
            mytemplate = Template( """<%def name="layoutdata()">
		<form name="input" action="ShowFileSearch">
			<table>
			<tr>
				<td>
					<select name="type">
						<option value="Filename">Filename</option>
						<option value="MD5">MD5</option>
						<option value="SHA1">SHA1</option>
						<option value="File Path">File Path</option>
					</select>
				</td>
				<td colspan=2>
					<input type="text" size="50" name="search_str" value=""/>
				</td>
			</tr>
			<tr>
				<td>
					<select name="sub_type">
						<option value="CompanyName">Company Name</option>
					</select>
				</td>
				<td colspan=2>
					<input type="text" size="50" name="sub_search_str" value=""/>
				</td>
			</tr>
			<tr>
				<td>
					<select name="date_type">
						<option value="CreatedDate">Created Date</option>
						<option value="ModifiedDate">Modified Date</option>
						<option value="AddedDate">Added Date</option>
					</select>
				</td>
				<td>
					<input id="datepicker_from" type="text" name="datepicker_from" value="">
				</td>
				<td>
					<input id="datepicker_to" type="text" name="datepicker_to" value="">
				</td>
			</tr>
			<table>
			<p><input type="submit" value="Search"/>
		</form>
</%def>
""" + BodyHTML )
            return mytemplate.render()
    ShowFileSearch.exposed = True
def ShowMSPatchList( self, operation = '' ):
if operation == 'update':
patch_downloader = MSPatchWeb.PatchDownloader( self.PatchTemporaryStore, self.DatabaseName )
patch_downloader.DownloadCurrentYearPatches()
mytemplate = Template( PatchesTemplateText )
database = FileStoreDatabase.Database( self.DatabaseName )
patches = database.GetPatches()
return mytemplate.render( patches=patches )
ShowMSPatchList.exposed = True
def PatchInfo( self, id ):
mytemplate = Template( PatchInfoTemplateText )
database = FileStoreDatabase.Database( self.DatabaseName )
downloads = database.GetDownloadByPatchID( id )
return mytemplate.render( id=id, downloads=downloads )
PatchInfo.exposed = True
    def DownloadInfo(self, patch_id, id, operation = '' ):
        """List files in one download; operation='extract' first downloads
        the package and extracts it into the Microsoft binaries storage.

        NOTE(review): tempfile does not appear in this file's import block
        as shown -- confirm it is provided elsewhere or add an import.
        """
        database = FileStoreDatabase.Database( self.DatabaseName )
        if operation == 'extract':
            # Two scratch folders: one for extraction, one for the download.
            patch_temporary_folder = tempfile.mkdtemp()
            patch_temporary_folder2 = tempfile.mkdtemp()
            file_store = FileStore.MSFileProcessor( patch_temporary_folder, self.MicrosoftBinariesStorageDirectory, database = database )
            patch_downloader = MSPatchWeb.PatchDownloader( patch_temporary_folder2, self.DatabaseName )
            for download in database.GetDownloadByID( id ):
                print 'Extracting', download.filename, download.url
                if not os.path.isfile( download.filename ):
                    files = patch_downloader.DownloadFileByLink( download.url )
                    file_store.ExtractDownload( download, files[0] )
            # Best-effort cleanup; os.removedirs fails on non-empty folders,
            # so errors are deliberately ignored.
            try:
                os.removedirs( patch_temporary_folder2 )
            except:
                pass
            try:
                os.removedirs( patch_temporary_folder )
            except:
                pass
        files = database.GetFileByDownloadID( id )
        mytemplate = Template( DownloadInfoTemplateText )
        return mytemplate.render(
            patch_id = patch_id,
            patch_name = database.GetPatchNameByID( patch_id ),
            id = id,
            files = files
        )
    DownloadInfo.exposed = True
    def FileInfo( self, patch_id, download_id, id ):
        """Show one stored file plus its source/target patch pairing."""
        #FileStoreDatabase
        database = FileStoreDatabase.Database( self.DatabaseName )
        files = database.GetFileByID( id )
        print 'files', files
        # Expect exactly one file record for this id.
        [ file_index_entry ] = files
        filename = file_index_entry.filename
        target_patch_name = file_index_entry.downloads.patches.name
        # Defaults used when no source/target pairing is found.
        source_id = 0
        source_patch_name = 'Not Found'
        source_filename = 'Not Found'
        target_filename = filename
        target_id = 0
        print 'FileInfo: filename=', filename
        # NOTE(review): ``self.FileStoreDatabase`` is never assigned in this
        # class (__init__ sets no such attribute), so this line looks like it
        # would raise AttributeError as written; it probably should use the
        # local ``database`` -- confirm before relying on this path.
        for ( target_patch_name, target_file_entry, source_patch_name, source_file_entry ) in self.FileStoreDatabase.GetPatchPairsForAnalysis( filename = filename, id = id, patch_name = target_patch_name ):
            print '='*80
            print target_patch_name,source_patch_name
            source_filename = source_file_entry['full_path']
            source_id = source_file_entry['id']
            target_filename = target_file_entry['full_path']
            target_id = target_file_entry['id']
        mytemplate = Template( FileInfoTemplateText )
        database = FileStoreDatabase.Database( self.DatabaseName )
        return mytemplate.render(
            patch_id = patch_id,
            patch_name = database.GetPatchNameByID( patch_id ),
            download_id = download_id,
            download_label = database.GetDownloadLabelByID( download_id),
            id = id,
            file_index_entry=file_index_entry,
            source_patch_name = source_patch_name,
            source_filename = source_filename,
            source_id = source_id,
            target_patch_name = target_patch_name,
            target_filename = target_filename,
            target_id = target_id
        )
    FileInfo.exposed = True
## Project Related ############################################################
def ShowProjects( self ):
#Show Add form
mytemplate = Template( """<%def name="layoutdata()">
<table id="mainTable" class="SortedTable">
<thead>
<tr>
<th>Name</th>
<th>Description</th>
<th>Edit</th>
<th>Remove</th>
</tr>
</thead>
<tbody>
% for project in projects:
<tr>
<td><a href="ShowProject?project_id=${project.id}">${project.name}</a></td>
<td>${project.description} </td>
<td><a href="ShowEditProject?project_id=${project.id}">Edit</a></td>
<td><a href="RemoveProject?project_id=${project.id}">Remove</a></td>
</tr>
% endfor
</tbody>
</table>
<hr>
<a href="ShowAddProjectPage">New Project</a>
</%def>
""" + BodyHTML )
database = FileStoreDatabase.Database( self.DatabaseName )
items = []
try:
projects = database.GetProjects()
except:
pass
return mytemplate.render( projects = projects )
ShowProjects.exposed = True
    def ShowEditProject( self, project_id ):
        """Render the edit form for one project, pre-filled with its
        current name and description; the form posts to UpdateProject."""
        #Show Add form
        mytemplate = Template( """<%def name="layoutdata()">
		<form name="input" action="UpdateProject">
			<table>
			<tr>
				<td>Name</td>
				<td><input type="text" size="50" name="name" value="${name}" /></td>
			</tr>
			<tr>
				<td>Description</td>
				<td><textarea cols="80" rows="10" name="description"/>${description}</textarea></td>
			</tr>
			<table>
			<input type="hidden" name="project_id" value=${project_id} />
			<p><input type="submit" value="Update"/>
		</form>
</%def>
""" + BodyHTML )
        #pass project_id name, description info
        database = FileStoreDatabase.Database( self.DatabaseName )
        project = database.GetProject( project_id )
        return mytemplate.render( project_id = project_id, name = project.name, description = project.description )
    ShowEditProject.exposed = True
def UpdateProject( self, project_id, name, description ):
    """CherryPy endpoint: persist an edited name/description for a project,
    then re-render the project list."""
    db = FileStoreDatabase.Database( self.DatabaseName )
    db.UpdateProject( project_id, name, description )
    return self.ShowProjects()
UpdateProject.exposed = True
def RemoveProject( self, project_id ):
    """CherryPy endpoint: delete the project identified by project_id, then
    re-render the project list."""
    db = FileStoreDatabase.Database( self.DatabaseName )
    db.RemoveProject( project_id )
    return self.ShowProjects()
RemoveProject.exposed = True
def RemoveFromProject( self, project_member_id, project_id ):
    """CherryPy endpoint: remove one or more member files from a project.

    project_member_id may arrive as a single value or a list (CherryPy posts
    a scalar for one checkbox and a list for several); it is normalized to a
    list before processing. The project page is re-rendered afterwards.
    """
    database = FileStoreDatabase.Database( self.DatabaseName )
    # isinstance() replaces the old `type(x)!=type(list())` comparison.
    if not isinstance( project_member_id, list ):
        project_member_id = [project_member_id]
    for one_project_member_id in project_member_id:
        database.RemoveProjectMember( one_project_member_id )
    return self.ShowProject( project_id )
RemoveFromProject.exposed = True
def ShowAddProjectPage( self ):
"""CherryPy endpoint: render the empty 'create new project' form.

The form posts name/description to the AddProject endpoint.
"""
#Show Add form
mytemplate = Template( """<%def name="layoutdata()">
<form name="input" action="AddProject">
<table>
<tr>
<td>Name</td>
<td><input type="text" size="50" name="name" value="" /> </td>
</tr>
<tr>
<td>Description</td>
<td><textarea cols="80" rows="10" name="description"/></textarea></td>
</tr>
<table>
<p><input type="submit" value="Add"/>
</form>
</%def>
""" + BodyHTML )
return mytemplate.render()
ShowAddProjectPage.exposed = True
def AddProjectImpl( self, name, description = '' ):
    """Create a project record, commit the transaction, and return the new
    project object. Internal helper shared by AddProject/AddToProject."""
    db = FileStoreDatabase.Database( self.DatabaseName )
    new_project = db.AddProject( name, description )
    db.Commit()
    return new_project
def AddProject( self, name, description = '' ):
    """CherryPy endpoint: create a project when a non-empty name was posted,
    otherwise re-render the add-project form."""
    if not name:
        #TODO: Show error message
        return self.ShowAddProjectPage()
    self.AddProjectImpl( name, description )
    return self.ShowProjects()
AddProject.exposed = True
def ShowProject( self, project_id = None ):
"""CherryPy endpoint: render one project's page.

Shows the files that belong to the project plus any stored diffing
results (source/target file pairs) recorded for it.
"""
database = FileStoreDatabase.Database( self.DatabaseName )
project_members = database.GetProjectMembers( project_id )
# Flatten each member's file-index record into the tuple shape that
# ProjectContentTemplate expects.
file_information_list = []
for project_member in project_members:
if project_member.fileindexes:
file_info = project_member.fileindexes
file_information_list.append( (file_info.filename, file_info.ctime, file_info.mtime, file_info.added_time, file_info.md5, file_info.sha1, file_info.id, file_info.version_string, project_member.id ) )
# Collect previously stored diff results so the page can link to them.
project_results = database.GetProjectResults( project_id = project_id )
print 'project_results=',project_results
project_result_list = []
for project_result in project_results:
print '\t', project_result.project_id, project_result.projects.name, project_result.source_file_id, project_result.target_file_id
# GetFileByID returns a list; only the first match is used here.
source_file = database.GetFileByID( project_result.source_file_id )[0]
target_file = database.GetFileByID( project_result.target_file_id )[0]
project_result_list.append(
(
project_result.source_file_id,
project_result.target_file_id,
source_file.filename,
source_file.version_string,
target_file.filename,
target_file.version_string
)
)
mytemplate = Template( ProjectContentTemplate, input_encoding='utf-8' , output_encoding='utf-8' )
return mytemplate.render(
company_name = "",
filename = "",
file_information_list = file_information_list,
project_id = project_id,
show_add_to_queue = False,
project_result_list = project_result_list
)
ShowProject.exposed = True
def AddToProject( self, id = None, project_id = None, new_project_name = None, allbox = None ):
"""CherryPy endpoint: add one or more file ids to a project.

If new_project_name is given, that project is created first and its id
used. `id` may be a single value or a list (multi-select form post).
NOTE(review): the parameter name `id` shadows the builtin, but CherryPy
maps request parameter names onto argument names, so it cannot be renamed
without changing the URL interface.
"""
#Display project choose list
items = []
if new_project_name and new_project_name != "":
#Create new project
project = self.AddProjectImpl( new_project_name, "" )
#assign project_id = project.id
project_id = project.id
if not project_id:
return ""
if not id:
return self.ShowProject( project_id )
#Add to project
if type(id)!=type(list()):
id = [id]
database = FileStoreDatabase.Database( self.DatabaseName )
# NOTE(review): this branch is unreachable — `if not project_id: return ""`
# above has already returned, so the project-selection page below can never
# be shown. Left as-is pending confirmation of the intended flow.
if not project_id:
projects = database.GetProjects()
mytemplate = Template( ProjectSelectionTemplate + BodyHTML )
return mytemplate.render( ids = id, projects = projects )
else:
for one_id in id:
database.AddToProject( project_id, one_id )
database.Commit()
return self.ShowProject( project_id )
AddToProject.exposed = True
##############################################################################
def GenerateDGFName( self, source_id, target_id ):
    """Build the path of the DarunGrim diff result database (.dgf) for a
    source/target file-id pair, inside self.DGFDirectory."""
    basename = '%s_%s.dgf' % ( source_id, target_id )
    return os.path.join( self.DGFDirectory, basename )
def ProcessProjectContent( self, source_id = None, target_id = None, operation = None, project_member_id = None, patch_id = 0, download_id = 0, file_id = 0, show_detail = 0, project_id = None, allbox = None ):
"""CherryPy endpoint: dispatch the project-page form buttons.

`operation` carries the submit button's label and selects either a diff
run for the chosen source/target pair or removal of the checked members
from the project.
"""
print 'operation=',operation
print 'project_member_id=',project_member_id
if operation == "Start Diffing":
return self.StartDiff( source_id, target_id, patch_id, download_id = download_id, file_id = file_id, show_detail = show_detail, project_id = project_id )
elif operation == "Remove From Project":
return self.RemoveFromProject( project_member_id, project_id )
#TODO: Put a better error page
return "Error"
ProcessProjectContent.exposed = True
def StartDiff( self, source_id, target_id, patch_id = 0, download_id = 0, file_id = 0, show_detail = 0, reset = 'no', project_id = None ):
"""CherryPy endpoint: run (or reuse) a DarunGrim diff for a file pair.

Launches the IDA-based diffing session for source_id/target_id, records
the result database on the project when project_id is given, then renders
the function-match table — or an error page with the collected IDA logs
when the diff produced no function matches.
"""
databasename = self.GenerateDGFName( source_id, target_id )
# reset='yes' forces the .dgf result database to be regenerated.
reset_database = False
if reset == 'yes':
reset_database = True
self.DarunGrimSessionsInstance.InitFileDiffByID( source_id, target_id, databasename, reset_database )
#Add or Update Project
if project_id:
patch_database = FileStoreDatabase.Database( self.DatabaseName )
patch_database.AddProjectResult( project_id, source_id, target_id, databasename)
databasename = self.GenerateDGFName( source_id, target_id )
database = DarunGrimDatabase.Database( databasename )
#Check if dgf if correct? check size entries in GetFunctionMatchInfoCount?.
# Zero function matches is treated as a failed IDA run: discard the broken
# result database and show the collected logs instead.
if database.GetFunctionMatchInfoCount() == 0:
#Remove DatabaseName
del database
self.DarunGrimSessionsInstance.RemoveDiffer ( source_id, target_id )
try:
os.remove( self.DarunGrimSessionsInstance.DatabaseName )
except:
print 'Error removing database file', self.DarunGrimSessionsInstance.DatabaseName
#Show error page?
if self.DebugLevel > 3:
print 'LogFilename', self.DarunGrimSessionsInstance.LogFilename
print 'LogFilenameForSource', self.DarunGrimSessionsInstance.LogFilenameForSource
print 'LogFilenameForTarget', self.DarunGrimSessionsInstance.LogFilenameForTarget
# Best-effort reads: any of the three log files may be missing.
log = ''
log_for_source = ''
log_for_target = ''
try:
fd = open( self.DarunGrimSessionsInstance.LogFilename )
log = fd.read()
fd.close()
except:
pass
try:
fd = open( self.DarunGrimSessionsInstance.LogFilenameForSource )
log_for_source = fd.read()
fd.close()
except:
pass
try:
fd = open( self.DarunGrimSessionsInstance.LogFilenameForTarget )
log_for_target = fd.read()
fd.close()
except:
pass
mytemplate = Template( """<%def name="layoutdata()">
<title>Something is wrong with IDA execution.</title>
<table>
<tr>
<td><b>Log for Source(${source_filename})</b></td>
</tr>
<tr>
<td><pre>${log_for_source}</pre></td>
</tr>
<tr>
<td><b>Log for Target(${target_filename})</b></td>
</tr>
<tr>
<td><pre>${log_for_target}</pre></td>
</tr>
<tr>
<td><b>Darungrim Plugin Log</b></td>
</tr>
<tr>
<td><pre>${log}</pre></td>
</tr>
<table>
</%def>
""" + BodyHTML )
return mytemplate.render( log = log,
log_for_source = log_for_source,
log_for_target = log_for_target,
source_filename = self.DarunGrimSessionsInstance.SourceFileName,
target_filename = self.DarunGrimSessionsInstance.TargetFileName
)
else:
# Diff succeeded: show the function match table.
return self.GetFunctionMatchInfo(
patch_id,
download_id,
file_id,
source_id=source_id,
target_id = target_id,
show_detail = show_detail,
project_id = project_id
)
StartDiff.exposed = True
def GetFunctionMatchInfo( self, patch_id, download_id, file_id, source_id, target_id, show_detail = 0, project_id = None ):
"""Render the function-match table for an already-computed diff.

Only functions with some difference — unmatched blocks on either side, or
matched blocks with modifications — are listed; identical functions are
filtered out.
"""
databasename = self.GenerateDGFName( source_id, target_id )
database = DarunGrimDatabase.Database( databasename )
file_match_infos = []
function_match_infos = []
for function_match_info in database.GetFunctionMatchInfo():
if function_match_info.non_match_count_for_the_source > 0 or \
function_match_info.non_match_count_for_the_target > 0 or \
function_match_info.match_count_with_modificationfor_the_source > 0:
function_match_infos.append( function_match_info )
# Display names (file names, versions, patch/download labels) come from the
# file-store database for the page header.
patch_database = FileStoreDatabase.Database( self.DatabaseName )
source_file = patch_database.GetFileByID( source_id )[0]
target_file = patch_database.GetFileByID( target_id )[0]
mytemplate = Template( FunctionmatchInfosTemplateText )
# NOTE(review): the show_detail parameter is accepted but a literal 0 is
# always passed to the template below — confirm whether that is intended.
return mytemplate.render(
source_file_name = source_file.filename,
source_file_version_string = source_file.version_string,
target_file_name = target_file.filename,
target_file_version_string = target_file.version_string,
patch_id = patch_id,
patch_name = patch_database.GetPatchNameByID( patch_id ),
download_id = download_id,
download_label = patch_database.GetDownloadLabelByID( download_id),
file_id = file_id,
file_name = patch_database.GetFileNameByID( file_id ),
source_id=source_id,
target_id = target_id,
function_match_infos = function_match_infos,
show_detail = 0,
project_id = project_id
)
def ShowFunctionMatchInfo( self, patch_id, download_id, file_id, source_id, target_id ):
    """CherryPy endpoint: render the function-match table for a diffed pair.

    Thin exposed wrapper around GetFunctionMatchInfo with its defaults.
    """
    return self.GetFunctionMatchInfo(
        patch_id,
        download_id,
        file_id,
        source_id,
        target_id,
    )
ShowFunctionMatchInfo.exposed = True
def ShowBasicBlockMatchInfo( self, patch_id, download_id, file_id, source_id, target_id, source_address, target_address ):
    """CherryPy endpoint: render the basic-block-level disassembly comparison
    for one matched function pair.

    Thin exposed wrapper around GetDisasmComparisonTextByFunctionAddress.
    """
    return self.GetDisasmComparisonTextByFunctionAddress(
        patch_id,
        download_id,
        file_id,
        source_id,
        target_id,
        source_address,
        target_address,
    )
ShowBasicBlockMatchInfo.exposed = True
def GetDisasmComparisonTextByFunctionAddress( self,
patch_id, download_id, file_id,
source_id, target_id, source_address, target_address,
source_function_name = None, target_function_name = None ):
"""Render the side-by-side disassembly comparison for one function pair.

Builds an HTML table of matched/unmatched basic blocks, highlights lines
the pattern analyzer flags as security-relevant, and tells the attached
IDA instances to jump to and color the involved addresses.
"""
patch_database = FileStoreDatabase.Database( self.DatabaseName )
source_file = patch_database.GetFileByID( source_id )[0]
target_file = patch_database.GetFileByID( target_id )[0]
databasename = self.GenerateDGFName( source_id, target_id )
darungrim_database = DarunGrimDatabase.Database( databasename )
# Addresses arrive as strings from the HTTP query; the database and IDA
# interfaces take integers.
source_address = int(source_address)
target_address = int(target_address)
self.DarunGrimSessionsInstance.JumpToAddresses( source_id, target_id, source_address, target_address )
# Fall back to the block names stored in the diff database when no
# explicit function names were supplied.
if not source_function_name:
source_function_name = darungrim_database.GetBlockName( 1, source_address )
if not target_function_name:
target_function_name = darungrim_database.GetBlockName( 2, target_address )
comparison_table = darungrim_database.GetDisasmComparisonTextByFunctionAddress( source_address, target_address )
# Convert block tuples to HTML, scoring lines with possible security
# implications for any unmatched (address 0 on one side) or modified
# (match_rate < 100) block.
text_comparison_table = []
left_line_security_implications_score_total = 0
right_line_security_implications_score_total = 0
for ( left_address, left_lines, right_address, right_lines, match_rate ) in comparison_table:
left_line_security_implications_score = 0
right_line_security_implications_score = 0
if (right_address == 0 and left_address !=0) or match_rate < 100 :
( left_line_security_implications_score, left_line_text ) = self.PatternAnalyzer.GetDisasmLinesWithSecurityImplications( left_lines, right_address == 0 )
else:
left_line_text = "<p>".join( left_lines )
if (left_address == 0 and right_address !=0) or match_rate < 100 :
( right_line_security_implications_score, right_line_text ) = self.PatternAnalyzer.GetDisasmLinesWithSecurityImplications( right_lines, left_address == 0 )
else:
right_line_text = "<p>".join( right_lines )
# Totals are accumulated here but not passed to the template below.
left_line_security_implications_score_total += left_line_security_implications_score
right_line_security_implications_score_total += right_line_security_implications_score
text_comparison_table.append(( left_address, left_line_text, right_address, right_line_text, match_rate ) )
# Color the matched/unmatched blocks inside both attached IDA instances.
( source_address_infos, target_address_infos ) = darungrim_database.GetBlockAddressMatchTableByFunctionAddress( source_address, target_address )
self.DarunGrimSessionsInstance.ColorAddress( source_id, target_id, source_address_infos, target_address_infos )
mytemplate = Template( ComparisonTableTemplateText )
return mytemplate.render(
source_file_name = source_file.filename,
source_file_version_string = source_file.version_string,
target_file_name = target_file.filename,
target_file_version_string = target_file.version_string,
source_function_name = source_function_name,
target_function_name = target_function_name,
comparison_table = text_comparison_table,
source_id = source_id,
target_id = target_id,
source_address = source_address,
target_address = target_address,
patch_id = patch_id,
patch_name = patch_database.GetPatchNameByID( patch_id ),
download_id = download_id,
download_label = patch_database.GetDownloadLabelByID( download_id),
file_id = file_id,
file_name = patch_database.GetFileNameByID( file_id ),
)
def SyncIDA( self, source_id, target_id ):
    """CherryPy endpoint: re-synchronize the two IDA instances of the diff
    session for this source/target pair, then return a small close page."""
    sessions = self.DarunGrimSessionsInstance
    sessions.SyncIDA( source_id, target_id )
    return SyncIDAHTML % CloseButtonHTML
SyncIDA.exposed = True
def OpenInIDA( self, id ):
    """CherryPy endpoint: launch IDA on the stored binary for a file id.

    Prefers an existing .idb next to the binary when one is present.
    `id` shadows the builtin but is kept: CherryPy maps request parameter
    names onto argument names, so renaming would change the URL interface.
    """
    database = FileStoreDatabase.Database( self.DatabaseName )
    file_path = ''
    # GetFileByID returns a list; keep the last entry's path, as before.
    for file_entry in database.GetFileByID( id ):
        file_path = file_entry.full_path
    file_path = os.path.join( self.BinariesStorageDirectory, file_path )
    target_file_path = file_path
    # os.path.splitext handles any extension length; the previous
    # file_path[:-4] silently assumed a 4-character ".xyz" suffix.
    idb_file_path = os.path.splitext( file_path )[0] + '.idb'
    if os.path.exists( idb_file_path ):
        target_file_path = idb_file_path
    import subprocess
    # Fire-and-forget: the server does not wait for IDA to exit.
    subprocess.Popen( [ self.IDAPath, target_file_path ] )
    return OpenInIDAHTML % ( self.IDAPath, target_file_path, CloseButtonHTML )
OpenInIDA.exposed = True
# Script entry point: read the listen port from the configuration file given
# on the command line, then serve the WebServer application with CherryPy,
# mounting ./data as a static directory.
if __name__ == '__main__':
import ConfigParser
import io
import sys
if len( sys.argv ) > 1:
config_file = sys.argv[1]
print 'Configuration file is ' + config_file
fd = open( config_file )
config_data = fd.read()
fd.close()
config = ConfigParser.RawConfigParser()
config.readfp(io.BytesIO( config_data ))
# [Global] Port determines the HTTP listen port.
port = int( config.get("Global", "Port") )
# NOTE(review): `port` is only assigned when a configuration file was
# supplied on the command line — confirm this update is meant to run
# only in that case.
cherrypy.config.update({'server.socket_host': '127.0.0.1',
'server.socket_port': port,
'response.timeout': 1000000
})
# Static file mapping for the /data URL prefix.
config = {
'/data': {
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.getcwd(), 'data'),
'tools.staticdir.content_types': {
'js': 'application/javascript',
'atom': 'application/atom+xml'
}
}
}
cherrypy.tree.mount( WebServer(), config=config )
cherrypy.engine.start()
cherrypy.engine.block()
| {
"content_hash": "972b61dbf75c53595b1cb4cffd419e60",
"timestamp": "",
"source": "github",
"line_count": 901,
"max_line_length": 211,
"avg_line_length": 35.08546059933407,
"alnum_prop": 0.6862267493356953,
"repo_name": "jenix21/DarunGrim",
"id": "179f8eedd297ce753cbdb594bd1cda875445f9e0",
"size": "31612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Src/Scripts/Server/DarunGrim3Server.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5771"
},
{
"name": "C",
"bytes": "5193786"
},
{
"name": "C++",
"bytes": "1064555"
},
{
"name": "CSS",
"bytes": "28653"
},
{
"name": "HTML",
"bytes": "427536"
},
{
"name": "JavaScript",
"bytes": "47762"
},
{
"name": "Makefile",
"bytes": "11356"
},
{
"name": "NSIS",
"bytes": "12019"
},
{
"name": "Python",
"bytes": "476590"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from io import TextIOBase
from splunklib.six import ensure_text
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
import xml.etree.ElementTree as ET
class Event(object):
    """A single event, or fragment of one, that a modular input sends to Splunk.

    Populate the fields of interest and call :meth:`write_to` with an output
    stream to serialize the event as the XML the modular-input protocol
    expects.
    """

    def __init__(self, data=None, stanza=None, time=None, host=None, index=None, source=None,
                 sourcetype=None, done=True, unbroken=True):
        """All parameters are optional.

        **Example with minimal configuration**::

            my_event = Event(
                data="This is a test of my new event.",
                stanza="myStanzaName",
                time="%.3f" % 1372187084.000
            )

        **Example with full configuration**::

            excellent_event = Event(
                data="This is a test of my excellent event.",
                stanza="excellenceOnly",
                time="%.3f" % 1372274622.493,
                host="localhost",
                index="main",
                source="Splunk",
                sourcetype="misc",
                done=True,
                unbroken=True
            )

        :param data: ``string``, the event's text.
        :param stanza: ``string``, name of the input this event should be sent to.
        :param time: ``float``, time in seconds, including up to 3 decimal places to represent milliseconds.
        :param host: ``string``, the event's host, ex: localhost.
        :param index: ``string``, the index this event is specified to write to, or None if default index.
        :param source: ``string``, the source of this event, or None to have Splunk guess.
        :param sourcetype: ``string``, source type currently set on this event, or None to have Splunk guess.
        :param done: ``boolean``, is this a complete ``Event``? False if an ``Event`` fragment.
        :param unbroken: ``boolean``, Is this event completely encapsulated in this ``Event`` object?
        """
        self.data = data
        self.done = done
        self.host = host
        self.index = index
        self.source = source
        self.sourceType = sourcetype
        self.stanza = stanza
        self.time = time
        self.unbroken = unbroken

    def write_to(self, stream):
        """Serialize this ``Event`` as XML and write it to *stream*.

        Only an ``Event`` with its ``data`` field set can be written;
        otherwise a ``ValueError`` is raised.

        :param stream: stream to write XML to.
        """
        if self.data is None:
            raise ValueError("Events must have at least the data field set to be written to XML.")

        root = ET.Element("event")
        if self.stanza is not None:
            root.set("stanza", self.stanza)
        root.set("unbroken", str(int(self.unbroken)))

        # When no time is given, omit the <time> element so Splunk guesses.
        if self.time is not None:
            ET.SubElement(root, "time").text = str(self.time)

        # Remaining optional fields map one-to-one onto child elements.
        for tag, text in (
                ("source", self.source),
                ("sourcetype", self.sourceType),
                ("index", self.index),
                ("host", self.host),
                ("data", self.data)):
            if text is not None:
                ET.SubElement(root, tag).text = text

        if self.done:
            ET.SubElement(root, "done")

        serialized = ET.tostring(root)
        if isinstance(stream, TextIOBase):
            stream.write(ensure_text(serialized))
        else:
            stream.write(serialized)
        stream.flush()
"content_hash": "68e5ffaac14a162c88572f3b4b61fea2",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 109,
"avg_line_length": 38.74,
"alnum_prop": 0.5836344863190501,
"repo_name": "splunk/splunk-sdk-python",
"id": "9cd6cf3ae8b1be4aef5ea44c38acb622180a803f",
"size": "4456",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "splunklib/modularinput/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2203"
},
{
"name": "Python",
"bytes": "793887"
},
{
"name": "Shell",
"bytes": "475"
}
],
"symlink_target": ""
} |
import unittest
from unittest.mock import Mock, patch, mock_open
from collections import Counter
from vasppy.calculation import Calculation, delta_E, delta_stoichiometry, energy_string_to_float, import_calculations_from_file
import numpy as np
class CalculationTestCase( unittest.TestCase ):
    """Unit tests for the vasppy.calculation.Calculation class."""

    def test_calculation_is_initialised( self ):
        # Constructor arguments should be stored unchanged on the instance.
        expected_title = 'A'
        expected_energy = -100.0
        expected_stoichiometry = { 'B': 1, 'C': 2 }
        calc = Calculation( title=expected_title, energy=expected_energy,
                            stoichiometry=expected_stoichiometry )
        self.assertEqual( calc.title, expected_title )
        self.assertEqual( calc.energy, expected_energy )
        self.assertEqual( calc.stoichiometry, expected_stoichiometry )

    def test_mul( self ):
        # Multiplying scales the energy and delegates stoichiometry scaling
        # to Calculation.scale_stoichiometry().
        calc = Calculation( title='A', energy=-100.0, stoichiometry={ 'B': 1, 'C': 2 } )
        calc.scale_stoichiometry = Mock( return_value={ 'B': 2, 'C': 4 } )
        doubled = calc * 2
        self.assertEqual( doubled.title, 'A' )
        self.assertEqual( doubled.energy, -200.0 )
        self.assertEqual( doubled.stoichiometry, { 'B': 2, 'C': 4 } )
        calc.scale_stoichiometry.assert_called_with( 2 )

    def test_truediv( self ):
        # Division is implemented as multiplication by the reciprocal.
        calc = Calculation( title='A', energy=-100.0, stoichiometry={ 'B': 4, 'C': 2 } )
        with patch( 'vasppy.calculation.Calculation.__mul__' ) as mock_mul:
            calc / 2
            mock_mul.assert_called_with( 0.5 )

    def test_scale_stoichiometry( self ):
        calc = Calculation( title='A', energy=-100.0, stoichiometry={ 'B': 1, 'C': 2 } )
        self.assertEqual( calc.scale_stoichiometry( 2 ), { 'B': 2, 'C': 4 } )
class CalculationSupportFunctionsTestCase( unittest.TestCase ):
"""Tests for the module-level helpers in vasppy.calculation:
delta_E, delta_stoichiometry, energy_string_to_float and
import_calculations_from_file."""
def test_delta_E( self ):
# A -> B + C is stoichiometrically balanced, so delta_E is defined.
titles = [ 'A', 'B', 'C' ]
energies = [ -50.5, -23.2, -10.1 ]
stoichiometries = [ { 'B': 1, 'C': 2 }, { 'B': 1, 'C': 1 }, { 'C': 1 } ]
calculations = [ Calculation( title=t, energy=e, stoichiometry=s ) for t, e, s in zip( titles, energies, stoichiometries ) ]
self.assertAlmostEqual( delta_E( reactants=[ calculations[0] ], products=calculations[1:3] ), +17.2 )
@patch( 'vasppy.calculation.delta_stoichiometry' )
def test_delta_E_raises_value_error_if_not_balanced( self, mock_delta_stoichiometry ):
# Unbalanced C count: delta_E must raise and report the imbalance via
# delta_stoichiometry (mocked here to verify the call).
titles = [ 'A', 'B', 'C' ]
energies = [ -50.5, -23.2, -10.1 ]
stoichiometries = [ { 'B': 1, 'C': 2 }, { 'B': 1, 'C': 1 }, { 'C': 2 } ]
calculations = [ Calculation( title=t, energy=e, stoichiometry=s ) for t, e, s in zip( titles, energies, stoichiometries ) ]
with self.assertRaises( ValueError ):
delta_E( reactants=[ calculations[0] ], products=calculations[1:3] )
mock_delta_stoichiometry.assert_called_with( [ calculations[0] ], calculations[1:3] )
def test_delta_stoichiometry( self ):
# B is consumed (-1) and D produced (+1); balanced species drop out.
titles = [ 'A', 'B', 'C' ]
energies = [ -50.5, -23.2, -10.1 ]
stoichiometries = [ { 'B': 1, 'C': 2 }, { 'D': 1, 'C': 1 }, { 'C': 1 } ]
calculations = [ Calculation( title=t, energy=e, stoichiometry=s ) for t, e, s in zip( titles, energies, stoichiometries ) ]
self.assertEqual( delta_stoichiometry( reactants=[ calculations[0] ], products=calculations[1:3] ), { 'B': -1, 'D': 1 } )
def test_energy_string_to_float( self ):
test_strings = { '-1.2345 eV': -1.2345,
'0.2341 eV': 0.2341 }
for k, v in test_strings.items():
self.assertEqual( energy_string_to_float( k ), v )
@patch( 'vasppy.calculation.energy_string_to_float' )
@patch( 'vasppy.calculation.Calculation' )
def test_import_calculation_from_file( self, mock_Calculation, mock_energy_converter ):
# mock_open feeds the YAML; Calculation and the energy parser are mocked
# so only the plumbing in import_calculations_from_file is exercised.
mock_energy_converter.side_effect = [ -0.2414 ]
example_yaml = """
title: this_calculation
stoichiometry:
- A: 2
- B: 4
energy: -0.2414 eV
"""
with patch( 'builtins.open', mock_open( read_data=example_yaml ), create=True ) as m:
import_calculations_from_file( 'example_file' )
mock_Calculation.assert_called_with( energy=-0.2414, stoichiometry=Counter({'B': 4, 'A': 2}), title='this_calculation' )
mock_energy_converter.assert_called_with( '-0.2414 eV' )
def test_import_calculation_from_file_raises_ValueError_if_stoichiometry_is_not_set(self):
# A record without a stoichiometry key is rejected by default.
example_yaml = """
title: this_calculation
energy: -0.2414 eV
"""
with patch('builtins.open', mock_open(read_data=example_yaml), create=True) as m:
with self.assertRaises(ValueError):
import_calculations_from_file('example_file')
def test_import_calculation_from_file_skips_incomplete_files(self):
# With skip_incomplete_records=True the bad record is dropped silently.
example_yaml = """
title: this_calculation
energy: -0.2414 eV
"""
with patch('builtins.open', mock_open(read_data=example_yaml), create=True) as m:
calcs = import_calculations_from_file('example_file',
skip_incomplete_records=True)
self.assertEqual(calcs, {})
# Standard unittest entry point so this test module can be run directly.
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0403fef5ed58b2f3187046e03c0db17f",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 132,
"avg_line_length": 47.65811965811966,
"alnum_prop": 0.600609756097561,
"repo_name": "bjmorgan/vasppy",
"id": "6c418f828462e87289e4d3089c4545561d026551",
"size": "5576",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_calculation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "252155"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import copy
import os
import pickle
import unittest
import warnings
from django.core.exceptions import SuspiciousOperation
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (QueryDict, HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseNotAllowed,
HttpResponseNotModified, StreamingHttpResponse,
SimpleCookie, BadHeaderError,
parse_cookie)
from django.test import TestCase
from django.utils.encoding import smart_str, force_text
from django.utils.functional import lazy
from django.utils._os import upath
from django.utils import six
lazystr = lazy(force_text, six.text_type)
class QueryDictTests(unittest.TestCase):
def test_missing_key(self):
q = QueryDict(str(''))
self.assertRaises(KeyError, q.__getitem__, 'foo')
def test_immutability(self):
q = QueryDict(str(''))
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
def test_immutable_get_with_default(self):
q = QueryDict(str(''))
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict(str(''))
self.assertEqual(q.getlist('foo'), [])
if six.PY2:
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(list(six.iteritems(q)), [])
self.assertEqual(list(six.iterlists(q)), [])
self.assertEqual(list(six.iterkeys(q)), [])
self.assertEqual(list(six.itervalues(q)), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict(str('foo=bar'))
self.assertEqual(q['foo'], 'bar')
self.assertRaises(KeyError, q.__getitem__, 'bar')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
if six.PY2:
self.assertFalse(q.has_key('bar'))
self.assertFalse('bar' in q)
self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
self.assertEqual(list(six.iterkeys(q)), ['foo'])
self.assertEqual(list(six.itervalues(q)), ['bar'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict(str(''), mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict(str(''), mutable=True)
q['next'] = '/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict(str('')).copy()
self.assertRaises(KeyError, q.__getitem__, "foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict(str('')).copy()
q['name'] = 'john'
del q['name']
self.assertFalse('name' in q)
def test_basic_mutable_operations(self):
q = QueryDict(str('')).copy()
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
self.assertListEqual(sorted(list(six.iteritems(q))),
[('foo', 'another'), ('name', 'john')])
self.assertListEqual(sorted(list(six.iterlists(q))),
[('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
self.assertListEqual(sorted(list(six.iterkeys(q))),
['foo', 'name'])
self.assertListEqual(sorted(list(six.itervalues(q))),
['another', 'john'])
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict(str('vote=yes&vote=no'))
self.assertEqual(q['vote'], 'no')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('vote', 'default'), 'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), ['yes', 'no'])
self.assertEqual(q.getlist('foo'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
if six.PY2:
self.assertEqual(q.has_key('vote'), True)
self.assertEqual('vote' in q, True)
if six.PY2:
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
self.assertEqual(list(six.iterkeys(q)), ['vote'])
self.assertEqual(list(six.itervalues(q)), ['no'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertRaises(AttributeError, q.__delitem__, 'vote')
if six.PY2:
def test_invalid_input_encoding(self):
"""
QueryDicts must be able to handle invalid input encoding (in this
case, bad UTF-8 encoding).
This test doesn't apply under Python 3 because the URL is a string
and not a bytestring.
"""
q = QueryDict(str(b'foo=bar&foo=\xff'))
self.assertEqual(q['foo'], '\ufffd')
self.assertEqual(q.getlist('foo'), ['bar', '\ufffd'])
def test_pickle(self):
    """A QueryDict survives a pickle round-trip unchanged.

    Covers the empty dict, distinct keys, and repeated keys.
    """
    for query_string in ('', 'a=b&c=d', 'a=b&c=d&a=1'):
        original = QueryDict(str(query_string))
        restored = pickle.loads(pickle.dumps(original, 2))
        self.assertEqual(original == restored, True)
def test_update_from_querydict(self):
    """Regression test for #8278: QueryDict.update(QueryDict)"""
    target = QueryDict(str("a=1&a=2"), mutable=True)
    source = QueryDict(str("a=3&a=4"))
    target.update(source)
    # update() appends the incoming values rather than replacing them.
    self.assertEqual(target.getlist('a'), ['1', '2', '3', '4'])
def test_non_default_encoding(self):
    """#13572 - QueryDict with a non-default encoding"""
    # '%A4' decodes to the euro sign in iso-8859-15 (it would not in
    # the default utf-8).
    q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
    self.assertEqual(q.encoding, 'iso-8859-15')
    self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
    self.assertEqual(q.urlencode(), 'cur=%A4')
    # copy() must preserve the encoding, not reset it to the default.
    q = q.copy()
    self.assertEqual(q.encoding, 'iso-8859-15')
    self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
    self.assertEqual(q.urlencode(), 'cur=%A4')
    # copy.copy() and copy.deepcopy() must preserve the encoding as well.
    self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
    self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
class HttpResponseTests(unittest.TestCase):
    """Tests of HttpResponse: header coercion, content handling, iteration."""

    def test_headers_type(self):
        """Header keys and values are coerced to native ``str``."""
        r = HttpResponse()
        # The following tests explicitly test types in addition to values
        # because in Python 2 u'foo' == b'foo'.
        # ASCII unicode or bytes values are converted to native strings.
        r['key'] = 'test'
        self.assertEqual(r['key'], str('test'))
        self.assertIsInstance(r['key'], str)
        r['key'] = 'test'.encode('ascii')
        self.assertEqual(r['key'], str('test'))
        self.assertIsInstance(r['key'], str)
        # Latin-1 unicode or bytes values are also converted to native strings.
        r['key'] = 'café'
        self.assertEqual(r['key'], smart_str('café', 'latin-1'))
        self.assertIsInstance(r['key'], str)
        r['key'] = 'café'.encode('latin-1')
        self.assertEqual(r['key'], smart_str('café', 'latin-1'))
        self.assertIsInstance(r['key'], str)
        # Other unicode values are MIME-encoded (there's no way to pass them as bytes).
        r['key'] = '†'
        self.assertEqual(r['key'], str('=?utf-8?b?4oCg?='))
        self.assertIsInstance(r['key'], str)
        # The response also converts unicode or bytes keys to strings, but requires
        # them to contain ASCII
        r = HttpResponse()
        del r['Content-Type']
        r['foo'] = 'bar'
        l = list(r.items())
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], ('foo', 'bar'))
        self.assertIsInstance(l[0][0], str)
        r = HttpResponse()
        del r['Content-Type']
        r[b'foo'] = 'bar'
        l = list(r.items())
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], ('foo', 'bar'))
        self.assertIsInstance(l[0][0], str)
        r = HttpResponse()
        # Non-ASCII header keys are rejected outright.
        self.assertRaises(UnicodeError, r.__setitem__, 'føø', 'bar')
        self.assertRaises(UnicodeError, r.__setitem__, 'føø'.encode('utf-8'), 'bar')

    def test_long_line(self):
        """A long header value must not be wrapped onto multiple lines."""
        # Bug #20889: long lines trigger newlines to be added to headers
        # (which is not allowed due to bug #10188)
        h = HttpResponse()
        f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1')
        f = f.decode('utf-8')
        # Setting the header must not raise or inject a newline.
        h['Content-Disposition'] = 'attachment; filename="%s"' % f

    def test_newlines_in_headers(self):
        """Header names containing CR or LF are rejected."""
        # Bug #10188: Do not allow newlines in headers (CR or LF)
        r = HttpResponse()
        self.assertRaises(BadHeaderError, r.__setitem__, 'test\rstr', 'test')
        self.assertRaises(BadHeaderError, r.__setitem__, 'test\nstr', 'test')

    def test_dict_behavior(self):
        """
        Test for bug #14020: Make HttpResponse.get work like dict.get
        """
        r = HttpResponse()
        self.assertEqual(r.get('test'), None)

    def test_non_string_content(self):
        # Bug 16494: HttpResponse should behave consistently with non-strings
        r = HttpResponse(12345)
        self.assertEqual(r.content, b'12345')
        # test content via property
        r = HttpResponse()
        r.content = 12345
        self.assertEqual(r.content, b'12345')

    def test_iter_content(self):
        """Iterable content is joined and coerced to bytes."""
        r = HttpResponse(['abc', 'def', 'ghi'])
        self.assertEqual(r.content, b'abcdefghi')
        # test iter content via property
        r = HttpResponse()
        r.content = ['idan', 'alex', 'jacob']
        self.assertEqual(r.content, b'idanalexjacob')
        r = HttpResponse()
        r.content = [1, 2, 3]
        self.assertEqual(r.content, b'123')
        # test odd inputs
        r = HttpResponse()
        r.content = ['1', '2', 3, '\u079e']
        # '\xde\x9e' == unichr(1950).encode('utf-8')
        self.assertEqual(r.content, b'123\xde\x9e')
        # with Content-Encoding header
        r = HttpResponse()
        r['Content-Encoding'] = 'winning'
        r.content = [b'abc', b'def']
        self.assertEqual(r.content, b'abcdef')
        # With Content-Encoding set, text cannot be encoded implicitly.
        self.assertRaises(TypeError if six.PY3 else UnicodeEncodeError,
                          setattr, r, 'content', ['\u079e'])
        # .content can safely be accessed multiple times.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.content, r.content)
        self.assertEqual(r.content, b'helloworld')
        # accessing the iterator works (once) after accessing .content
        self.assertEqual(b''.join(r), b'helloworld')
        self.assertEqual(b''.join(r), b'')
        # accessing .content still works
        self.assertEqual(r.content, b'helloworld')
        # Accessing .content also works if the response was iterated first.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(b''.join(r), b'helloworld')
        self.assertEqual(r.content, b'helloworld')
        # Additional content can be written to the response.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.content, b'helloworld')
        r.write('!')
        self.assertEqual(r.content, b'helloworld!')

    def test_iterator_isnt_rewound(self):
        # Regression test for #13222
        r = HttpResponse('abc')
        i = iter(r)
        self.assertEqual(list(i), [b'abc'])
        # The same iterator must be exhausted, not restarted.
        self.assertEqual(list(i), [])

    def test_lazy_content(self):
        """Lazy strings are evaluated when content is accessed."""
        r = HttpResponse(lazystr('helloworld'))
        self.assertEqual(r.content, b'helloworld')

    def test_file_interface(self):
        """write()/tell() work, counting bytes after encoding."""
        r = HttpResponse()
        r.write(b"hello")
        self.assertEqual(r.tell(), 5)
        # 'привет' is 12 bytes in UTF-8, so tell() advances 5 -> 17.
        r.write("привет")
        self.assertEqual(r.tell(), 17)
        r = HttpResponse(['abc'])
        r.write('def')
        self.assertEqual(r.tell(), 6)
        self.assertEqual(r.content, b'abcdef')
        # with Content-Encoding header
        r = HttpResponse()
        r['Content-Encoding'] = 'winning'
        r.write(b'abc')
        r.write(b'def')
        self.assertEqual(r.content, b'abcdef')

    def test_unsafe_redirect(self):
        """Redirects to non-HTTP(S) schemes raise SuspiciousOperation."""
        bad_urls = [
            'data:text/html,<script>window.alert("xss")</script>',
            'mailto:test@example.com',
            'file:///etc/passwd',
        ]
        for url in bad_urls:
            self.assertRaises(SuspiciousOperation,
                              HttpResponseRedirect, url)
            self.assertRaises(SuspiciousOperation,
                              HttpResponsePermanentRedirect, url)
class HttpResponseSubclassesTests(TestCase):
    """Tests of HttpResponse subclasses: redirects, 304, and 405."""

    def test_redirect(self):
        """HttpResponseRedirect sends 302 and exposes the target as .url."""
        response = HttpResponseRedirect('/redirected/')
        self.assertEqual(response.status_code, 302)
        # Test that standard HttpResponse init args can be used
        response = HttpResponseRedirect('/redirected/',
            content='The resource has temporarily moved',
            content_type='text/html')
        self.assertContains(response, 'The resource has temporarily moved', status_code=302)
        # Test that url attribute is right
        self.assertEqual(response.url, response['Location'])

    def test_redirect_lazy(self):
        """Make sure HttpResponseRedirect works with lazy strings."""
        r = HttpResponseRedirect(lazystr('/redirected/'))
        self.assertEqual(r.url, '/redirected/')

    def test_not_modified(self):
        """A 304 response carries no body and rejects content assignment."""
        response = HttpResponseNotModified()
        self.assertEqual(response.status_code, 304)
        # 304 responses should not have content/content-type
        with self.assertRaises(AttributeError):
            response.content = "Hello dear"
        self.assertNotIn('content-type', response)

    def test_not_allowed(self):
        """HttpResponseNotAllowed sends 405 with the permitted methods."""
        response = HttpResponseNotAllowed(['GET'])
        self.assertEqual(response.status_code, 405)
        # Test that standard HttpResponse init args can be used
        response = HttpResponseNotAllowed(['GET'],
            content='Only the GET method is allowed',
            content_type='text/html')
        self.assertContains(response, 'Only the GET method is allowed', status_code=405)
class StreamingHttpResponseTests(TestCase):
    """Tests of StreamingHttpResponse's one-shot, content-less behavior."""

    def test_streaming_response(self):
        """Streaming responses yield bytes once and have no .content."""
        r = StreamingHttpResponse(iter(['hello', 'world']))
        # iterating over the response itself yields bytestring chunks.
        chunks = list(r)
        self.assertEqual(chunks, [b'hello', b'world'])
        for chunk in chunks:
            self.assertIsInstance(chunk, six.binary_type)
        # and the response can only be iterated once.
        self.assertEqual(list(r), [])
        # even when a sequence that can be iterated many times, like a list,
        # is given as content.
        r = StreamingHttpResponse(['abc', 'def'])
        self.assertEqual(list(r), [b'abc', b'def'])
        self.assertEqual(list(r), [])
        # streaming responses don't have a `content` attribute.
        self.assertFalse(hasattr(r, 'content'))
        # and you can't accidentally assign to a `content` attribute.
        with self.assertRaises(AttributeError):
            r.content = 'xyz'
        # but they do have a `streaming_content` attribute.
        self.assertTrue(hasattr(r, 'streaming_content'))
        # that exists so we can check if a response is streaming, and wrap or
        # replace the content iterator.
        r.streaming_content = iter(['abc', 'def'])
        r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
        self.assertEqual(list(r), [b'ABC', b'DEF'])
        # coercing a streaming response to bytes doesn't return a complete HTTP
        # message like a regular response does. it only gives us the headers.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(
            six.binary_type(r), b'Content-Type: text/html; charset=utf-8')
        # and this won't consume its content.
        self.assertEqual(list(r), [b'hello', b'world'])
        # additional content cannot be written to the response.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        with self.assertRaises(Exception):
            r.write('!')
        # and we can't tell the current position.
        with self.assertRaises(Exception):
            r.tell()
class FileCloseTests(TestCase):
    """File-like content given to a response is closed with the response."""

    def setUp(self):
        # Disable the request_finished signal during this test
        # to avoid interfering with the database connection.
        request_finished.disconnect(close_old_connections)

    def tearDown(self):
        # Restore the signal handler disconnected in setUp().
        request_finished.connect(close_old_connections)

    def test_response(self):
        """HttpResponse closes its file content on close(), not iteration."""
        filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
        # file isn't closed until we close the response.
        file1 = open(filename)
        r = HttpResponse(file1)
        self.assertFalse(file1.closed)
        r.close()
        self.assertTrue(file1.closed)
        # don't automatically close file when we finish iterating the response.
        file1 = open(filename)
        r = HttpResponse(file1)
        self.assertFalse(file1.closed)
        # Iterating a plain HttpResponse is deprecated; silence the warning.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            list(r)
        self.assertFalse(file1.closed)
        r.close()
        self.assertTrue(file1.closed)
        # when multiple file are assigned as content, make sure they are all
        # closed with the response.
        file1 = open(filename)
        file2 = open(filename)
        r = HttpResponse(file1)
        r.content = file2
        self.assertFalse(file1.closed)
        self.assertFalse(file2.closed)
        r.close()
        self.assertTrue(file1.closed)
        self.assertTrue(file2.closed)

    def test_streaming_response(self):
        """StreamingHttpResponse also closes all assigned files on close()."""
        filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
        # file isn't closed until we close the response.
        file1 = open(filename)
        r = StreamingHttpResponse(file1)
        self.assertFalse(file1.closed)
        r.close()
        self.assertTrue(file1.closed)
        # when multiple file are assigned as content, make sure they are all
        # closed with the response.
        file1 = open(filename)
        file2 = open(filename)
        r = StreamingHttpResponse(file1)
        r.streaming_content = file2
        self.assertFalse(file1.closed)
        self.assertFalse(file2.closed)
        r.close()
        self.assertTrue(file1.closed)
        self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
    """Tests of cookie encoding and decoding via SimpleCookie/parse_cookie."""

    def test_encode(self):
        """
        Test that we don't output tricky characters in encoded value
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        # Semicolons and commas must be escaped in the serialized cookie.
        # (assertNotIn gives a useful failure message, unlike assertTrue.)
        self.assertNotIn(";", c.output().rstrip(';'))  # IE compat
        self.assertNotIn(",", c.output().rstrip(';'))  # Safari compat

    def test_decode(self):
        """
        Test that we can still preserve semi-colons and commas
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        c2 = SimpleCookie()
        # Round-trip through the serialized form.
        c2.load(c.output())
        self.assertEqual(c['test'].value, c2['test'].value)

    def test_decode_2(self):
        """
        Test that we haven't broken normal encoding
        """
        c = SimpleCookie()
        c['test'] = b"\xf0"
        c2 = SimpleCookie()
        c2.load(c.output())
        self.assertEqual(c['test'].value, c2['test'].value)

    def test_nonstandard_keys(self):
        """
        Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
        """
        self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes').keys())

    def test_repeated_nonstandard_keys(self):
        """
        Test that a repeated non-standard name doesn't affect all cookies. Ticket #15852
        """
        self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes').keys())

    def test_httponly_after_load(self):
        """
        Test that we can use httponly attribute on cookies that we load
        """
        c = SimpleCookie()
        c.load("name=val")
        c['name']['httponly'] = True
        self.assertTrue(c['name']['httponly'])

    def test_load_dict(self):
        """Loading from a plain dict populates the cookie values."""
        c = SimpleCookie()
        c.load({'name': 'val'})
        self.assertEqual(c['name'].value, 'val')

    @unittest.skipUnless(six.PY2, "PY3 throws an exception on invalid cookie keys.")
    def test_bad_cookie(self):
        """
        Regression test for #18403
        """
        r = HttpResponse()
        # An unparsable name lands in bad_cookies instead of raising.
        r.set_cookie("a:.b/", 1)
        self.assertEqual(len(r.cookies.bad_cookies), 1)
| {
"content_hash": "6fb424f45f10080dafd2f55ffe4f2c6d",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 95,
"avg_line_length": 38.76910828025478,
"alnum_prop": 0.5940362262291042,
"repo_name": "tastynoodle/django",
"id": "287d800c21ce8864a27b7d3e6e996c5ee8269e6c",
"size": "24393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/httpwrappers/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Tests that HTML generation is awesome."""
import datetime
import glob
import json
import os
import os.path
import re
import sys
import coverage
from coverage.backward import unicode_class
from coverage import env
from coverage.files import flat_rootname
import coverage.html
from coverage.misc import CoverageException, NotPython, NoSource
from tests.coveragetest import CoverageTest, TESTS_DIR
from tests.goldtest import CoverageGoldTest
from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any
class HtmlTestHelpers(CoverageTest):
    """Methods that help with HTML tests."""

    def create_initial_files(self):
        """Create the source files we need to run these tests."""
        self.make_file("main_file.py", """\
            import helper1, helper2
            helper1.func1(12)
            helper2.func2(12)
            """)
        self.make_file("helper1.py", """\
            def func1(x):
                if x % 2:
                    print("odd")
            """)
        self.make_file("helper2.py", """\
            def func2(x):
                print("x is %d" % x)
            """)

    def run_coverage(self, covargs=None, htmlargs=None):
        """Run coverage.py on main_file.py, and create an HTML report."""
        self.clean_local_file_imports()
        cov = coverage.Coverage(**(covargs or {}))
        self.start_import_stop(cov, "main_file")
        # Returns the total-coverage percentage from html_report().
        return cov.html_report(**(htmlargs or {}))

    def remove_html_files(self):
        """Remove the HTML files created as part of the HTML report."""
        os.remove("htmlcov/index.html")
        os.remove("htmlcov/main_file_py.html")
        os.remove("htmlcov/helper1_py.html")
        os.remove("htmlcov/helper2_py.html")

    def get_html_report_content(self, module):
        """Return the content of the HTML report for `module`."""
        # "sub/mod.py" is reported to "sub_mod_py.html".
        filename = module.replace(".", "_").replace("/", "_") + ".html"
        filename = os.path.join("htmlcov", filename)
        with open(filename) as f:
            return f.read()

    def get_html_index_content(self):
        """Return the content of index.html.

        Timestamps are replaced with a placeholder so that clocks don't matter.

        """
        with open("htmlcov/index.html") as f:
            index = f.read()
        index = re.sub(
            r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2}",
            r"created at YYYY-MM-DD HH:MM",
            index,
        )
        return index

    def assert_correct_timestamp(self, html):
        """Extract the timestamp from `html`, and assert it is recent."""
        timestamp_pat = r"created at (\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2})"
        m = re.search(timestamp_pat, html)
        self.assertTrue(m, "Didn't find a timestamp!")
        timestamp = datetime.datetime(*map(int, m.groups()))
        # The timestamp only records the minute, so the delta could be from
        # 12:00 to 12:01:59, or two minutes.
        self.assert_recent_datetime(
            timestamp,
            seconds=120,
            msg="Timestamp is wrong: {0}".format(timestamp),
        )
class HtmlDeltaTest(HtmlTestHelpers, CoverageTest):
    """Tests of the HTML delta speed-ups."""

    def setUp(self):
        super(HtmlDeltaTest, self).setUp()
        # At least one of our tests monkey-patches the version of coverage.py,
        # so grab it here to restore it later.
        self.real_coverage_version = coverage.__version__
        self.addCleanup(setattr, coverage, "__version__", self.real_coverage_version)

    def test_html_created(self):
        """Basic HTML generation: all of the report files should be created."""
        # Test basic HTML generation: files should be created.
        self.create_initial_files()
        self.run_coverage()
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
        self.assert_exists("htmlcov/style.css")
        self.assert_exists("htmlcov/coverage_html.js")

    def test_html_delta_from_source_change(self):
        """Only pages whose source changed are regenerated."""
        # HTML generation can create only the files that have changed.
        # In this case, helper1 changes because its source is different.
        self.create_initial_files()
        self.run_coverage()
        index1 = self.get_html_index_content()
        self.remove_html_files()
        # Now change a file and do it again
        self.make_file("helper1.py", """\
            def func1(x): # A nice function
                if x % 2:
                    print("odd")
            """)
        self.run_coverage()
        # Only the changed files should have been created.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_doesnt_exist("htmlcov/main_file_py.html")
        self.assert_doesnt_exist("htmlcov/helper2_py.html")
        index2 = self.get_html_index_content()
        self.assertMultiLineEqual(index1, index2)

    def test_html_delta_from_coverage_change(self):
        """Only pages whose coverage data changed are regenerated."""
        # HTML generation can create only the files that have changed.
        # In this case, helper1 changes because its coverage is different.
        self.create_initial_files()
        self.run_coverage()
        self.remove_html_files()
        # Now change a file and do it again
        self.make_file("main_file.py", """\
            import helper1, helper2
            helper1.func1(23)
            helper2.func2(23)
            """)
        self.run_coverage()
        # Only the changed files should have been created.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_doesnt_exist("htmlcov/helper2_py.html")

    def test_html_delta_from_settings_change(self):
        """A settings change invalidates every page."""
        # HTML generation can create only the files that have changed.
        # In this case, everything changes because the coverage.py settings
        # have changed.
        self.create_initial_files()
        self.run_coverage(covargs=dict(omit=[]))
        index1 = self.get_html_index_content()
        self.remove_html_files()
        self.run_coverage(covargs=dict(omit=['xyzzy*']))
        # All the files have been reported again.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
        index2 = self.get_html_index_content()
        self.assertMultiLineEqual(index1, index2)

    def test_html_delta_from_coverage_version_change(self):
        """A coverage.py version change invalidates every page."""
        # HTML generation can create only the files that have changed.
        # In this case, everything changes because the coverage.py version has
        # changed.
        self.create_initial_files()
        self.run_coverage()
        index1 = self.get_html_index_content()
        self.remove_html_files()
        # "Upgrade" coverage.py!
        coverage.__version__ = "XYZZY"
        self.run_coverage()
        # All the files have been reported again.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
        index2 = self.get_html_index_content()
        # The version string appears in the output, so normalize before diffing.
        fixed_index2 = index2.replace("XYZZY", self.real_coverage_version)
        self.assertMultiLineEqual(index1, fixed_index2)

    def test_file_becomes_100(self):
        """A file that rises to 100% is dropped when skip_covered is set."""
        self.create_initial_files()
        self.run_coverage()
        # Now change a file and do it again
        self.make_file("main_file.py", """\
            import helper1, helper2
            # helper1 is now 100%
            helper1.func1(12)
            helper1.func1(23)
            """)
        self.run_coverage(htmlargs=dict(skip_covered=True))
        # The 100% file, skipped, shouldn't be here.
        self.assert_doesnt_exist("htmlcov/helper1_py.html")

    def test_status_format_change(self):
        """An unknown status.json format forces a full regeneration."""
        self.create_initial_files()
        self.run_coverage()
        self.remove_html_files()
        with open("htmlcov/status.json") as status_json:
            status_data = json.load(status_json)
        self.assertEqual(status_data['format'], 1)
        # Pretend the status file was written by a different format version.
        status_data['format'] = 2
        with open("htmlcov/status.json", "w") as status_json:
            json.dump(status_data, status_json)
        self.run_coverage()
        # All the files have been reported again.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
class HtmlTitleTest(HtmlTestHelpers, CoverageTest):
    """Tests of the HTML title support."""

    def test_default_title(self):
        """With no configuration, the report uses the stock title."""
        self.create_initial_files()
        self.run_coverage()
        index = self.get_html_index_content()
        self.assertIn("<title>Coverage report</title>", index)
        self.assertIn("<h1>Coverage report:", index)

    def test_title_set_in_config_file(self):
        """A title from .coveragerc is used, with HTML escaping."""
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = Metrics & stuff!\n")
        self.run_coverage()
        index = self.get_html_index_content()
        # "&" must appear as the &amp; entity in the output.
        self.assertIn("<title>Metrics &amp; stuff!</title>", index)
        self.assertIn("<h1>Metrics &amp; stuff!:", index)

    def test_non_ascii_title_set_in_config_file(self):
        """Non-ASCII titles are emitted as numeric character references."""
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = «ταБЬℓσ» numbers")
        self.run_coverage()
        index = self.get_html_index_content()
        self.assertIn(
            "<title>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " numbers", index
        )
        self.assertIn(
            "<h1>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " numbers", index
        )

    def test_title_set_in_args(self):
        """A title passed to html_report() overrides the config file."""
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = Good title\n")
        self.run_coverage(htmlargs=dict(title="«ταБЬℓσ» & stüff!"))
        index = self.get_html_index_content()
        self.assertIn(
            "<title>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " &amp; st&#252;ff!</title>", index
        )
        self.assertIn(
            "<h1>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " &amp; st&#252;ff!:", index
        )
class HtmlWithUnparsableFilesTest(HtmlTestHelpers, CoverageTest):
    """Test the behavior when measuring unparsable files."""

    def test_dotpy_not_python(self):
        """A .py file that isn't Python raises NotPython at report time."""
        self.make_file("main.py", "import innocuous")
        self.make_file("innocuous.py", "a = 1")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        # Replace the measured file with non-Python before reporting.
        self.make_file("innocuous.py", "<h1>This isn't python!</h1>")
        msg = "Couldn't parse '.*innocuous.py' as Python source: .* at line 1"
        with self.assertRaisesRegex(NotPython, msg):
            cov.html_report()

    def test_dotpy_not_python_ignored(self):
        """With ignore_errors, an unparsable .py file produces a warning."""
        self.make_file("main.py", "import innocuous")
        self.make_file("innocuous.py", "a = 2")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        self.make_file("innocuous.py", "<h1>This isn't python!</h1>")
        cov.html_report(ignore_errors=True)
        self.assertEqual(
            len(cov._warnings),
            1,
            "Expected a warning to be thrown when an invalid python file is parsed")
        self.assertIn(
            "Could not parse Python file",
            cov._warnings[0],
            "Warning message should be in 'invalid file' warning"
        )
        self.assertIn(
            "innocuous.py",
            cov._warnings[0],
            "Filename should be in 'invalid file' warning"
        )
        self.assert_exists("htmlcov/index.html")
        # This would be better as a glob, if the HTML layout changes:
        self.assert_doesnt_exist("htmlcov/innocuous.html")

    def test_dothtml_not_python(self):
        """A non-.py measured file that isn't Python is silently skipped."""
        # We run a .html file, and when reporting, we can't parse it as
        # Python.  Since it wasn't .py, no error is reported.

        # Run an "HTML" file
        self.make_file("innocuous.html", "a = 3")
        self.run_command("coverage run innocuous.html")
        # Before reporting, change it to be an HTML file.
        self.make_file("innocuous.html", "<h1>This isn't python at all!</h1>")
        output = self.run_command("coverage html")
        self.assertEqual(output.strip(), "No data to report.")

    def test_execed_liar_ignored(self):
        """A fake non-Python __file__ from exec'd code doesn't break reports."""
        # Jinja2 sets __file__ to be a non-Python file, and then execs code.
        # If that file contains non-Python code, a TokenError shouldn't
        # have been raised when writing the HTML report.
        source = "exec(compile('','','exec'), {'__file__': 'liar.html'})"
        self.make_file("liar.py", source)
        self.make_file("liar.html", "{# Whoops, not python code #}")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "liar")
        cov.html_report()
        self.assert_exists("htmlcov/index.html")

    def test_execed_liar_ignored_indentation_error(self):
        """Untokenizable fake source must not raise IndentationError."""
        # Jinja2 sets __file__ to be a non-Python file, and then execs code.
        # If that file contains untokenizable code, we shouldn't get an
        # exception.
        source = "exec(compile('','','exec'), {'__file__': 'liar.html'})"
        self.make_file("liar.py", source)
        # Tokenize will raise an IndentationError if it can't dedent.
        self.make_file("liar.html", "0\n  2\n 1\n")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "liar")
        cov.html_report()
        self.assert_exists("htmlcov/index.html")

    def test_decode_error(self):
        """Undecodable bytes in source show as the replacement character."""
        # https://bitbucket.org/ned/coveragepy/issue/351/files-with-incorrect-encoding-are-ignored
        # imp.load_module won't load a file with an undecodable character
        # in a comment, though Python will run them.  So we'll change the
        # file after running.
        self.make_file("main.py", "import sub.not_ascii")
        self.make_file("sub/__init__.py")
        self.make_file("sub/not_ascii.py", """\
            # coding: utf-8
            a = 1  # Isn't this great?!
            """)
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        # Create the undecodable version of the file. make_file is too helpful,
        # so get down and dirty with bytes.
        with open("sub/not_ascii.py", "wb") as f:
            f.write(b"# coding: utf-8\na = 1  # Isn't this great?\xcb!\n")
        with open("sub/not_ascii.py", "rb") as f:
            undecodable = f.read()
        self.assertIn(b"?\xcb!", undecodable)
        cov.html_report()
        html_report = self.get_html_report_content("sub/not_ascii.py")
        expected = "# Isn&#39;t this great?&#65533;!"
        self.assertIn(expected, html_report)

    def test_formfeeds(self):
        """Form feed characters in source don't derail the report."""
        # https://bitbucket.org/ned/coveragepy/issue/360/html-reports-get-confused-by-l-in-the-code
        self.make_file("formfeed.py", "line_one = 1\n\f\nline_two = 2\n")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "formfeed")
        cov.html_report()
        formfeed_html = self.get_html_report_content("formfeed.py")
        self.assertIn("line_two", formfeed_html)
class HtmlTest(HtmlTestHelpers, CoverageTest):
    """Moar HTML tests."""

    def test_missing_source_file_incorrect_message(self):
        """A deleted source file raises NoSource with the right path."""
        # https://bitbucket.org/ned/coveragepy/issue/60
        self.make_file("thefile.py", "import sub.another\n")
        self.make_file("sub/__init__.py", "")
        self.make_file("sub/another.py", "print('another')\n")
        cov = coverage.Coverage()
        self.start_import_stop(cov, 'thefile')
        os.remove("sub/another.py")
        missing_file = os.path.join(self.temp_dir, "sub", "another.py")
        missing_file = os.path.realpath(missing_file)
        msg = "(?i)No source for code: '%s'" % re.escape(missing_file)
        with self.assertRaisesRegex(NoSource, msg):
            cov.html_report()

    def test_extensionless_file_collides_with_extension(self):
        """'program' and 'program.py' get distinct report pages."""
        # It used to be that "program" and "program.py" would both be reported
        # to "program.html".  Now they are not.
        # https://bitbucket.org/ned/coveragepy/issue/69
        self.make_file("program", "import program\n")
        self.make_file("program.py", "a = 1\n")
        self.run_command("coverage run program")
        self.run_command("coverage html")
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/program.html")
        self.assert_exists("htmlcov/program_py.html")

    def test_has_date_stamp_in_files(self):
        """Both the index and file pages carry a recent timestamp."""
        self.create_initial_files()
        self.run_coverage()
        with open("htmlcov/index.html") as f:
            self.assert_correct_timestamp(f.read())
        with open("htmlcov/main_file_py.html") as f:
            self.assert_correct_timestamp(f.read())

    def test_reporting_on_unmeasured_file(self):
        # It should be ok to ask for an HTML report on a file that wasn't even
        # measured at all.  https://bitbucket.org/ned/coveragepy/issues/403
        self.create_initial_files()
        self.make_file("other.py", "a = 1\n")
        self.run_coverage(htmlargs=dict(morfs=['other.py']))
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/other_py.html")

    def test_shining_panda_fix(self):
        # The ShiningPanda plugin looks for "status.dat" to find HTML reports.
        # Accommodate them, but only if we are running under Jenkins.
        self.set_environ("JENKINS_URL", "Something or other")
        self.create_initial_files()
        self.run_coverage()
        self.assert_exists("htmlcov/status.dat")

    def test_report_skip_covered_no_branches(self):
        """skip_covered drops fully-covered pages (statement coverage)."""
        self.make_file("main_file.py", """
            import not_covered

            def normal():
                print("z")
            normal()
            """)
        self.make_file("not_covered.py", """
            def not_covered():
                print("n")
            """)
        self.run_coverage(htmlargs=dict(skip_covered=True))
        self.assert_exists("htmlcov/index.html")
        self.assert_doesnt_exist("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/not_covered_py.html")

    def test_report_skip_covered_100(self):
        """skip_covered with everything at 100% still returns the total."""
        self.make_file("main_file.py", """
            def normal():
                print("z")
            normal()
            """)
        res = self.run_coverage(covargs=dict(source="."), htmlargs=dict(skip_covered=True))
        self.assertEqual(res, 100.0)
        self.assert_doesnt_exist("htmlcov/main_file_py.html")

    def test_report_skip_covered_branches(self):
        """skip_covered drops fully-covered pages (branch coverage)."""
        self.make_file("main_file.py", """
            import not_covered

            def normal():
                print("z")
            normal()
            """)
        self.make_file("not_covered.py", """
            def not_covered():
                print("n")
            """)
        self.run_coverage(covargs=dict(branch=True), htmlargs=dict(skip_covered=True))
        self.assert_exists("htmlcov/index.html")
        self.assert_doesnt_exist("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/not_covered_py.html")
class HtmlStaticFileTest(CoverageTest):
    """Tests of the static file copying for the HTML report."""

    def setUp(self):
        super(HtmlStaticFileTest, self).setUp()
        # STATIC_PATH is module-global state; restore it after each test.
        original_path = list(coverage.html.STATIC_PATH)
        self.addCleanup(setattr, coverage.html, 'STATIC_PATH', original_path)

    def test_copying_static_files_from_system(self):
        """A file found on STATIC_PATH is copied into the report."""
        # Make a new place for static files.
        self.make_file("static_here/jquery.min.js", "Not Really JQuery!")
        coverage.html.STATIC_PATH.insert(0, "static_here")

        self.make_file("main.py", "print(17)")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        cov.html_report()

        with open("htmlcov/jquery.min.js") as f:
            jquery = f.read()
        self.assertEqual(jquery, "Not Really JQuery!")

    def test_copying_static_files_from_system_in_dir(self):
        """Files in package-style subdirectories are flattened on copy."""
        # Make a new place for static files.
        INSTALLED = [
            "jquery/jquery.min.js",
            "jquery-hotkeys/jquery.hotkeys.js",
            "jquery-isonscreen/jquery.isonscreen.js",
            "jquery-tablesorter/jquery.tablesorter.min.js",
        ]
        for fpath in INSTALLED:
            self.make_file(os.path.join("static_here", fpath), "Not real.")
        coverage.html.STATIC_PATH.insert(0, "static_here")

        self.make_file("main.py", "print(17)")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        cov.html_report()

        for fpath in INSTALLED:
            # Only the basename survives; the directory part is dropped.
            the_file = os.path.basename(fpath)
            with open(os.path.join("htmlcov", the_file)) as f:
                contents = f.read()
            self.assertEqual(contents, "Not real.")

    def test_cant_find_static_files(self):
        """An unusable STATIC_PATH raises CoverageException."""
        # Make the path point to useless places.
        coverage.html.STATIC_PATH = ["/xyzzy"]

        self.make_file("main.py", "print(17)")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        msg = "Couldn't find static file u?'.*'"
        with self.assertRaisesRegex(CoverageException, msg):
            cov.html_report()
def compare_html(dir1, dir2):
    """Specialized compare function for HTML files.

    Scrubs version numbers, timestamps, and machine-specific paths out of
    both trees before delegating to `compare`.
    """
    scrubs = [
        (r'/coverage.readthedocs.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'),
        (r'coverage.py v[\d.abc]+', 'coverage.py vVER'),
        (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d', 'created at DATE'),
        # Some words are identifiers in one version, keywords in another.
        (r'<span class="(nam|key)">(print|True|False)</span>', r'<span class="nam">\2</span>'),
        # Occasionally an absolute path is in the HTML report.
        (re.escape(TESTS_DIR), 'TESTS_DIR'),
        (r'/Users/ned/coverage/trunk/tests', 'TESTS_DIR'),
        (flat_rootname(unicode_class(TESTS_DIR)), '_TESTS_DIR'),
        (flat_rootname(u'/Users/ned/coverage/trunk/tests'), '_TESTS_DIR'),
    ]
    if env.WINDOWS:
        # For file paths: normalize backslashes to forward slashes.
        scrubs.append((r"\\", "/"))
    return compare(dir1, dir2, file_pattern="*.html", scrubs=scrubs)
class HtmlGoldTests(CoverageGoldTest):
"""Tests of HTML reporting that use gold files."""
root_dir = 'tests/farm/html'
def test_a(self):
    """Report for a simple module matches the gold files."""
    self.output_dir("out/a")

    with change_dir("src"):
        # pylint: disable=import-error
        cov = coverage.Coverage()
        cov.start()
        import a            # pragma: nested
        cov.stop()          # pragma: nested
        cov.html_report(a, directory='../out/a')

    compare_html("gold_a", "out/a")
    # Spot-check the highlighted source and the coverage percentage.
    contains(
        "out/a/a_py.html",
        ('<span class="key">if</span> <span class="num">1</span> '
         '<span class="op">&lt;</span> <span class="num">2</span>'),
        ('    <span class="nam">a</span> '
         '<span class="op">=</span> <span class="num">3</span>'),
        '<span class="pc_cov">67%</span>',
    )
    contains(
        "out/a/index.html",
        '<a href="a_py.html">a.py</a>',
        '<span class="pc_cov">67%</span>',
        '<td class="right" data-ratio="2 3">67%</td>',
    )
def test_b_branch(self):
self.output_dir("out/b_branch")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage(branch=True)
cov.start()
import b # pragma: nested
cov.stop() # pragma: nested
cov.html_report(b, directory="../out/b_branch")
compare_html("gold_b_branch", "out/b_branch")
contains(
"out/b_branch/b_py.html",
('<span class="key">if</span> <span class="nam">x</span> '
'<span class="op"><</span> <span class="num">2</span>'),
(' <span class="nam">a</span> <span class="op">=</span> '
'<span class="num">3</span>'),
'<span class="pc_cov">70%</span>',
('<span class="annotate short">8 ↛ 11</span>'
'<span class="annotate long">line 8 didn\'t jump to line 11, '
'because the condition on line 8 was never false</span>'),
('<span class="annotate short">17 ↛ exit</span>'
'<span class="annotate long">line 17 didn\'t return from function \'two\', '
'because the condition on line 17 was never false</span>'),
('<span class="annotate short">25 ↛ 26, '
'25 ↛ 28</span>'
'<span class="annotate long">2 missed branches: '
'1) line 25 didn\'t jump to line 26, '
'because the condition on line 25 was never true, '
'2) line 25 didn\'t jump to line 28, '
'because the condition on line 25 was never false</span>'),
)
contains(
"out/b_branch/index.html",
'<a href="b_py.html">b.py</a>',
'<span class="pc_cov">70%</span>',
'<td class="right" data-ratio="16 23">70%</td>',
)
def test_bom(self):
self.output_dir("out/bom")
with change_dir("src"):
# It's important that the source file really have a BOM, which can
# get lost, so check that it's really there.
with open("bom.py", "rb") as f:
first_three = f.read(3)
assert first_three == b"\xef\xbb\xbf"
# pylint: disable=import-error
cov = coverage.Coverage()
cov.start()
import bom # pragma: nested
cov.stop() # pragma: nested
cov.html_report(bom, directory="../out/bom")
compare_html("gold_bom", "out/bom")
contains(
"out/bom/bom_py.html",
'<span class="str">"3×4 = 12, ÷2 = 6±0"</span>',
)
def test_isolatin1(self):
self.output_dir("out/isolatin1")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage()
cov.start()
import isolatin1 # pragma: nested
cov.stop() # pragma: nested
cov.html_report(isolatin1, directory="../out/isolatin1")
compare_html("gold_isolatin1", "out/isolatin1")
contains(
"out/isolatin1/isolatin1_py.html",
'<span class="str">"3×4 = 12, ÷2 = 6±0"</span>',
)
def test_omit_1(self):
self.output_dir("out/omit_1")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage(include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable
cov.stop() # pragma: nested
cov.html_report(directory="../out/omit_1")
compare_html("gold_omit_1", "out/omit_1")
def test_omit_2(self):
self.output_dir("out/omit_2")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage(include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable
cov.stop() # pragma: nested
cov.html_report(directory="../out/omit_2", omit=["m1.py"])
compare_html("gold_omit_2", "out/omit_2")
def test_omit_3(self):
self.output_dir("out/omit_3")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage(include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable
cov.stop() # pragma: nested
cov.html_report(directory="../out/omit_3", omit=["m1.py", "m2.py"])
compare_html("gold_omit_3", "out/omit_3")
def test_omit_4(self):
self.output_dir("out/omit_4")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage(config_file="omit4.ini", include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable
cov.stop() # pragma: nested
cov.html_report(directory="../out/omit_4")
compare_html("gold_omit_4", "out/omit_4")
def test_omit_5(self):
self.output_dir("out/omit_5")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage(config_file="omit5.ini", include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable
cov.stop() # pragma: nested
cov.html_report()
compare_html("gold_omit_5", "out/omit_5")
def test_other(self):
self.output_dir("out/other")
with change_dir("src"):
# pylint: disable=import-error
sys.path.insert(0, "../othersrc")
cov = coverage.Coverage(include=["./*", "../othersrc/*"])
cov.start()
import here # pragma: nested # pylint: disable=unused-variable
cov.stop() # pragma: nested
cov.html_report(directory="../out/other")
# Different platforms will name the "other" file differently. Rename it
for p in glob.glob("out/other/*_other_py.html"):
os.rename(p, "out/other/blah_blah_other_py.html")
compare_html("gold_other", "out/other")
contains(
"out/other/index.html",
'<a href="here_py.html">here.py</a>',
'other_py.html">', 'other.py</a>',
)
def test_partial(self):
self.output_dir("out/partial")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage(config_file="partial.ini")
cov.start()
import partial # pragma: nested
cov.stop() # pragma: nested
cov.html_report(partial, directory="../out/partial")
compare_html("gold_partial", "out/partial")
contains(
"out/partial/partial_py.html",
'<p id="t8" class="stm run hide_run">',
'<p id="t11" class="stm run hide_run">',
'<p id="t14" class="stm run hide_run">',
# The "if 0" and "if 1" statements are optimized away.
'<p id="t17" class="pln">',
# The "raise AssertionError" is excluded by regex in the .ini.
'<p id="t24" class="exc">',
)
contains(
"out/partial/index.html",
'<a href="partial_py.html">partial.py</a>',
)
contains(
"out/partial/index.html",
'<span class="pc_cov">100%</span>'
)
def test_styled(self):
self.output_dir("out/styled")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage()
cov.start()
import a # pragma: nested
cov.stop() # pragma: nested
cov.html_report(a, directory="../out/styled", extra_css="extra.css")
compare_html("gold_styled", "out/styled")
compare("gold_styled", "out/styled", size_within=10, file_pattern="*.css")
contains(
"out/styled/a_py.html",
'<link rel="stylesheet" href="extra.css" type="text/css">',
('<span class="key">if</span> <span class="num">1</span> '
'<span class="op"><</span> <span class="num">2</span>'),
(' <span class="nam">a</span> <span class="op">=</span> '
'<span class="num">3</span>'),
'<span class="pc_cov">67%</span>'
)
contains(
"out/styled/index.html",
'<link rel="stylesheet" href="extra.css" type="text/css">',
'<a href="a_py.html">a.py</a>',
'<span class="pc_cov">67%</span>'
)
def test_tabbed(self):
self.output_dir("out/tabbed")
with change_dir("src"):
# pylint: disable=import-error
cov = coverage.Coverage()
cov.start()
import tabbed # pragma: nested
cov.stop() # pragma: nested
cov.html_report(tabbed, directory="../out/tabbed")
# Editors like to change things, make sure our source file still has tabs.
contains("src/tabbed.py", "\tif x:\t\t\t\t\t# look nice")
contains(
"out/tabbed/tabbed_py.html",
'> <span class="key">if</span> '
'<span class="nam">x</span><span class="op">:</span>'
' '
'<span class="com"># look nice</span>'
)
doesnt_contain("out/tabbed/tabbed_py.html", "\t")
def test_unicode(self):
self.output_dir("out/unicode")
with change_dir("src"):
# pylint: disable=import-error, redefined-builtin
cov = coverage.Coverage()
cov.start()
import unicode # pragma: nested
cov.stop() # pragma: nested
cov.html_report(unicode, directory="../out/unicode")
compare_html("gold_unicode", "out/unicode")
contains(
"out/unicode/unicode_py.html",
'<span class="str">"ʎd˙ǝbɐɹǝʌoɔ"</span>',
)
contains_any(
"out/unicode/unicode_py.html",
'<span class="str">"db40,dd00: x��"</span>',
'<span class="str">"db40,dd00: x󠄀"</span>',
)
| {
"content_hash": "aae39c7d63b9965eb1a97cc0df607aff",
"timestamp": "",
"source": "github",
"line_count": 890,
"max_line_length": 99,
"avg_line_length": 38.6314606741573,
"alnum_prop": 0.5636088651038335,
"repo_name": "blueyed/coveragepy",
"id": "9706cd33c5ab61a4c356fbe4a8e928662d50895c",
"size": "34581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_html.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3467"
},
{
"name": "C",
"bytes": "51630"
},
{
"name": "CSS",
"bytes": "13550"
},
{
"name": "HTML",
"bytes": "162518"
},
{
"name": "JavaScript",
"bytes": "30478"
},
{
"name": "Makefile",
"bytes": "3473"
},
{
"name": "PowerShell",
"bytes": "7288"
},
{
"name": "Python",
"bytes": "995953"
},
{
"name": "Shell",
"bytes": "1281"
}
],
"symlink_target": ""
} |
from collections import UserDict
class TupleUserDict(UserDict):
    """A dict addressed by multi-part keys that are canonicalized first.

    Subclasses implement :meth:`_get_key` to turn the raw key components
    (e.g. an ``(Interface, Interface)`` pair) into the canonical key that
    is actually stored.  Every magic method below funnels through
    ``_get_key`` so all access styles agree on the stored key.

    Note: ``collections.UserDict`` is an ordinary new-style class, so the
    base-class behavior is reached via ``super()`` (the previous explicit
    ``UserDict.<method>(self, ...)`` calls and their "old-style class, no
    super" comments were leftovers from a Python 2 port).
    """
    #########################################
    # Implement these in a subclass
    #########################################
    # TODO: implement same behaviour via magic methods ...
    @staticmethod
    def _get_key(*args, **kwargs):
        """Return the canonical dict key built from the key components."""
        raise NotImplementedError
    #########################################
    # Magic Methods
    #########################################
    def __getitem__(self, item):
        """Look up the value stored under the canonicalized *item*.

        Parameters
        ----------
        item : (Interface, Interface)
        """
        return super().__getitem__(self._get_key(*item))
    def __setitem__(self, key, item):
        """Store *item* under the canonicalized *key*.

        Parameters
        ----------
        key : (Interface, Interface)
        item : object
        """
        return super().__setitem__(self._get_key(*key), item)
    def __delitem__(self, key):
        """Remove the entry stored under the canonicalized *key*.

        Parameters
        ----------
        key : (Interface, Interface)
        """
        return super().__delitem__(self._get_key(*key))
    # NOTE: important for get() to work properly!
    def __contains__(self, key):
        """Membership test on the canonicalized *key*.

        Parameters
        ----------
        key : (Interface, Interface)
        """
        return super().__contains__(self._get_key(*key))
| {
"content_hash": "b0d813fd724fa03db1a3fa9fbfa7568b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 58,
"avg_line_length": 25.193548387096776,
"alnum_prop": 0.44878361075544176,
"repo_name": "miniworld-project/miniworld_core",
"id": "f2d7dac9c75822d5c0911312ac00af3353367e91",
"size": "1562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniworld/model/collections/TupleUserDict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "696934"
},
{
"name": "Shell",
"bytes": "1770"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Reverses migration 0018 by dropping the 'multiplicador' field from
    # the MetaModeloReferencia model.
    dependencies = [
        ('comercial', '0018_metamodeloreferencia_multiplicador'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='metamodeloreferencia',
            name='multiplicador',
        ),
    ]
| {
"content_hash": "7d74f6f84e6e47a394b6d900d917dca2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 65,
"avg_line_length": 21.11764705882353,
"alnum_prop": 0.6323119777158774,
"repo_name": "anselmobd/fo2",
"id": "3f63cb7c08eaf7fba4512a9ec2763f716145abcf",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/comercial/migrations/0019_remove_metamodeloreferencia_multiplicador.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Alters SeminarAttachment.seminar: on_delete becomes PROTECT and the
    # reverse accessor is named 'seminar_attachments'.
    dependencies = [
        ('seminar', '0004_auto_20180621_1606'),
    ]
    operations = [
        migrations.AlterField(
            model_name='seminarattachment',
            name='seminar',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='seminar_attachments', to='seminar.Seminar'),
        ),
    ]
| {
"content_hash": "6d1988b627fdd5585ea6021e49c9b459",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 139,
"avg_line_length": 27.705882352941178,
"alnum_prop": 0.6454352441613588,
"repo_name": "PLUS-POSTECH/study.plus.or.kr",
"id": "a29f96b6068fceb6da9e07edd00bedd8ec1f8b22",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/seminar/migrations/0005_auto_20180703_1701.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "41"
},
{
"name": "Dockerfile",
"bytes": "321"
},
{
"name": "HTML",
"bytes": "56893"
},
{
"name": "Python",
"bytes": "103045"
},
{
"name": "Shell",
"bytes": "4363"
}
],
"symlink_target": ""
} |
from oslo_utils import timeutils
from senlin.common import exception
from senlin.common import utils as common_utils
from senlin.engine import cluster_policy as cpm
from senlin.objects import cluster_policy as cpo
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
# Fixed UUID-style identifiers shared by all the tests below.
CLUSTER_ID = '8d674833-6c0c-4e1c-928b-4bb3a4ebd4ae'
POLICY_ID = 'fa573870-fe44-42aa-84a9-08462f0e6999'
PROFILE_ID = '12abef70-ab31-484a-92aa-02388f0e6ccc'
class TestClusterPolicy(base.SenlinTestCase):
    """Unit tests for the ClusterPolicy cluster/policy binding object."""
    def setUp(self):
        super(TestClusterPolicy, self).setUp()
        # Dummy request context used by all the DB operations below.
        self.context = utils.dummy_context()
    def test_cluster_policy_init(self):
        """A freshly built binding carries the kwargs and empty defaults."""
        values = {
            'priority': 12,
            'enabled': True,
        }
        cp = cpm.ClusterPolicy(CLUSTER_ID, POLICY_ID, **values)
        # No DB id until store() is called.
        self.assertIsNone(cp.id)
        self.assertEqual(CLUSTER_ID, cp.cluster_id)
        self.assertEqual(POLICY_ID, cp.policy_id)
        self.assertEqual(12, cp.priority)
        self.assertTrue(cp.enabled)
        self.assertEqual({}, cp.data)
        self.assertIsNone(cp.last_op)
        self.assertEqual('', cp.cluster_name)
        self.assertEqual('', cp.policy_type)
        self.assertEqual('', cp.policy_name)
    def test_cluster_policy_store(self):
        """store() creates a DB record, then updates that same record."""
        utils.create_profile(self.context, PROFILE_ID)
        cluster = utils.create_cluster(self.context, CLUSTER_ID, PROFILE_ID)
        policy = utils.create_policy(self.context, POLICY_ID)
        values = {
            'priority': 12,
            'enabled': True,
        }
        cp = cpm.ClusterPolicy(cluster.id, policy.id, **values)
        self.assertIsNone(cp.id)
        cp_id = cp.store(self.context)
        self.assertIsNotNone(cp_id)
        result = cpo.ClusterPolicy.get(self.context, CLUSTER_ID, POLICY_ID)
        self.assertIsNotNone(result)
        self.assertEqual(12, result.priority)
        self.assertTrue(result.enabled)
        self.assertEqual({}, result.data)
        self.assertIsNone(result.last_op)
        # do an update
        cp.enabled = False
        cp.priority = 60
        cp.data = {'foo': 'bar'}
        timestamp = timeutils.utcnow(True)
        cp.last_op = timestamp
        new_id = cp.store(self.context)
        # Storing an already-stored binding must keep the same DB id.
        self.assertEqual(cp_id, new_id)
        result = cpo.ClusterPolicy.get(self.context, CLUSTER_ID, POLICY_ID)
        self.assertIsNotNone(result)
        self.assertFalse(result.enabled)
        self.assertEqual(60, result.priority)
        self.assertEqual({'foo': 'bar'}, result.data)
        self.assertEqual(common_utils.isotime(timestamp),
                         common_utils.isotime(result.last_op))
    def test_cluster_policy_load(self):
        """load() raises when not attached and hydrates all fields when it is."""
        ex = self.assertRaises(exception.PolicyNotAttached,
                               cpm.ClusterPolicy.load,
                               self.context, 'some-cluster', 'any-policy')
        self.assertEqual("The policy 'any-policy' is not attached to the "
                         "specified cluster 'some-cluster'.",
                         str(ex))
        utils.create_profile(self.context, PROFILE_ID)
        cluster = utils.create_cluster(self.context, CLUSTER_ID, PROFILE_ID)
        policy = utils.create_policy(self.context, POLICY_ID)
        values = {
            'priority': 12,
            'enabled': True,
        }
        cp = cpm.ClusterPolicy(cluster.id, policy.id, **values)
        cp_id = cp.store(self.context)
        result = cpm.ClusterPolicy.load(self.context, CLUSTER_ID, POLICY_ID)
        self.assertEqual(cp_id, result.id)
        self.assertEqual(cluster.id, result.cluster_id)
        self.assertEqual(policy.id, result.policy_id)
        self.assertTrue(result.enabled)
        self.assertEqual(12, result.priority)
        self.assertEqual({}, result.data)
        self.assertIsNone(result.last_op)
        # Names/types below come from the fixtures created by utils.create_*.
        self.assertEqual('test-cluster', result.cluster_name)
        self.assertEqual('senlin.policy.dummy-1.0', result.policy_type)
        self.assertEqual('test_policy', result.policy_name)
    def test_cluster_policy_to_dict(self):
        """to_dict() exposes the full binding as a plain dictionary."""
        values = {
            'priority': 12,
            'enabled': True,
        }
        cp = cpm.ClusterPolicy(CLUSTER_ID, POLICY_ID, **values)
        self.assertIsNone(cp.id)
        expected = {
            'id': None,
            'cluster_id': CLUSTER_ID,
            'policy_id': POLICY_ID,
            'enabled': True,
            'data': {},
            'last_op': None,
            'cluster_name': '',
            'policy_type': '',
            'policy_name': '',
        }
        self.assertEqual(expected, cp.to_dict())
| {
"content_hash": "e5207c78c797b7fe8ccbe6083e644098",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 35.53846153846154,
"alnum_prop": 0.604978354978355,
"repo_name": "openstack/senlin",
"id": "1ea077dbc99312098392c09114284c0382b50ada",
"size": "5169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/tests/unit/engine/test_cluster_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "69788"
},
{
"name": "Python",
"bytes": "3755028"
},
{
"name": "Shell",
"bytes": "24272"
}
],
"symlink_target": ""
} |
"""ext_gw_mode
Revision ID: 128e042a2b68
Revises: 32b517556ec9
Create Date: 2013-03-27 00:35:17.323280
"""
# revision identifiers, used by Alembic.
revision = '128e042a2b68'
down_revision = '32b517556ec9'
# Change to ['*'] if this migration applies to all plugins
# Only these plugin classes get the routers.enable_snat column (see the
# should_run() guard in upgrade()/downgrade()).
migration_for_plugins = [
    'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
    'neutron.plugins.nec.nec_plugin.NECPluginV2',
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
    """Add routers.enable_snat (Boolean, NOT NULL, default True).

    Skipped entirely unless active_plugin is in migration_for_plugins.
    """
    if not migration.should_run(active_plugin, migration_for_plugins):
        return
    op.add_column('routers', sa.Column('enable_snat', sa.Boolean(),
                                       nullable=False, default=True))
    # Set enable_snat to True for existing routers
    op.execute("UPDATE routers SET enable_snat=True")
def downgrade(active_plugin=None, options=None):
    """Drop the routers.enable_snat column added by upgrade().

    Skipped entirely unless active_plugin is in migration_for_plugins.
    """
    if not migration.should_run(active_plugin, migration_for_plugins):
        return
    op.drop_column('routers', 'enable_snat')
| {
"content_hash": "24b77e1088a78fcb9ea933799f066d74",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 29.8,
"alnum_prop": 0.7240865026099925,
"repo_name": "ykaneko/neutron",
"id": "20d08ae14af446fdb8a9b00ff56dceaad1cc95f5",
"size": "2002",
"binary": false,
"copies": "2",
"ref": "refs/heads/bug/1198917",
"path": "neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "4566707"
},
{
"name": "Shell",
"bytes": "9109"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from itertools import combinations
import numpy as np
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils._testing import ignore_warnings
from sklearn.decomposition._factor_analysis import _ortho_rotation
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3
    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise
    # An unknown svd_method must be rejected, both at construction...
    with pytest.raises(ValueError):
        FactorAnalysis(svd_method="foo")
    # ...and when set after the fact.
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = "foo"
    with pytest.raises(ValueError):
        fa_fail.fit(X)
    fas = []
    for method in ["randomized", "lapack"]:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)
        X_t = fa.transform(X)
        assert X_t.shape == (n_samples, n_components)
        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
        # NOTE(review): np.all(np.diff(...)) only checks the diffs are
        # nonzero, not positive — the assert message suggests a monotone
        # increase was intended; confirm.
        diff = np.all(np.diff(fa.loglike_))
        assert diff > 0.0, "Log likelihood dif not increase"
        # Sample Covariance
        scov = np.cov(X, rowvar=0.0, bias=1.0)
        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert diff < 0.1, "Mean absolute difference is %f" % diff
    # noise_variance_init length must match the number of features.
    fa = FactorAnalysis(
        n_components=n_components, noise_variance_init=np.ones(n_features)
    )
    with pytest.raises(ValueError):
        fa.fit(X[:, :2])
    def f(x, y):
        return np.abs(getattr(x, y))  # sign will not be equal
    # Both SVD methods must agree (up to sign) on the fitted attributes.
    fa1, fa2 = fas
    for attr in ["loglike_", "components_", "noise_variance_"]:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))
    # With max_iter=1 the fit cannot converge and must warn.
    fa1.max_iter = 1
    fa1.verbose = True
    with pytest.warns(ConvergenceWarning):
        fa1.fit(X)
    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
    # test rotation
    n_components = 2
    results, projections = {}, {}
    for method in (None, "varimax", "quartimax"):
        fa_var = FactorAnalysis(n_components=n_components, rotation=method)
        results[method] = fa_var.fit_transform(X)
        projections[method] = fa_var.get_covariance()
    # Rotations change the loadings but not the modeled covariance.
    for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2):
        assert not np.allclose(results[rot1], results[rot2])
        assert np.allclose(projections[rot1], projections[rot2], atol=3)
    with pytest.raises(ValueError):
        FactorAnalysis(rotation="not_implemented").fit_transform(X)
    # test against R's psych::principal with rotate="varimax"
    # (i.e., the values below stem from rotating the components in R)
    # R's factor analysis returns quite different values; therefore, we only
    # test the rotation itself
    factors = np.array(
        [
            [0.89421016, -0.35854928, -0.27770122, 0.03773647],
            [-0.45081822, -0.89132754, 0.0932195, -0.01787973],
            [0.99500666, -0.02031465, 0.05426497, -0.11539407],
            [0.96822861, -0.06299656, 0.24411001, 0.07540887],
        ]
    )
    r_solution = np.array(
        [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]]
    )
    rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T
    assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3)
| {
"content_hash": "b4d4f2d45e57aa3af529e385bc0bdc2e",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 81,
"avg_line_length": 36.95798319327731,
"alnum_prop": 0.6421100500227376,
"repo_name": "huzq/scikit-learn",
"id": "08aad7e5d32e98839023badf66a7c6363d802a93",
"size": "4526",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/decomposition/tests/test_factor_analysis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6394128"
},
{
"name": "Shell",
"bytes": "9250"
}
],
"symlink_target": ""
} |
class Node:
    """Singly-linked list node holding a payload and a successor link.

    The successor must be another Node (or None); the type invariant is
    enforced by the ``next`` property setter.
    """
    __slots__ = '_data', '_next'

    def __init__(self, data, successor):
        """Create a node with *data* and an optional *successor* node.

        Raises
        ------
        ValueError
            If *successor* is neither None nor a Node.  (Previously the
            constructor assigned ``_next`` directly and so silently
            accepted values the ``next`` setter would reject.)
        """
        self._data = data
        # Route through the property setter so the invariant holds from
        # construction onward, not only on later reassignment.
        self.next = successor

    @property
    def data(self):
        """The payload stored in this node (read-only)."""
        return self._data

    @property
    def next(self):
        """The successor node, or None at the end of the chain."""
        return self._next

    @next.setter
    def next(self, other: 'Node'):
        # None terminates the chain; anything else must be a Node.
        if other is None:
            self._next = None
            return
        if not isinstance(other, self.__class__):
            raise ValueError("Type invariant violation.")
        self._next = other

    def __repr__(self):
        # Recursively renders the rest of the chain via next's repr.
        return '#<{} data={} next={}>' \
            .format(self.__class__.__name__, self.data, self.next)
| {
"content_hash": "840d92976abb6159c7a4fda99701924a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 66,
"avg_line_length": 23.678571428571427,
"alnum_prop": 0.5143288084464555,
"repo_name": "rlishtaba/py-algorithms",
"id": "415526cd18f84015c525ff642b382a828116b714",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_algorithms/lists/node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "199367"
}
],
"symlink_target": ""
} |
"""HomeKit session fixtures."""
from unittest.mock import patch
from pyhap.accessory_driver import AccessoryDriver
import pytest
from homeassistant.components.homekit.const import EVENT_HOMEKIT_CHANGED
from homeassistant.core import callback as ha_callback
@pytest.fixture(scope="session")
def hk_driver():
    """Return a custom AccessoryDriver instance for HomeKit accessory init."""
    # Patch pyhap internals (Zeroconf, AccessoryEncoder, HAPServer and the
    # driver's publish method) so the driver can be constructed in tests.
    with patch("pyhap.accessory_driver.Zeroconf"), patch(
        "pyhap.accessory_driver.AccessoryEncoder"
    ), patch("pyhap.accessory_driver.HAPServer"), patch(
        "pyhap.accessory_driver.AccessoryDriver.publish"
    ):
        return AccessoryDriver(pincode=b"123-45-678", address="127.0.0.1")
@pytest.fixture
def events(hass):
    """Yield caught homekit_changed events."""
    # Every EVENT_HOMEKIT_CHANGED fired on the bus is appended to this
    # list, which the test receives and can inspect after acting.
    events = []
    hass.bus.async_listen(
        EVENT_HOMEKIT_CHANGED, ha_callback(lambda e: events.append(e))
    )
    yield events
| {
"content_hash": "5b2d1020a882c815a11ab53cb2ea5aa8",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 31.517241379310345,
"alnum_prop": 0.7242888402625821,
"repo_name": "postlund/home-assistant",
"id": "ef534d0e47286f97534deb6553b479ebb0340dc4",
"size": "914",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/homekit/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(1, "../../")
import h2o,tests
def stratified_split():
    """Smoke test for H2OFrame.stratified_split on the covtype dataset.

    Splits on column 54 (presumably the response column — confirm) and
    prints the per-class proportions of the full frame, the train split
    and the test split so they can be compared by eye.
    """
    fr = h2o.import_file(tests.locate("bigdata/laptop/covtype/covtype.data"))
    stratified = fr[54].stratified_split()
    # The split vector labels each row "train" or "test".
    train = fr[stratified=="train"]
    test = fr[stratified=="test"]
    # NOTE: Python 2 print statements; each prints one proportion table.
    print (fr[54].table()["Count"] / fr[54].table()["Count"].sum()).show()
    print (train[54].table()["Count"] / train[54].table()["Count"].sum()).show()
    print (test[54].table()["Count"] / test[54].table()["Count"].sum()).show()
if __name__ == "__main__":
    # Dispatch through the project's pyunit runner when executed directly.
    tests.run_test(sys.argv, stratified_split)
| {
"content_hash": "a5a717ea7d7a2aa7f22050499f51d357",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 37.86666666666667,
"alnum_prop": 0.6285211267605634,
"repo_name": "kyoren/https-github.com-h2oai-h2o-3",
"id": "7fa5cddee88c7495c442b4aec3b35a53b930324d",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_misc/pyunit_stratified_split_medium.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162402"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "HTML",
"bytes": "139398"
},
{
"name": "Java",
"bytes": "5567457"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34048"
},
{
"name": "Python",
"bytes": "2179689"
},
{
"name": "R",
"bytes": "1677531"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "16346"
},
{
"name": "Shell",
"bytes": "45182"
},
{
"name": "TeX",
"bytes": "546032"
}
],
"symlink_target": ""
} |
import os
from vee.cli import style_error
from vee.commands.main import command, argument
from vee.subproc import call
from vee.utils import makedirs
@command(
    help='run a command in the database',
    parse_known_args=True,
    aliases=['sqlite'],
    acquire_lock=True,
    group='plumbing',
)
def sqlite3(args, *command):
    """Replace this process with a sqlite3 shell opened on the home database.

    Any extra command-line arguments are passed straight through to the
    sqlite3 binary.
    """
    home = args.assert_home()
    argv = ['sqlite3', home.db.path] + list(command)
    # execvp never returns: the sqlite3 binary takes over this process.
    os.execvp('sqlite3', argv)
| {
"content_hash": "33c24454e447b1b06bd60cb2c1ca90ba",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 47,
"avg_line_length": 22.6,
"alnum_prop": 0.6880530973451328,
"repo_name": "westernx/vee",
"id": "128ed3f7a83b2f412f9b05cce51f19d2bc662904",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vee/commands/sqlite3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "590"
},
{
"name": "Makefile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "334426"
},
{
"name": "Ruby",
"bytes": "479"
},
{
"name": "Shell",
"bytes": "1027"
},
{
"name": "Smarty",
"bytes": "810"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons.constants import AMBARI_SUDO_BINARY
from functions import calc_xmn_from_xms, ensure_unit_for_memory
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from resource_management import *
from resource_management.libraries.functions import conf_select, stack_select
from resource_management.libraries.functions.expect import expect
import status_params
from hbase import *
def treat_value_as_mb(value1):
  """Normalize a heap-size value to a megabyte string of the form "<n>m".

  Accepts either a bare number ("1024", 1024) or a number with a trailing
  "m"/"M" suffix ("1024m", " 256M "). Returns the integer part with a
  lowercase "m" appended, or None if the value cannot be parsed.
  """
  value = str(value1)
  try:
    part = int(value.strip()[:-1]) if value.lower().strip()[-1:] == 'm' else int(value)
    return str(part) + 'm'
  except ValueError:
    # Only a malformed number should yield None; the previous bare
    # `except:` also hid genuine programming errors.
    return None
# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

# Stack name/version as reported by the Ambari server for this command.
stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version = format_stack_version(stack_version_unformatted)

component_directory = status_params.component_directory

#hadoop params
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
# Default HBase script locations under the generic client component
# directory; re-pointed later in this module for stack >= 4.0 when the
# command targets a master or regionserver role.
daemon_script = format('/usr/iop/current/{component_directory}/bin/hbase-daemon.sh')
region_mover = format('/usr/iop/current/{component_directory}/bin/region_mover.rb')
region_drainer = format('/usr/iop/current/{component_directory}/bin/draining_servers.rb')
hbase_cmd = format('/usr/iop/current/{component_directory}/bin/hbase')

limits_conf_dir = "/etc/security/limits.d"
hbase_conf_dir = status_params.hbase_conf_dir

# Host include/exclude lists used when decommissioning regionservers.
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = default("/commandParams/mark_draining_only",False)
hbase_included_hosts = config['commandParams']['included_hosts']

hbase_user = status_params.hbase_user
hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
smokeuser = config['configurations']['cluster-env']['smokeuser']
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = config['configurations']['cluster-env']['security_enabled']

# HDFS home directory for the hbase user, created with the mode below.
hbase_hdfs_user_dir = format("/user/{hbase_user}")
hbase_hdfs_user_mode = 0755  # NOTE: Python 2 octal literal

# this is "hadoop-metrics.properties" for 1.x stacks
metric_prop_file_name = "hadoop-metrics2-hbase.properties"

# not supporting 32 bit jdk.
java64_home = config['hostLevelParams']['java_home']

log_dir = config['configurations']['hbase-env']['hbase_log_dir']
# Master heap is normalized to '<n>m' notation via treat_value_as_mb.
master_heapsize_cfg = config['configurations']['hbase-env']['hbase_master_heapsize']
master_heapsize = treat_value_as_mb(master_heapsize_cfg)

# Extra JVM options for HBase; ensure -Diop.version is always present.
hbase_javaopts_properties = config['configurations']['hbase-javaopts-properties']['content']
iop_full_version = get_iop_version()
hbase_javaopts_properties = str(hbase_javaopts_properties)
if hbase_javaopts_properties.find('-Diop.version') == -1:
  hbase_javaopts_properties = hbase_javaopts_properties+ ' -Diop.version=' + str(iop_full_version)

regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float) #AMBARI-15614
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
local_dir = config['configurations']['hbase-site']['hbase.local.dir']
# TODO UPGRADE default, update site during upgrade
#_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
#local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])

# JAAS configuration files rendered into the HBase conf directory.
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")

ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]

# Ambari Metrics System (AMS) collector endpoint, when AMS is deployed.
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
  metric_collector_host = ams_collector_hosts[0]
  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
  if metric_collector_port and metric_collector_port.find(':') != -1:
    # Keep only the port portion of a 'host:port' address.
    metric_collector_port = metric_collector_port.split(':')[1]
  pass

# if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
if 'slave_hosts' in config['clusterHostInfo']:
  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
else:
  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')

# Smoke-test (service check) user settings.
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smokeuser_permissions = "RWXCA"
service_check_data = functions.get_unique_id_and_date()
user_group = config['configurations']['cluster-env']["user_group"]

if security_enabled:
  # Resolve Kerberos principals; Hadoop security convention replaces the
  # _HOST placeholder with this host's lowercase FQDN.
  _hostname_lowercase = config['hostname'].lower()
  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
  rest_server_jaas_princ = config['configurations']['hbase-site']['hbase.rest.kerberos.principal'].replace('_HOST',_hostname_lowercase)
  rest_server_spnego_jaas_princ = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.principal'].replace('_HOST',_hostname_lowercase)

# Keytab locations (read unconditionally; only used when security is on).
master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
rest_server_keytab_path = config['configurations']['hbase-site']['hbase.rest.keytab.file']
rest_server_spnego_keytab_path = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.keytab']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
kinit_path_local = functions.get_kinit_path()
if security_enabled:
  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
else:
  kinit_cmd = ""
#log4j.properties
if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
  log4j_props = config['configurations']['hbase-log4j']['content']
else:
  log4j_props = None

hbase_env_sh_template = config['configurations']['hbase-env']['content']

hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
hbase_staging_dir = "/apps/hbase/staging"
#for create_hdfs_directory
hostname = config["hostname"]
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
kinit_path_local = functions.get_kinit_path()
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']

import functools
#create partial functions with common arguments for every HdfsDirectory call
#to create hdfs directory we need to call params.HdfsDirectory in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
  default_fs = default_fs
)

# For stack 4.0+ the role-specific install locations are used for the HBase
# scripts instead of the generic client component directory set above.
if stack_version != "" and compare_versions(stack_version, '4.0') >= 0:
  command_role = default("/role", "")
  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
    daemon_script=format("/usr/iop/current/hbase-{role_root}/bin/hbase-daemon.sh")
    region_mover = format("/usr/iop/current/hbase-{role_root}/bin/region_mover.rb")
    region_drainer = format("/usr/iop/current/hbase-{role_root}/bin/draining_servers.rb")
    hbase_cmd = format("/usr/iop/current/hbase-{role_root}/bin/hbase")
| {
"content_hash": "526dad7be093cb5f53235d936ccc8525",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 174,
"avg_line_length": 49.30102040816327,
"alnum_prop": 0.7522508537721204,
"repo_name": "arenadata/ambari",
"id": "97657ad7f491397add8546f8c2a49e7c7d42f170",
"size": "9685",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/params.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin for the ``keystore`` module.

    Optionally transfers the certificate file (``crt``) from the controller
    to the remote host before delegating execution to the remote ``keystore``
    module.
    """

    # Tell Ansible this action may copy files to the remote host.
    TRANSFER_FILES = True

    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        # Task arguments: crt is the certificate file, copy controls whether
        # it is staged from the controller, creates enables idempotent skip.
        crt = self._task.args.get('crt', None)
        copy = self._task.args.get('copy', True)
        creates = self._task.args.get('creates', None)

        # this module requires at least the crt= to be present
        if crt is None:
            result['failed'] = True
            result['msg'] = "crt is required"
            return result

        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
        if not tmp:
            tmp = self._make_tmp_path(remote_user)

        # skip if creates= is added to the module and the destination file already exists
        if creates:
            result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars)
            stat = result.get('stat', None)
            if stat and stat.get('exists', False):
                result['skipped'] = True
                result['msg'] = "skipped, since %s exists" % creates
                return result

        crt = os.path.expanduser(crt)

        # copy files
        if copy:
            # Resolve the source relative to the play's files/ directory and
            # stage it in the remote temporary directory.
            source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', crt)
            dest = tmp + os.path.basename(source)
            self._connection.put_file(source, dest)
            if self._play_context.become and self._play_context.become_user != 'root':
                # A non-root become user must be able to read the staged file.
                if not self._play_context.check_mode:
                    self._remote_chmod('a+r', dest)
            # Point the remote module at the staged copy instead of the
            # controller-side path.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    crt=dest,
                ),
            )
        else:
            new_module_args = self._task.args.copy()

        # run keystore module
        result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
        return result
| {
"content_hash": "db7a57ba4ddb4af206aec4ff0d63fd87",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 114,
"avg_line_length": 33.26470588235294,
"alnum_prop": 0.5680813439434129,
"repo_name": "silpion/ansible-java",
"id": "9d76e44801712574ebbb07bc4de8119baba62bf5",
"size": "2310",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "action_plugins/keystore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9584"
},
{
"name": "Ruby",
"bytes": "3181"
},
{
"name": "Shell",
"bytes": "4296"
}
],
"symlink_target": ""
} |
# Sphinx extensions used to build the release notes.
extensions = [
    'reno.sphinxext',
    'openstackdocstheme',
]

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/blazar'
openstackdocs_bug_project = 'blazar'
openstackdocs_bug_tag = ''

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
copyright = '2013-2022, Blazar developers'

# Release notes are version independent.
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BlazarReleaseNotesdoc'

# -- Options for LaTeX output ---------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'BlazarReleaseNotes.tex',
     'Blazar Release Notes Documentation',
     'Blazar developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'blazarreleasenotes', 'Blazar Release Notes Documentation',
     ['Blazar developers'], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'BlazarReleaseNotes', 'Blazar Release Notes Documentation',
     'Blazar developers', 'BlazarReleaseNotes',
     'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
# Directories where Sphinx looks for translated message catalogs.
locale_dirs = ['locale/']
| {
"content_hash": "f708c462f2be0dabb6b1183e73a78ae2",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 31.733031674208146,
"alnum_prop": 0.7046912876087267,
"repo_name": "openstack/blazar",
"id": "fa7a94b2822028d0054fae721f53e37c8e5e5c38",
"size": "8568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "releasenotes/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1014"
},
{
"name": "Python",
"bytes": "1165064"
},
{
"name": "Shell",
"bytes": "10357"
}
],
"symlink_target": ""
} |
"""
Process extraclusions on PUT of a single Tiddler.
Tiddler validation happens after bag policy checks
so we know we can write.
As long as this validator is late in the stack of
validators, other, security related, validators will
operate on the extraclude text.
"""
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.store import StoreError
from tiddlyweb.util import renderable
from tiddlyweb.web.validator import TIDDLER_VALIDATORS
import re
# Matches a fenced extraclusion section of the form:
#   .extraclude <tiddler title>
#   <body text>
#   .extraclude
# Group 1 captures the target tiddler title, group 2 the enclosed body.
EXTRACLUDE_RE = re.compile(r'^.extraclude (.+?)\s*$([\s\S]*?)^.extraclude$',
        re.MULTILINE)

# Transclusion markup emitted in place of the extracted section, keyed by
# the host tiddler's content type.
EXTRACLUDE_TYPEMAP = {
    'text/x-markdown': '{{%s}}',
    'text/x-tiddlywiki': '<<tiddler [[%s]]>>',
}
def process_extraclusion(tiddler, environ):
    """
    If the tiddler is renderable, look for
    extraclusions and process.

    Each '.extraclude <title> ... .extraclude' section is stored as a new
    tiddler named <title> in the same bag, and the section is replaced in
    the host tiddler's text with type-appropriate transclusion markup.
    """
    # return as quickly as possible if we don't need to do anything
    if not renderable(tiddler, environ):
        return
    if '.extraclude' not in tiddler.text:
        return

    store = environ['tiddlyweb.store']

    # ensure sane line endings. This might be happening elsewhere
    # but we need to be _sure_.
    text = tiddler.text.replace('\r', '')

    def replace(match):
        # Persist the fenced section as its own tiddler, copying the host
        # tiddler's bag, type and modification metadata.
        name = match.group(1)
        extract = match.group(2)
        new_tiddler = Tiddler(name, tiddler.bag)
        new_tiddler.type = tiddler.type
        new_tiddler.text = extract
        new_tiddler.modifier = tiddler.modifier
        new_tiddler.modified = tiddler.modified
        store.put(new_tiddler)
        # Replace the fenced text with transclusion markup for the type.
        return EXTRACLUDE_TYPEMAP[tiddler.type] % name

    try:
        text = EXTRACLUDE_RE.sub(replace, text)
    except StoreError:
        # bail out: we could not store, so don't update this tiddler
        return

    tiddler.text = text
def init(config):
    """
    TiddlyWeb plugin entry point: register the extraclusion processor as a
    tiddler validator so it runs when tiddlers are PUT.
    """
    TIDDLER_VALIDATORS.append(process_extraclusion)
| {
"content_hash": "b26b43d6cefcd27fbcabd4df66455453",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 28.078125,
"alnum_prop": 0.672787979966611,
"repo_name": "cdent/tiddlywebplugins.extraclude",
"id": "82d1d15c8d2f88f26eacf66dcda0876327e35bdd",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiddlywebplugins/extraclude.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5205"
}
],
"symlink_target": ""
} |
import roslib; roslib.load_manifest('behavior_driving_between_two_waypoints')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from behavior_set_points.set_points_sm import SetPointsSM
from behavior_simplemissiondriveto.simplemissiondriveto_sm import SimpleMissionDriveToSM
from flexbe_states.operator_decision_state import OperatorDecisionState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
from geometry_msgs.msg import PoseStamped
# [/MANUAL_IMPORT]
'''
Created on Thu Jun 02 2016
@author: Gabriel
'''
class DrivingbetweentwoWaypointsSM(Behavior):
    '''
    Simple mission structure
    '''
    # NOTE(review): the [MANUAL_*] marker comments and the "# x:... y:..."
    # coordinate comments suggest this file is generated/maintained by the
    # FlexBE editor — confirm before editing outside the marker sections.

    def __init__(self):
        super(DrivingbetweentwoWaypointsSM, self).__init__()
        self.name = 'Driving between two Waypoints'

        # parameters of this behavior
        self.add_parameter('speed', 0.1)
        self.add_parameter('allow_backwards', False)

        # references to used behaviors
        self.add_behavior(SetPointsSM, 'Set Points')
        self.add_behavior(SimpleMissionDriveToSM, 'Drive To Start')
        self.add_behavior(SimpleMissionDriveToSM, 'Drive To End')

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        # Builds the state machine: Set Points -> operator decision, then
        # the robot shuttles between the two waypoints indefinitely
        # (each drive state transitions to the other on success).
        # x:52 y:481, x:134 y:482
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
        _state_machine.userdata.pose = PoseStamped()
        _state_machine.userdata.startPoint = PoseStamped()
        _state_machine.userdata.endPoint = PoseStamped()
        _state_machine.userdata.switchFalse = False

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]
        # [/MANUAL_CREATE]

        with _state_machine:
            # x:32 y:58
            OperatableStateMachine.add('Set Points',
                self.use_behavior(SetPointsSM, 'Set Points'),
                transitions={'finished': 'Operator_Drive'},
                autonomy={'finished': Autonomy.Inherit},
                remapping={'startPoint': 'startPoint', 'endPoint': 'endPoint'})

            # x:68 y:172 -- retries itself on failure
            OperatableStateMachine.add('Drive To Start',
                self.use_behavior(SimpleMissionDriveToSM, 'Drive To Start'),
                transitions={'finished': 'Drive To End', 'failed': 'Drive To Start'},
                autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                remapping={'pose': 'startPoint'})

            # x:287 y:59 -- operator chooses the first waypoint to visit
            OperatableStateMachine.add('Operator_Drive',
                OperatorDecisionState(outcomes=['toStart', 'toEnd'], hint='Operator Drive', suggestion=None),
                transitions={'toStart': 'Drive To Start', 'toEnd': 'Drive To End'},
                autonomy={'toStart': Autonomy.Off, 'toEnd': Autonomy.Off})

            # x:468 y:172 -- retries itself on failure
            OperatableStateMachine.add('Drive To End',
                self.use_behavior(SimpleMissionDriveToSM, 'Drive To End'),
                transitions={'finished': 'Drive To Start', 'failed': 'Drive To End'},
                autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                remapping={'pose': 'endPoint'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]
    # [/MANUAL_FUNC]
| {
"content_hash": "ca709d775ef7832e014cd8801f92817b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 115,
"avg_line_length": 33.73118279569893,
"alnum_prop": 0.7054510678992668,
"repo_name": "tu-darmstadt-ros-pkg/hector_flexbe_behavior",
"id": "57bbabb7ba95604632a23eb147679441a439ba4a",
"size": "3520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behaviors/behavior_driving_between_two_waypoints/src/behavior_driving_between_two_waypoints/driving_between_two_waypoints_sm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "20083"
},
{
"name": "Python",
"bytes": "191703"
}
],
"symlink_target": ""
} |
"""Tests for the McAfee AV Logs file event formatter."""
import unittest
from plaso.formatters import mcafeeav
from tests.formatters import test_lib
class McafeeAccessProtectionLogEventFormatterTest(
    test_lib.EventFormatterTestCase):
  """Tests for the McAfee Access Protection Log event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    # Constructing the formatter must yield a usable (non-None) object.
    formatter = mcafeeav.McafeeAccessProtectionLogEventFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = mcafeeav.McafeeAccessProtectionLogEventFormatter()

    # The attribute names, in the order the format strings reference them.
    expected_attribute_names = [
        u'filename', u'username', u'trigger_location', u'status', u'rule',
        u'action']

    self._TestGetFormatStringAttributeNames(
        formatter, expected_attribute_names)
# TODO: add test for GetMessages.

# Run the test case when invoked as a script.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "17ae73bbd05cda53c1417f5b538fbb81",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 26.763157894736842,
"alnum_prop": 0.7168141592920354,
"repo_name": "dc3-plaso/plaso",
"id": "b85f9459431346e88bd392b07ba508a384357f54",
"size": "1059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/formatters/mcafeeav.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
} |
"""SDK Fn Harness entry point."""
from __future__ import absolute_import
import http.server
import json
import logging
import os
import re
import sys
import threading
import traceback
from builtins import object
from google.protobuf import text_format
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
# This module is experimental. No backwards-compatibility guarantees.
class StatusServer(object):
  """Serves stack traces of all live threads over HTTP for debugging."""

  @classmethod
  def get_thread_dump(cls):
    """Returns the stack of every live thread as a flat list of strings.

    Each thread contributes two entries: a '--- Thread #<id> name: <name> ---'
    header line and its formatted stack trace.
    """
    lines = []
    frames = sys._current_frames()  # pylint: disable=protected-access
    for t in threading.enumerate():
      # A thread may terminate between enumerate() and the frame lookup;
      # skip it instead of raising KeyError.
      frame = frames.get(t.ident)
      if frame is None:
        continue
      lines.append('--- Thread #%s name: %s ---\n' % (t.ident, t.name))
      lines.append(''.join(traceback.format_stack(frame)))
    return lines

  def start(self, status_http_port=0):
    """Executes the serving loop for the status server.

    Args:
      status_http_port(int): Binding port for the debug server.
        Default is 0 which means any free unsecured port
    """

    class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
      """HTTP handler for serving stacktraces of all threads."""

      def do_GET(self):  # pylint: disable=invalid-name
        """Return all thread stacktraces information for GET request."""
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.end_headers()
        for line in StatusServer.get_thread_dump():
          # wfile is a binary stream; writing a str raises TypeError on
          # Python 3, so encode explicitly (also valid on Python 2).
          self.wfile.write(line.encode('utf-8'))

      def log_message(self, f, *args):
        """Do not log any messages."""
        pass

    self.httpd = httpd = http.server.HTTPServer(
        ('localhost', status_http_port), StatusHttpHandler)
    logging.info('Status HTTP server running at %s:%s', httpd.server_name,
                 httpd.server_port)
    httpd.serve_forever()
def main(unused_argv):
  """Main entry point for SDK Fn Harness."""
  # Wire Python logging to the runner's log service when its endpoint is
  # provided through the environment.
  if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
    logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
    text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
                      logging_service_descriptor)

    # Send all logs to the runner.
    fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
    # TODO(BEAM-5468): This should be picked up from pipeline options.
    logging.getLogger().setLevel(logging.INFO)
    logging.getLogger().addHandler(fn_log_handler)
    logging.info('Logging handler created.')
  else:
    fn_log_handler = None

  # Start status HTTP server thread (daemon so it never blocks shutdown).
  thread = threading.Thread(target=StatusServer().start)
  thread.daemon = True
  thread.setName('status-server-demon')
  thread.start()

  # Pipeline options and the staging directory both arrive via environment
  # variables set by the runner; fall back to empty defaults otherwise.
  if 'PIPELINE_OPTIONS' in os.environ:
    sdk_pipeline_options = _parse_pipeline_options(
        os.environ['PIPELINE_OPTIONS'])
  else:
    sdk_pipeline_options = PipelineOptions.from_dictionary({})

  if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
    semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
  else:
    semi_persistent_directory = None

  logging.info('semi_persistent_directory: %s', semi_persistent_directory)

  try:
    # Restore the user's pickled __main__ session, if one was staged.
    _load_main_session(semi_persistent_directory)
  except Exception:  # pylint: disable=broad-except
    # Non-fatal: only functions defined in __main__ are affected.
    exception_details = traceback.format_exc()
    logging.error(
        'Could not load main session: %s', exception_details, exc_info=True)

  try:
    logging.info('Python sdk harness started with pipeline_options: %s',
                 sdk_pipeline_options.get_all_options(drop_default=True))
    service_descriptor = endpoints_pb2.ApiServiceDescriptor()
    text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
                      service_descriptor)
    # TODO(robertwb): Support credentials.
    assert not service_descriptor.oauth2_client_credentials_grant.url
    # Run the harness loop against the runner's control service.
    SdkHarness(
        control_address=service_descriptor.url,
        worker_count=_get_worker_count(sdk_pipeline_options)).run()
    logging.info('Python sdk harness exiting.')
  except:  # pylint: disable=broad-except
    logging.exception('Python sdk harness failed: ')
    raise
  finally:
    if fn_log_handler:
      fn_log_handler.close()
def _parse_pipeline_options(options_json):
  """Parses JSON-encoded pipeline options into a PipelineOptions.

  Args:
    options_json: JSON string supplied by the runner — either a legacy
      {'options': {...}} wrapper, or a flat dict whose keys may be
      URN-encoded as 'beam:option:<name>:v1'.

  Returns:
    A PipelineOptions built from the decoded dictionary.
  """
  options = json.loads(options_json)
  # Check the options field first for backward compatibility.
  if 'options' in options:
    return PipelineOptions.from_dictionary(options.get('options'))

  # Remove the 'beam:option:<name>:v1' URN wrapper from portable option keys.
  portable_option_regex = r'^beam:option:(?P<key>.*):v1$'

  def _strip_urn(key):
    # Match once per key (the original evaluated the regex twice per key);
    # non-URN keys pass through unchanged.
    match = re.match(portable_option_regex, key)
    return match.group('key') if match else key

  return PipelineOptions.from_dictionary(
      {_strip_urn(k): v for k, v in options.items()})
def _get_worker_count(pipeline_options):
  """Extract worker count from the pipeline_options.

  This defines how many SdkWorkers will be started in this Python process.
  And each SdkWorker will have its own thread to process data. Name of the
  experimental parameter is 'worker_threads'
  Example Usage in the Command Line:
    --experimental worker_threads=1

  Note: worker_threads is an experimental flag and might not be available in
  future releases.

  Returns:
    an int containing the worker_threads to use. Default is 12
  """
  # NOTE: the previous docstring claimed the default was 1, but the code has
  # always returned 12 when the flag is absent; the docstring now matches.
  experiments = pipeline_options.view_as(DebugOptions).experiments
  experiments = experiments if experiments else []

  for experiment in experiments:
    # There should only be 1 match so returning from the loop.
    # Match once and reuse the result (previously matched twice).
    match = re.match(r'worker_threads=(?P<worker_threads>.*)', experiment)
    if match:
      return int(match.group('worker_threads'))

  return 12
def _load_main_session(semi_persistent_directory):
  """Loads a pickled main session from the path specified."""
  if not semi_persistent_directory:
    logging.warning(
        'No semi_persistent_directory found: Functions defined in __main__ '
        '(interactive session) may fail.')
    return
  session_file = os.path.join(
      semi_persistent_directory, 'staged', names.PICKLED_MAIN_SESSION_FILE)
  if os.path.isfile(session_file):
    pickler.load_session(session_file)
  else:
    logging.warning(
        'No session file found: %s. Functions defined in __main__ '
        '(interactive session) may fail.', session_file)
# Script entry point: run the SDK harness with the process arguments.
if __name__ == '__main__':
  main(sys.argv)
| {
"content_hash": "30c4f8032a4f80cfd96a721256d3c006",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 76,
"avg_line_length": 33.89340101522843,
"alnum_prop": 0.6973191553092706,
"repo_name": "rangadi/incubator-beam",
"id": "da21418e4cc30848b8f7700b5a9b5600b6c7f694",
"size": "7461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/worker/sdk_worker_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "50057"
},
{
"name": "Java",
"bytes": "11779709"
},
{
"name": "Protocol Buffer",
"bytes": "55082"
},
{
"name": "Python",
"bytes": "2864316"
},
{
"name": "Shell",
"bytes": "44966"
}
],
"symlink_target": ""
} |
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import keras
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import function
from tensorflow.python.eager import profiler
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import gradient_descent
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def c_tfe_py_fastpath_execute(a,
                              b,
                              transpose_a=False,
                              transpose_b=False,
                              name=None):
  """Run a MatMul of `a` and `b` through the TFE_Py_FastPathExecute C API.

  Eager-only: asserts that the context is executing eagerly. On a C-level
  failure the status is converted into the corresponding Python exception,
  with `name` appended to the message when provided.
  """
  ctx = context.context()
  assert ctx.executing_eagerly(
  ), "The prototype doesn't contain C code for graph construction"
  try:
    # Positional layout expected by the fast path: context handle, device,
    # op name, op-level name, callbacks, inputs, then attr name/value pairs.
    return pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._handle, ctx.device_name, "MatMul", name,
        ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
        "transpose_b", transpose_b)
  except core._NotOkStatusException as e:
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    # Re-raise as the mapped TF error type, suppressing the C-level context.
    six.raise_from(core._status_to_exception(e.code, message), None)
class SubclassedKerasModel(keras.Model):
  """Dense 64-128-256-256-10 model built via Model subclassing.

  Same layer widths as make_keras_model / make_sequential_keras_model so the
  benchmarks compare like for like.
  """

  def __init__(self, initializer="ones"):
    super(SubclassedKerasModel, self).__init__()
    layer_names = ("layer_a", "layer_b", "layer_c", "layer_d", "layer_e")
    layer_widths = (64, 128, 256, 256, 10)
    for attr_name, width in zip(layer_names, layer_widths):
      setattr(
          self, attr_name,
          keras.layers.Dense(
              width, kernel_initializer=initializer,
              bias_initializer="zeros"))

  def call(self, x):
    # Thread the input through the five layers in order.
    for layer in (self.layer_a, self.layer_b, self.layer_c, self.layer_d,
                  self.layer_e):
      x = layer(x)
    return x
def make_keras_model(initializer="ones"):
  """Build the 64-128-256-256-10 dense model with the functional API."""
  model_input = keras.Input(shape=(10,))
  x = model_input
  for width in (64, 128, 256, 256, 10):
    x = keras.layers.Dense(
        width, kernel_initializer=initializer, bias_initializer="zeros")(x)
  return keras.Model(inputs=model_input, outputs=x)
def make_sequential_keras_model(initializer="ones"):
  """Build the 64-128-256-256-10 dense model with the Sequential API."""
  model = keras.models.Sequential()
  # First layer carries the input shape; the rest infer it.
  model.add(keras.layers.Dense(
      64, kernel_initializer=initializer, bias_initializer="zeros",
      input_shape=(10,)))
  for width in (128, 256, 256, 10):
    model.add(keras.layers.Dense(
        width, kernel_initializer=initializer, bias_initializer="zeros"))
  return model
class MicroBenchmarks(test.Benchmark):
  """Micro-benchmarks for low-level eager execution primitives.

  Covers tensor creation, elementwise multiply, matmul through several call
  paths (math_ops, gen_math_ops, TFE_Py_Execute, fast path, defun), gradient
  tape overheads, variable reads, defun call-cache lookups, Keras model
  construction/fit/evaluate/predict, and functional_ops.scan.  Results are
  reported via test.Benchmark.report_benchmark.
  """

  def __init__(self):
    # used for multiply benchmarks
    self._m_2 = random_ops.random_uniform([2])

    # used for matmul benchmarks
    self._m_2_by_2 = random_ops.random_uniform((2, 2))
    self._m_100_by_784 = random_ops.random_uniform((100, 784))
    self._num_iters_2_by_2 = 30000
    self._num_iters_100_by_784 = 30000

  def _run(self, func, num_iters, execution_mode=None):
    """Time `func` over `num_iters` calls; report mean latency in microseconds."""
    # call func to maybe warm up the GPU
    ctx = context.context()
    with context.execution_mode(execution_mode):
      func()
      if execution_mode == context.ASYNC:
        # Drain pending async ops so the warm-up doesn't leak into the timing.
        ctx.async_wait()
      start = time.time()
      for _ in xrange(num_iters):
        func()
      if execution_mode == context.ASYNC:
        ctx.async_wait()
      end = time.time()
      mean_us = (end - start) * 1e6 / num_iters
      self.report_benchmark(
          iters=num_iters,
          wall_time=mean_us,
          extras={"examples_per_sec": num_iters / (end - start)})

  def benchmark_create_np_array(self):
    func = lambda: np.array([3.0])
    self._run(func, 30000)

  def _benchmark_create_tensor(self, value, dtype, device):
    """Benchmark overheads of creating a Tensor object."""
    ctx = context.context()
    handle = ctx._handle
    if device == GPU:
      # Warmup the GPU
      ops.EagerTensor(value, context=handle, device=device)

    def func():
      ops.EagerTensor(value, context=handle, device=device, dtype=dtype)

    self._run(func, 30000)

  def benchmark_create_constant(self):
    func = lambda: constant_op.constant(3.0)
    self._run(func, 30000)

  def benchmark_create_float_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)

  def benchmark_create_float_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        CPU)

  def benchmark_create_int32_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)

  def benchmark_create_int32_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)

  def benchmark_create_float_tensor_from_list_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)

  def benchmark_create_float_tensor_from_np_array_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        GPU)

  def benchmark_create_int32_tensor_from_list_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)

  def benchmark_create_int32_tensor_from_np_array_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)

  def benchmark_index_tensor_with_literal(self):
    func = lambda: constant_op.constant([3.0])[0]
    self._run(func, 30000)

  def benchmark_index_tensor_with_tensor(self):
    func = lambda idx=constant_op.constant(0): constant_op.constant([3.0])[idx]
    self._run(func, 30000)

  def benchmark_index_tensor_with_np_array(self):
    func = lambda idx=np.array(0): constant_op.constant([3.0])[idx]
    self._run(func, 30000)

  def _benchmark_np_multiply(self, m, num_iters):
    a = m.cpu().numpy()
    func = lambda: a * a
    self._run(func, num_iters)

  def _benchmark_tf_multiply(self, m, num_iters):
    func = lambda: m * m
    self._run(func, num_iters)

  def _benchmark_tf_multiply_op(self, m, num_iters):
    func = lambda: math_ops.multiply(m, m)
    self._run(func, num_iters)

  def benchmark_np_multiply(self):
    self._benchmark_np_multiply(self._m_2, 30000)

  def benchmark_tf_multiply_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply(m, 30000)

  def benchmark_tf_multiply_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply(m, 30000)

  def benchmark_tf_multiply_op_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply_op(m, 30000)

  def benchmark_tf_multiply_op_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply_op(m, 30000)

  def benchmark_tf_identity(self):
    m = self._m_2
    self._run(lambda: gen_array_ops.identity(m), 30000)

  def benchmark_slowpath_tf_identity(self):
    # Passing a Python int forces conversion off the fast path.
    self._run(lambda: gen_array_ops.identity(1), 30000)

  def benchmark_tfe_py_execute_identity(self):
    m = self._m_2
    ctx_handle = context.context()._handle
    attrs = ("T", self._m_2.dtype.as_datatype_enum)
    inputs = [m]

    def f():
      pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs,
                                       attrs, 1)

    self._run(f, 30000)

  def benchmark_tf_gradient_function_identity(self):
    with context.device(CPU):
      m = gen_array_ops.identity(self._m_2)
      self._run(
          lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
          30000)

  def benchmark_tf_gradient_forward_identity(self):
    # Measures the forward-pass cost of identity while a tape is recording.
    with backprop.GradientTape() as tape:
      m = self._m_2
      tape.watch(m)
      self._run(lambda: gen_array_ops.identity(m), 30000)

  def benchmark_tf_gradient_tape_push_pop(self):

    def f():
      with backprop.GradientTape():
        pass

    self._run(f, 30000)

  def benchmark_tf_gradient_function_no_op(self):
    with context.device(CPU):
      m = gen_array_ops.identity(self._m_2)
      self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)

  def _benchmark_np_matmul(self, m, transpose_b, num_iters):
    a = m.cpu().numpy()
    b = a.T if transpose_b else a
    func = lambda: np.dot(a, b)
    self._run(func, num_iters)

  def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
                           execution_mode=None):
    func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
    self._run(func, num_iters, execution_mode=execution_mode)

  def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):

    def func():
      gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)

    self._run(func, num_iters)

  def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
                                                num_iters):

    def func():
      c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)

    self._run(func, num_iters)

  def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
    inputs = [m, m]
    # pylint: disable=protected-access
    ctx_handle = context.context()._handle
    # pylint: enable=protected-access
    device = context.context().device_name
    attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
             m.dtype.as_datatype_enum)

    def func():
      pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs,
                                       attrs, 1)

    self._run(func, num_iters)

  def _benchmark_defun_matmul(self,
                              m,
                              transpose_b,
                              num_iters,
                              execution_mode=None):
    f = function.defun(math_ops.matmul)
    func = lambda: f(m, m, transpose_b=transpose_b)
    self._run(func, num_iters, execution_mode=execution_mode)

  def _benchmark_nested_defun_matmul(self, m, transpose_b, num_iters):
    inner = function.defun(math_ops.matmul)

    @function.defun
    def outer(a, b, c, transpose_b):
      return math_ops.matmul(inner(a, b, transpose_b=transpose_b), c)

    func = lambda: outer(m, m, m, transpose_b=transpose_b)
    # Warmup before benchmark
    for _ in range(1000):
      func()
    self._run(func, num_iters)

  def _benchmark_defun_matmul_forward_backward(self,
                                               m,
                                               transpose_b,
                                               num_iters,
                                               execution_mode=None):
    f = function.defun(math_ops.matmul)

    def func():
      with backprop.GradientTape() as gt:
        gt.watch(m)
        y = f(m, m, transpose_b=transpose_b)
      _ = gt.gradient(y, m)

    self._run(func, num_iters, execution_mode=execution_mode)

  def _benchmark_read_variable(self, m, num_iters):
    self._run(m.value, num_iters)

  def _benchmark_matmul_read_variable(self, m, num_iters):
    self._benchmark_gen_math_ops_matmul(
        m, transpose_b=False, num_iters=num_iters)

  def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
    with backprop.GradientTape() as tape:
      tape.watch(m)
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=num_iters)

  def _benchmark_read_variable_with_tape(self, m, num_iters):
    with backprop.GradientTape() as tape:
      tape.watch(m)
      self._run(m.value, num_iters)

  # Benchmarks for A^2, A of dimension 2 by 2.
  def benchmark_np_matmul_2_by_2(self):
    self._benchmark_np_matmul(
        self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tf_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tf_matmul_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)

  def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_defun_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_defun_matmul_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)

  def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul_forward_backward(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul_forward_backward(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)

  def benchmark_tf_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tf_matmul_2_by_2_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)

  def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_defun_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_defun_matmul_2_by_2_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_defun_matmul(
          m,
          transpose_b=False,
          num_iters=self._num_iters_2_by_2,
          execution_mode=context.ASYNC)

  def benchmark_nested_defun_matmul_2_by_2(self):
    m = self._m_2_by_2.cpu()
    self._benchmark_nested_defun_matmul(
        m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  # Benchmarks for AA.T, A of dimension 100 by 784.
  def benchmark_np_matmul_100_by_784(self):
    self._benchmark_np_matmul(
        self._m_100_by_784,
        transpose_b=True,
        num_iters=self._num_iters_100_by_784)

  def benchmark_tf_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tf_matmul_100_by_784_CPU_async(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=True,
          num_iters=self._num_iters_100_by_784,
          execution_mode=context.ASYNC)

  def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_defun_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tf_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tf_matmul_100_by_784_GPU_async(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tf_matmul(
          m,
          transpose_b=True,
          num_iters=self._num_iters_100_by_784,
          execution_mode=context.ASYNC)

  def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_defun_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_nested_defun_matmul_100_by_784(self):
    # NOTE(review): calls .gpu() without a context.num_gpus() guard, unlike
    # the other GPU benchmarks — may fail on CPU-only hosts; confirm intent.
    m = self._m_100_by_784.gpu()
    self._benchmark_nested_defun_matmul(
        m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_defun_without_signature(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(func)
    t = constant_op.constant(0.0)
    cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(cache_computation, 30000)

  def benchmark_defun_without_signature_and_with_kwargs(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(func)
    t = constant_op.constant(0.0)

    def cache_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)

    self._run(cache_computation, 30000)

  def benchmark_defun_with_signature(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)
    signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(signature_computation, 30000)

  def benchmark_defun_with_signature_and_kwargs(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)

    def signature_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)

    self._run(signature_computation, 30000)

  def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)

  def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_matmul_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)

  def benchmark_read_variable_op_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)

  def benchmark_read_variable_op_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
      self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)

  def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
    with context.device(CPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
      self._benchmark_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)

  def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
      self._benchmark_read_variable_with_tape(
          m, num_iters=self._num_iters_2_by_2)

  def benchmark_keras_model_subclassed(self):
    model = SubclassedKerasModel()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # First call is more expensive (creates variables etc.), discount that.
    func()

    # The whole point of this test is to contrast subclassing with
    # the functional style of keras model building, so validate that
    # the models are equivalent.
    assert np.equal(func(), make_keras_model()(data)).all()

    self._run(func, 30000)

  def benchmark_keras_model_functional(self):
    model = make_keras_model()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # Symmetry with benchmark_keras_model_subclassed
    func()
    assert np.equal(func(), SubclassedKerasModel()(data)).all()
    self._run(func, 30000)

  def benchmark_keras_model_sequential(self):
    model = make_sequential_keras_model()
    data = random_ops.random_uniform((10, 10))
    func = lambda: model(data)
    # Symmetry with benchmark_keras_model_functional
    func()
    assert np.equal(func(), make_keras_model()(data)).all()
    self._run(func, 30000)

  def _benchmark_keras_model_fit(self, model, run_eagerly=False):
    """Time 1000 fit steps on a repeated single-batch dataset."""
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse", run_eagerly=run_eagerly)
    func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)

    self._run(func, 1)

  def _benchmark_keras_model_evaluate(self, model, run_eagerly=False):
    """Time 1000 evaluate steps on a repeated single-batch dataset."""
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse", run_eagerly=run_eagerly)
    func = lambda: model.evaluate(dataset, steps=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.evaluate(dataset, steps=1, verbose=0)

    self._run(func, 1)

  def _benchmark_keras_model_predict(self, model, run_eagerly=False):
    """Time 1000 predict steps on a repeated single-batch dataset."""
    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
    dataset = dataset_ops.Dataset.from_tensors(tuple([data])).repeat()
    model.compile(
        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
        loss="mse", run_eagerly=run_eagerly)
    func = lambda: model.predict(dataset, steps=1000, verbose=0)
    # First call is more expensive (creates variables etc.), discount that.
    model.predict(dataset, steps=1, verbose=0)

    self._run(func, 1)

  def benchmark_keras_model_subclassed_fit(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)

  def benchmark_keras_model_subclassed_fit_graph_mode(self):
    with context.graph_mode():
      model = SubclassedKerasModel(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)

  def benchmark_keras_model_subclassed_fit_run_model_eagerly(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model, run_eagerly=True)

  def benchmark_keras_model_functional_fit(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)

  def benchmark_keras_model_functional_fit_graph_mode(self):
    with context.graph_mode():
      model = make_keras_model(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)

  def benchmark_keras_model_functional_fit_graph_mode_with_profiler(self):
    profiler.start()
    with context.graph_mode():
      model = make_keras_model(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)
    result = profiler.stop()
    assert result is not None

  def benchmark_keras_model_functional_fit_run_model_eagerly(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model, run_eagerly=True)

  def benchmark_keras_model_functional_fit_run_model_eagerly_with_profiler(
      self):
    profiler.start()
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model, run_eagerly=True)
    result = profiler.stop()
    assert result is not None

  def benchmark_keras_model_sequential_fit(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model)

  def benchmark_keras_model_sequential_fit_graph_mode(self):
    with context.graph_mode():
      model = make_sequential_keras_model(initializer="glorot_uniform")
      self._benchmark_keras_model_fit(model)

  def benchmark_keras_model_sequential_fit_run_model_eagerly(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_fit(model, run_eagerly=True)

  def benchmark_keras_model_subclassed_evaluate(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)

  def benchmark_keras_model_subclassed_evaluate_run_model_eagerly(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model, run_eagerly=True)

  def benchmark_keras_model_functional_evaluate(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)

  def benchmark_keras_model_functional_evaluate_run_model_eagerly(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model, run_eagerly=True)

  def benchmark_keras_model_sequential_evaluate(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model)

  def benchmark_keras_model_sequential_evaluate_run_model_eagerly(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_evaluate(model, run_eagerly=True)

  def benchmark_keras_model_subclassed_predict(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)

  def benchmark_keras_model_subclassed_predict_run_model_eagerly(self):
    model = SubclassedKerasModel(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model, run_eagerly=True)

  def benchmark_keras_model_functional_predict(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)

  def benchmark_keras_model_functional_predict_run_model_eagerly(self):
    model = make_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model, run_eagerly=True)

  def benchmark_keras_model_sequential_predict(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model)

  def benchmark_keras_model_sequential_predict_run_model_eagerly(self):
    model = make_sequential_keras_model(initializer="glorot_uniform")
    self._benchmark_keras_model_predict(model, run_eagerly=True)

  def benchmarkScan(self):
    elems = math_ops.range(1600)

    def scan():
      return functional_ops.scan(
          lambda a, x: a + x, elems, parallel_iterations=1)

    self._run(scan, 100)

  def benchmarkScanDefun(self):
    elems = math_ops.range(1600)

    @function.defun
    def scan():
      return functional_ops.scan(
          lambda a, x: a + x, elems, parallel_iterations=1)

    self._run(scan, 100)
# Script entry point: run all benchmarks selected by the --benchmarks flag.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "e7f8923a1ae3e228a54edecfe038724e",
"timestamp": "",
"source": "github",
"line_count": 907,
"max_line_length": 80,
"avg_line_length": 35.65270121278942,
"alnum_prop": 0.658007854779355,
"repo_name": "jbedorf/tensorflow",
"id": "8a1319f9efa847911b799cb54007f5971973ebb2",
"size": "33026",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/benchmarks_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "647467"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59799751"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1508512"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46379626"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "480235"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
from pprint import pprint
from st2actions.runners.pythonrunner import Action
class PrintConfigAction(Action):
    """Debug action that pretty-prints the pack config between separators."""

    def run(self):
        separator = '=' * 9
        print(separator)
        pprint(self.config)
        print(separator)
| {
"content_hash": "5a36752e3da0765b3b4ccae21e69ad47",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 50,
"avg_line_length": 21.4,
"alnum_prop": 0.6121495327102804,
"repo_name": "emedvedev/st2",
"id": "f65d393d36abfb5232aa86229925a4a47f722b03",
"size": "214",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "contrib/examples/actions/print_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "41694"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3717722"
},
{
"name": "Shell",
"bytes": "38637"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from backend.forms import PraiseForm
from backend.models import Praise
import random
from django.views.generic.edit import UpdateView
def home(request):
if request.user.is_authenticated ():
praise = random.choice(Praise.objects.filter(user=request.user))
return render (request, "home.html",{'praise': praise})
else:
return redirect ("/admin/login/?next=/")
#delete a praise
def delete(request, praise_id):
praise = Praise.objects.get(id=praise_id)
praise.delete()
return render(request, "delete.html", {'praise': praise})
#edit a praise
def edit(request, praise_id):
praise = Praise.objects.get(id=praise_id)
#process form data is a 'POST'
#show a form is 'GET'
if request.method == 'POST':
form = PraiseForm(request.POST,instance=praise)
if form.is_valid():
#save the new category to the database.
form.save()
#user will be shown the homepage
return redirect ("/")
else:
#prints that the form was not pushed
print form.errors
else: #If the request was not a POST, the form will display
form = PraiseForm(instance=praise)
return render(request, "edit.html", {'form':form})
def add(request):
#process form data is a 'POST'
#show a form is 'GET'
if request.method == 'POST':
form = PraiseForm(request.POST)
if form.is_valid():
#save the new category to the database.
praise = form.save(commit=False)
praise.user = request.user
praise.save()
#user will be shown the homepage
return redirect ("/")
else:
#prints that the form was not pushed
print form.errors
else: #If the request was not a POST, the form will display
form = PraiseForm
# Bad form (or form details), no form supplied...
# Render the form with error messages (if any).
return render(request, 'add.html', {'form':form})
# Create your views here.
| {
"content_hash": "44438e97031046664074fdca790738e6",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 66,
"avg_line_length": 26.816901408450704,
"alnum_prop": 0.7048319327731093,
"repo_name": "SEACodeCarrots/PraiseReminder",
"id": "2b66c7374b23d1b61b1562ae868681324e838550",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviver/backend/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3931"
},
{
"name": "HTML",
"bytes": "6213"
},
{
"name": "JavaScript",
"bytes": "366"
},
{
"name": "Python",
"bytes": "11038"
}
],
"symlink_target": ""
} |
from .base import ManagerApi
from maxipago.requesters.customer import CustomerRequester
from maxipago.resources.customer import CustomerAddResource, CustomerDeleteResource, CustomerUpdateResource
class CustomerManager(ManagerApi):
def add(self, **kwargs):
fields = (
('customer_id', {'translated_name': 'customerIdExt'}),
('first_name', {'translated_name': 'firstName'}),
('last_name', {'translated_name': 'lastName'}),
('address1', {'required': False}),
('address2', {'required': False}),
('city', {'required': False}),
('state', {'required': False}),
('zip_code', {'required': False, 'translated_name': 'zip'}),
('phone', {'required': False}),
('email', {'required': False}),
('birth_date', {'required': False, 'translated_name': 'dob'}),
('ssn', {'required': False}),
('sex', {'required': False}),
)
requester = CustomerRequester(fields, kwargs)
return self.send(command='add-consumer', requester=requester, resource=CustomerAddResource)
def delete(self, **kwargs):
fields = (
('id', {'translated_name': 'customerId'}),
)
requester = CustomerRequester(fields, kwargs)
return self.send(command='delete-consumer', requester=requester, resource=CustomerDeleteResource)
def update(self, **kwargs):
fields = (
('id', {'translated_name': 'customerId'}),
('customer_id', {'translated_name': 'customerIdExt'}),
('first_name', {'required': False, 'blank': True, 'translated_name': 'firstName'}),
('last_name', {'required': False, 'blank': True, 'translated_name': 'lastName'}),
('address1', {'required': False, 'blank': True}),
('address2', {'required': False, 'blank': True}),
('city', {'required': False, 'blank': True}),
('state', {'required': False, 'blank': True}),
('zip_code', {'required': False, 'blank': True, 'translated_name': 'zip'}),
('phone', {'required': False, 'blank': True}),
('email', {'required': False, 'blank': True}),
('birth_date', {'required': False, 'blank': True, 'translated_name': 'dob'}),
('ssn', {'required': False, 'blank': True}),
('sex', {'required': False, 'blank': True}),
)
requester = CustomerRequester(fields, kwargs)
return self.send(command='update-consumer', requester=requester, resource=CustomerUpdateResource)
| {
"content_hash": "932c4bf43874ff14ebe8ce76150a4b6a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 107,
"avg_line_length": 49.96153846153846,
"alnum_prop": 0.5619707467282525,
"repo_name": "maxipago/Python-integration-lib",
"id": "6848b52e89acdc316f5c07ed0a60be57be21e358",
"size": "2614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maxipago/managers/customer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46107"
}
],
"symlink_target": ""
} |
"""
Rasterplots
Utilities for raster image manipulation (e.g. OR maps) using
PIL/Pillow and Numpy. Used for visualizing orientation maps (with and
without selectivity), polar FFT spectra and afferent model weight
patterns.
"""
import Image
import ImageOps
import numpy as np
import colorsys
rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)
hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)
def black_selectivity(image, whitelevel=0.2):
"""
Makes zero selectivity black for publication. Swaps saturation and
value and scales saturation by the whitelevel.
"""
whitefactor = 1.0 / whitelevel # 25% multiplies by 4.0
image_rgba = image.convert('RGBA')
arr = np.asarray(image_rgba).astype('float')
r, g, b, a = np.rollaxis(arr, axis=-1)
h, s, v = rgb_to_hsv(r, g, b) # s is [0,1] all v are 255.0
s *= (255.0 * whitefactor)
r, g, b = hsv_to_rgb(h, (v / 255.0), np.clip(s, 0, 255.0))
arr_stack = np.dstack((r, g, b, a))
return Image.fromarray(arr_stack.astype('uint8'), 'RGBA')
def OR_map(preference, selectivity=None):
"""
Supply the raw preference and (optionally) selectivity. Note that
selectivity multiplier affects the raw selectivity data and is
therefore automatically applied.
"""
shape = preference.shape
if selectivity is None:
selectivity = np.ones(shape, dtype=np.float64)
else:
assert preference.shape == selectivity.shape, \
"Preference and selectivity shapes must match."
value = np.ones(shape, dtype=np.int64) * 255
channels = (preference, selectivity, value)
rgb_channels = hsv_to_rgb(*channels)
arr_stack = np.dstack(rgb_channels)
return Image.fromarray(arr_stack.astype('uint8'), 'RGB')
def greyscale(arr):
"""
Converts a numpy 2D array of floats between 0.0 and 1.0 to a PIL
greyscale image.
"""
return Image.fromarray(np.uint8(arr*255))
def cf_image(cfs, coords, width=None, height=None, pos=(0,0),
size=26, border=5, bg=(0,0,0), colmap=None):
"""
Returns a PIL image showing the selected connection fields (CFS)
as supplied by extract_CFs. Does not support non-square CF
shapes.
'cfs' is an ndarray of N dstacked cfs, each of shape (X,X): (X,X,N)
'coords' is an ndarray of N coordinates: (N,2)
'width' and 'height' are either None (full) of integer grid sizes
'pos' is the starting position of the block, (x,y)
'size' and 'border' are the cf image size and the border size in pixels.
'colmap' is an RGB array shape (N,M,3) with values between 0.0 and 1.0.
"""
normalize = lambda arr: (arr - arr.min()) / (arr.max() - arr.min())
cf_im = lambda cf, size: greyscale(normalize(cf)).resize((size,size),
Image.NEAREST)
(posx, posy) = pos
(d1,d2) = zip(*coords)
density = len(set(d1))
assert density == len(set(d2)), "Not implemented for non-square sets"
height = density if height is None else height
width = density if width is None else width
assert height>0 and width>0, "Height and width must be None or greater than zero"
assert posx+width <= density, "X position and width greater than density"
assert posy+height <= density, "Y position and width greater than density"
# Functions mapping original coordinates onto consecutive grid indices
fst_map = dict(((ind,i) for (i,ind) in enumerate(sorted(set(d1)))))
snd_map = dict(((ind,i) for (i,ind) in enumerate(sorted(set(d2)))))
# Generating a dictionary from the grid coordinates to the CF index
mapped_coords = [(fst_map[fst],snd_map[snd]) for [fst,snd] in coords]
indexed_coords = dict((coord,i) for (i, coord) in enumerate(mapped_coords))
# Initialising the image
imwidth = width*size+(width-1)*border
imheight = height*size+(height-1)*border
cf_block = Image.new('RGB', (imwidth, imheight), bg)
# Building image row by row, top to bottom.
for yind in range(height):
for xind in range(width):
# Swapped coordinate system
cf_ind = indexed_coords[(yind+posy, xind+posx)]
# Get color from the color map if available
if colmap is not None:
crd1, crd2 = coords[cf_ind]
r,g,b = colmap[crd1, crd2, :]
color = (r*255,g*255,b*255)
else:
color = (255, 255, 255)
cf = cfs[:,:,cf_ind]
(cf_dim1, cf_dim2) = cf.shape
assert cf_dim1 == cf_dim2, "Only supports square CFs."
cf_image = ImageOps.colorize(cf_im(cf, size), (0, 0, 0, 0), color)
xoffset = xind*border
yoffset = yind*border
paste_coord = (xoffset+xind*size, yoffset+yind*size)
cf_block.paste(cf_image, paste_coord)
return cf_block
def resize(image, size, filter_type=Image.NEAREST):
"""
Resizes the given image to the given size using the specified
filter. Default is box filter (no interpolation) appropriate for
simulated orientation maps.
"""
return image.resize(size, filter_type)
#########################
# DEPRACATION FUNCTIONS #
#########################
def greyscale_image(array2D, normalize=False, scale_factor=255):
raise Exception("Use greyscale instead")
#########################
#########################
#########################
| {
"content_hash": "36d6c08ae2c23da4cbe63e74e99facd1",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 85,
"avg_line_length": 37.72027972027972,
"alnum_prop": 0.6245828698553949,
"repo_name": "ioam/topographica",
"id": "7a0a5e77c99ae356720972804a9813a19658c226",
"size": "5394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "models/stevens.jn13/jn13_figures/lib/rasterplots.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "14889"
},
{
"name": "C++",
"bytes": "5714"
},
{
"name": "Elixir",
"bytes": "202"
},
{
"name": "JavaScript",
"bytes": "122"
},
{
"name": "Jupyter Notebook",
"bytes": "7580101"
},
{
"name": "Makefile",
"bytes": "15490"
},
{
"name": "Python",
"bytes": "1681956"
},
{
"name": "Shell",
"bytes": "1577"
},
{
"name": "TeX",
"bytes": "253834"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_link_hubs_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_resource_group_request,
build_list_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkHubsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.synapse.aio.SynapseManagementClient`'s
:attr:`private_link_hubs` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateLinkHub"]:
"""Returns a list of privateLinkHubs in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkHub or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.PrivateLinkHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
cls: ClsType[_models.PrivateLinkHubInfoListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateLinkHubInfoListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/privateLinkHubs"
}
@distributed_trace_async
async def get(self, resource_group_name: str, private_link_hub_name: str, **kwargs: Any) -> _models.PrivateLinkHub:
"""Gets a privateLinkHub.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param private_link_hub_name: Name of the privateLinkHub. Required.
:type private_link_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkHub or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateLinkHub
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
cls: ClsType[_models.PrivateLinkHub] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkHub", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/privateLinkHubs/{privateLinkHubName}"
}
@overload
async def update(
self,
resource_group_name: str,
private_link_hub_name: str,
private_link_hub_patch_info: _models.PrivateLinkHubPatchInfo,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateLinkHub:
"""Updates a privateLinkHub.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param private_link_hub_name: Name of the privateLinkHub. Required.
:type private_link_hub_name: str
:param private_link_hub_patch_info: PrivateLinkHub patch request properties. Required.
:type private_link_hub_patch_info: ~azure.mgmt.synapse.models.PrivateLinkHubPatchInfo
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkHub or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateLinkHub
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
private_link_hub_name: str,
private_link_hub_patch_info: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateLinkHub:
"""Updates a privateLinkHub.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param private_link_hub_name: Name of the privateLinkHub. Required.
:type private_link_hub_name: str
:param private_link_hub_patch_info: PrivateLinkHub patch request properties. Required.
:type private_link_hub_patch_info: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkHub or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateLinkHub
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
private_link_hub_name: str,
private_link_hub_patch_info: Union[_models.PrivateLinkHubPatchInfo, IO],
**kwargs: Any
) -> _models.PrivateLinkHub:
"""Updates a privateLinkHub.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param private_link_hub_name: Name of the privateLinkHub. Required.
:type private_link_hub_name: str
:param private_link_hub_patch_info: PrivateLinkHub patch request properties. Is either a model
type or a IO type. Required.
:type private_link_hub_patch_info: ~azure.mgmt.synapse.models.PrivateLinkHubPatchInfo or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkHub or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateLinkHub
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateLinkHub] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(private_link_hub_patch_info, (IO, bytes)):
_content = private_link_hub_patch_info
else:
_json = self._serialize.body(private_link_hub_patch_info, "PrivateLinkHubPatchInfo")
request = build_update_request(
resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("PrivateLinkHub", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("PrivateLinkHub", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/privateLinkHubs/{privateLinkHubName}"
}
@overload
async def create_or_update(
self,
resource_group_name: str,
private_link_hub_name: str,
private_link_hub_info: _models.PrivateLinkHub,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateLinkHub:
"""Creates or updates a privateLinkHub.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param private_link_hub_name: Name of the privateLinkHub. Required.
:type private_link_hub_name: str
:param private_link_hub_info: PrivateLinkHub create or update request properties. Required.
:type private_link_hub_info: ~azure.mgmt.synapse.models.PrivateLinkHub
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkHub or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateLinkHub
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
private_link_hub_name: str,
private_link_hub_info: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateLinkHub:
"""Creates or updates a privateLinkHub.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param private_link_hub_name: Name of the privateLinkHub. Required.
:type private_link_hub_name: str
:param private_link_hub_info: PrivateLinkHub create or update request properties. Required.
:type private_link_hub_info: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkHub or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateLinkHub
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
private_link_hub_name: str,
private_link_hub_info: Union[_models.PrivateLinkHub, IO],
**kwargs: Any
) -> _models.PrivateLinkHub:
"""Creates or updates a privateLinkHub.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param private_link_hub_name: Name of the privateLinkHub. Required.
:type private_link_hub_name: str
:param private_link_hub_info: PrivateLinkHub create or update request properties. Is either a
model type or a IO type. Required.
:type private_link_hub_info: ~azure.mgmt.synapse.models.PrivateLinkHub or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkHub or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.PrivateLinkHub
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateLinkHub] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(private_link_hub_info, (IO, bytes)):
_content = private_link_hub_info
else:
_json = self._serialize.body(private_link_hub_info, "PrivateLinkHub")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("PrivateLinkHub", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("PrivateLinkHub", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/privateLinkHubs/{privateLinkHubName}"
}
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, private_link_hub_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/privateLinkHubs/{privateLinkHubName}"
}
@distributed_trace_async
async def begin_delete(
    self, resource_group_name: str, private_link_hub_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes a privateLinkHub.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :param private_link_hub_name: Name of the privateLinkHub. Required.
    :type private_link_hub_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
    cls: ClsType[None] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    # Only issue the initial DELETE request when we are not resuming a
    # poller from a saved continuation token.
    if cont_token is None:
        raw_result = await self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            private_link_hub_name=private_link_hub_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
        # The DELETE LRO produces no body; only invoke the caller's custom
        # deserialization hook when one was supplied.
        if cls:
            return cls(pipeline_response, None, {})

    # Resolve the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Rebuild the poller from its saved state instead of starting over.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_delete.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/privateLinkHubs/{privateLinkHubName}"
}
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.PrivateLinkHub"]:
    """Returns a list of privateLinkHubs in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PrivateLinkHub or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.PrivateLinkHub]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
    cls: ClsType[_models.PrivateLinkHubInfoListResult] = kwargs.pop("cls", None)

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page uses the operation's templated URL; subsequent pages
        # follow the service-provided nextLink verbatim.
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = HttpRequest("GET", next_link)
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (next-page link, items).
        deserialized = self._deserialize("PrivateLinkHubInfoListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)  # type: ignore
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page, mapping non-200 statuses to typed exceptions.
        request = prepare_request(next_link)

        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Synapse/privateLinkHubs"}
| {
"content_hash": "266db2b6499666adfe0601f9b8838cb1",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 148,
"avg_line_length": 44.651785714285715,
"alnum_prop": 0.6438378990868493,
"repo_name": "Azure/azure-sdk-for-python",
"id": "59210109ff6d441116381ce94a8035f25a86752e",
"size": "30506",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_private_link_hubs_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sublime
import sublime_plugin
import os
import re
import imp
import sys
import warnings
DEFAULT_SETTINGS = \
'''
{
// If you want exceptions reraised so you can see them in the console, change this to true.
"reraise_exceptions": false,
// If you want to have a syntax applied when new files are created, set new_file_syntax to the name of the syntax to use.
// The format is exactly the same as "name" in the rules below. For example, if you want to have a new file use
// JavaScript syntax, set new_file_syntax to 'JavaScript'.
"new_file_syntax": false,
// Put your custom syntax rules here:
"syntaxes": [
]
}
'''
DEPRECATED_SHORT_SYNTAX = '''ApplySyntax:
Deprecated Call: %s
Short format of syntax file path has been deprecated in order to ease confusion. A consistent format is being used now in all cases. Please use the long form from now on: "name": <Package Name>/<Path to syntax file>/<File name (do not need .tmLanguage)>
'''
DEPRECATED_SHORT_FUNCTION = '''ApplySyntax:
Deprecated Call: %s
This call will be skipped.
Short format of function rules has been deprecated in order to ease confusion. A consistent format is being used now in all cases. Please use the long form from now on: {"function": {"name": <Name of function>, "source": <Package Name>/<Path to syntax file>/<File name (do not need .py)>}}
'''
def sublime_format_path(pth):
    """Return *pth* normalized to Sublime Text's internal path format.

    On Windows an absolute path like ``C:\\foo`` or ``C:/foo`` is rewritten
    to ``C/foo``; all backslashes become forward slashes on every platform.
    """
    # Capture the drive letter and the remainder of an absolute Windows path.
    m = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
    # Idiom fix: compare against None with `is not`, never `!=`.
    if sublime.platform() == "windows" and m is not None:
        pth = m.group(1) + "/" + m.group(2)
    return pth.replace("\\", "/")
def log(msg):
    """Write *msg* to the Sublime console, prefixed with the plugin name."""
    template = "ApplySyntax: %s"
    print(template % msg)
def debug(msg):
    """Log *msg* only when the "debug_enabled" plugin setting is on."""
    settings = sublime.load_settings('ApplySyntax.sublime-settings')
    if bool(settings.get("debug_enabled", False)):
        log(msg)
class ApplySyntaxCommand(sublime_plugin.EventListener):
    """Apply a syntax to views based on user-defined matching rules.

    Rules come from the plugin settings (optionally overridden per view or
    project) and are matched against the file name, the first line, a
    shebang interpreter ("binary"), or the entire file contents; a rule may
    also delegate to a custom Python function.
    """

    def __init__(self):
        # Per-view cached state, refreshed by reset_cache_variables().
        self.first_line = None
        self.file_name = None
        self.entire_file = None
        self.view = None
        self.syntaxes = []
        self.plugin_name = 'ApplySyntax'
        self.plugin_dir = os.path.join(sublime.packages_path(), self.plugin_name)
        self.settings_file = self.plugin_name + '.sublime-settings'
        self.reraise_exceptions = False

    def get_setting(self, name, default=None):
        """Look up *name*, preferring view/project settings over plugin settings."""
        plugin_settings = sublime.load_settings(self.settings_file)
        active_settings = self.view.settings() if self.view else {}
        return active_settings.get(name, plugin_settings.get(name, default))

    def on_new(self, view):
        """Apply the configured "new_file_syntax" (if any) to new buffers."""
        self.ensure_user_settings()
        name = self.get_setting("new_file_syntax")
        if name:
            self.view = view
            self.set_syntax(name)

    def on_load(self, view):
        self.detect_syntax(view)

    def on_post_save(self, view):
        self.detect_syntax(view)

    def detect_syntax(self, view):
        """Run the rules against *view* and apply the first matching syntax."""
        # BUGFIX: ``view.file_name`` is a method; the original tested the
        # bound-method object (always truthy), so unsaved buffers were
        # never skipped.  Call it to check the buffer has been saved.
        if view.is_scratch() or not view.file_name():
            return
        self.reset_cache_variables(view)
        self.load_syntaxes()
        if not self.syntaxes:
            return
        for syntax in self.syntaxes:
            # stop on the first syntax that matches
            if self.syntax_matches(syntax):
                self.set_syntax(syntax.get("name"))
                break

    def reset_cache_variables(self, view):
        """Reset cached view/file state before evaluating rules."""
        self.view = view
        self.file_name = view.file_name()
        self.first_line = None  # read lazily by fetch_first_line()
        self.entire_file = None  # read lazily by fetch_entire_file()
        self.syntaxes = []
        self.reraise_exceptions = False

    def fetch_first_line(self):
        # Load the first line only when a rule actually needs it.
        self.first_line = self.view.substr(self.view.line(0))

    def fetch_entire_file(self):
        # Load the whole buffer only when a "contains" rule needs it.
        self.entire_file = self.view.substr(sublime.Region(0, self.view.size()))

    def set_syntax(self, name):
        """Apply the first existing syntax file named in *name* (str or list).

        Each entry has the form ``<Package>/<path>/<syntax name>``; the
        ``.tmLanguage`` extension is appended automatically.
        """
        names = name if isinstance(name, list) else [name]
        for candidate in names:
            # Rules use '/'-separated paths; split into package path + base name.
            path = os.path.dirname(candidate)
            base = os.path.basename(candidate)
            if not path:
                # Old short-form rule: warn and fall back to <name>/<name>.
                sublime.error_message(DEPRECATED_SHORT_SYNTAX % base)
                path = base
            file_name = base + '.tmLanguage'
            new_syntax = sublime_format_path(os.path.join("Packages", path, file_name))
            file_path = os.path.join(sublime.packages_path(), path, file_name)
            current_syntax = self.view.settings().get('syntax')
            # only set the syntax if it's different
            if new_syntax != current_syntax:
                # make sure the syntax file exists before applying it
                if os.path.exists(file_path):
                    self.view.set_syntax_file(new_syntax)
                    log('Syntax set to ' + base + ' using ' + new_syntax)
                    break
                else:
                    log('Syntax file for ' + base + ' does not exist at ' + new_syntax)
            else:
                log('Syntax already set to ' + new_syntax)
                break

    def load_syntaxes(self):
        """Collect rules from project, user, and default settings, in that priority."""
        self.ensure_user_settings()
        settings = sublime.load_settings(self.settings_file)
        self.reraise_exceptions = settings.get("reraise_exceptions")
        default_syntaxes = self.get_setting("default_syntaxes", [])
        user_syntaxes = self.get_setting("syntaxes", [])
        project_syntaxes = self.get_setting("project_syntaxes", [])
        # Project rules win over user rules, which win over the defaults.
        self.syntaxes = project_syntaxes + user_syntaxes + default_syntaxes

    def syntax_matches(self, syntax):
        """Return True when *syntax*'s rules match.

        ``"match": "all"`` requires every rule to pass; otherwise the first
        passing rule wins.
        """
        rules = syntax.get("rules")
        match_all = syntax.get("match") == 'all'
        for rule in rules:
            if 'function' in rule:
                result = self.function_matches(rule)
            else:
                result = self.regexp_matches(rule)
            if match_all:
                # every rule must pass; bail out on the first failure
                if not result:
                    return False
            elif result:
                # "any" semantics: first success wins
                return True
        # "all": every rule passed -> True; "any": nothing matched -> False
        return match_all

    def get_function(self, path_to_file, function_name):
        """Load *function_name* from the Python file at *path_to_file*.

        Returns None on any failure unless "reraise_exceptions" is set.
        """
        try:
            path_name = sublime_format_path(path_to_file.replace(sublime.packages_path(), ''))
            module_name = os.path.splitext(path_name)[0].replace('/', '.')
            with warnings.catch_warnings(record=True):
                # Ignore warnings about the plugin folder not being a
                # python package (record=True swallows them).
                warnings.simplefilter("always")
                module = imp.new_module(module_name)
            sys.modules[module_name] = module
            with open(path_to_file, "r") as f:
                source = f.read()
            self.execute_function(source, module_name)
            function = getattr(module, function_name)
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C is not swallowed.
            if self.reraise_exceptions:
                raise
            else:
                function = None
        return function

    def execute_function(self, source, module_name):
        """Exec *source* inside the already-registered module's namespace."""
        exec(compile(source, module_name, 'exec'), sys.modules[module_name].__dict__)

    def function_matches(self, rule):
        """Evaluate a custom-function rule against the current file name."""
        function = rule.get("function")
        path_to_file = function.get("source")
        function_name = function.get("name")
        if not path_to_file or path_to_file.lower().endswith(".py"):
            # Old short form supplied a bare .py path; refuse and warn.
            sublime.error_message(DEPRECATED_SHORT_FUNCTION % path_to_file)
            return False
        path_to_file = os.path.join(sublime.packages_path(), path_to_file + '.py')
        function = self.get_function(path_to_file, function_name)
        if function is None:
            # can't find it ... nothing more to do
            return False
        try:
            return function(self.file_name)
        except Exception:
            if self.reraise_exceptions:
                raise
            else:
                return False

    def regexp_matches(self, rule):
        """Evaluate a regex rule (first_line / binary / file_name / contains)."""
        from_beginning = True  # anchor at the start vs. search anywhere
        if "first_line" in rule:
            if self.first_line is None:
                self.fetch_first_line()
            subject = self.first_line
            regexp = rule.get("first_line")
        elif "binary" in rule:
            if self.first_line is None:
                self.fetch_first_line()
            subject = self.first_line
            # "binary" rules match an interpreter name in a shebang line.
            regexp = '^#\\!(?:.+)' + rule.get("binary")
        elif "file_name" in rule:
            subject = self.file_name
            regexp = rule.get("file_name")
        elif "contains" in rule:
            if self.entire_file is None:
                self.fetch_entire_file()
            subject = self.entire_file
            regexp = rule.get("contains")
            from_beginning = False  # may match anywhere in the file
        else:
            return False
        if regexp and subject:
            if from_beginning:
                result = re.match(regexp, subject)
            else:
                result = re.search(regexp, subject)
            return result is not None
        else:
            return False

    def ensure_user_settings(self):
        """Create a bare user settings file if none exists yet."""
        user_settings_file = os.path.join(sublime.packages_path(), 'User', self.settings_file)
        if os.path.exists(user_settings_file):
            return
        # file doesn't exist, let's create a bare one
        with open(user_settings_file, 'w') as f:
            f.write(DEFAULT_SETTINGS)
| {
"content_hash": "b85be21d5facba847d6b4e076d3712ed",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 291,
"avg_line_length": 36.32302405498282,
"alnum_prop": 0.5855250709555345,
"repo_name": "dostavro/dotfiles",
"id": "f1ec8d928d91b4ef344f90a76526808473a6c8dd",
"size": "10570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sublime2/Packages/ApplySyntax/ApplySyntax.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Utilities used in tornado."""
import socket
import errno
from tornado import ioloop
class TCPHandler(object):
    """TCP socket handler backed tornado event loop.

    Subclasses are expected to provide ``on_message``, ``on_error`` and
    ``on_close`` callbacks (not defined here -- confirm against callers).

    Parameters
    ----------
    sock : Socket
        The TCP socket, will set it to non-blocking mode.
    """
    def __init__(self, sock):
        self._sock = sock
        self._ioloop = ioloop.IOLoop.current()
        self._sock.setblocking(0)
        self._pending_write = []
        self._signal_close = False

        # Adapter closure: the io-loop passes (fd, events); drop the fd and
        # forward the events bitmask to the bound method.
        def _event_handler(_, events):
            self._event_handler(events)
        # Register for READ and ERROR only; WRITE is added on demand when
        # there is pending outgoing data (see _update_write).
        self._ioloop.add_handler(
            self._sock.fileno(), _event_handler,
            self._ioloop.READ | self._ioloop.ERROR)

    def signal_close(self):
        """Signal the handler to close.

        The handler will be closed after the existing
        pending message are sent to the peer.
        """
        if not self._pending_write:
            self.close()
        else:
            self._signal_close = True

    def close(self):
        """Close the socket"""
        if self._sock is not None:
            try:
                self._ioloop.remove_handler(self._sock.fileno())
                self._sock.close()
            except socket.error:
                # Best effort: the fd may already be gone.
                pass
            self._sock = None
            self.on_close()

    def write_message(self, message, binary=True):
        # Queue the message and try to flush immediately; unsent bytes stay
        # queued and trigger WRITE-event registration.
        assert binary
        if self._sock is None:
            raise IOError("socket is already closed")
        self._pending_write.append(message)
        self._update_write()

    def _event_handler(self, events):
        """centeral event handler"""
        # READ/ERROR first; only attempt a write afterwards if the read
        # side reported the connection is still usable.
        if (events & self._ioloop.ERROR) or (events & self._ioloop.READ):
            if self._update_read() and (events & self._ioloop.WRITE):
                self._update_write()
        elif events & self._ioloop.WRITE:
            self._update_write()

    def _update_write(self):
        """Update the state on write"""
        # Flush as much of the queue as the socket accepts; partial sends
        # keep the unsent tail at the head of the queue.
        while self._pending_write:
            try:
                msg = self._pending_write[0]
                if self._sock is None:
                    return
                nsend = self._sock.send(msg)
                if nsend != len(msg):
                    self._pending_write[0] = msg[nsend:]
                else:
                    self._pending_write.pop(0)
            except socket.error as err:
                # EAGAIN/EWOULDBLOCK: kernel buffer full, retry on next
                # WRITE event; anything else is a real error.
                if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                    break
                else:
                    self.on_error(err)

        if self._pending_write:
            # Data still queued: also listen for writability.
            self._ioloop.update_handler(
                self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR | self._ioloop.WRITE)
        else:
            if self._signal_close:
                # Queue drained after signal_close() was requested.
                self.close()
            else:
                self._ioloop.update_handler(
                    self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR)

    def _update_read(self):
        """Update state when there is read event"""
        # Returns True when the connection is still alive, False otherwise.
        try:
            msg = bytes(self._sock.recv(4096))
            if msg:
                self.on_message(msg)
                return True
            # normal close, remote is closed
            self.close()
        except socket.error as err:
            if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                # Spurious wakeup: nothing to read yet.
                pass
            else:
                self.on_error(err)
        return False
| {
"content_hash": "34e1aef5291ebeb3e6b6ef383d20a3c9",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 97,
"avg_line_length": 32.39047619047619,
"alnum_prop": 0.5142605116142311,
"repo_name": "Huyuwei/tvm",
"id": "b7b16188a5f6dfbe926556335b535f4ebc541b88",
"size": "4186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/tvm/rpc/tornado_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
} |
"""
Script to draw skimage logo using Scipy logo as stencil. The easiest
starting point is the `plot_colorized_logo`.
Original snake image from pixabay [1]_
.. [1] http://pixabay.com/en/snake-green-toxic-close-yellow-3237/
"""
import sys
# Select the non-interactive Agg backend BEFORE matplotlib.pyplot is
# imported below; without '--no-plot' the default (possibly GUI) backend
# is kept and a reminder is printed.
if len(sys.argv) != 2 or sys.argv[1] != '--no-plot':
    print("Run with '--no-plot' flag to generate logo silently.")
else:
    import matplotlib as mpl
    mpl.use('Agg')
import numpy as np
import skimage.io as sio
from skimage import img_as_float
from skimage.color import gray2rgb, rgb2gray
from skimage.exposure import rescale_intensity
from skimage.filter import sobel
import scipy_logo
# Utility functions
# =================
def colorize(image, color, whiten=False):
    """Return a colorized image built from a gray-scale image.

    Intensities map from black (lowest) to ``color`` (highest).  With
    ``whiten=True`` they map from ``color`` to white instead.
    """
    rgb = np.asarray(color)[np.newaxis, np.newaxis, :]
    gray = image[:, :, np.newaxis]
    if not whiten:
        return gray * rgb
    # truncate and stretch intensity range to enhance contrast
    stretched = rescale_intensity(gray, in_range=(0.3, 1))
    return rgb * (1 - stretched) + stretched
def prepare_axes(ax):
    """Make *ax* the current axes and hide its ticks, labels and frame."""
    plt.sca(ax)
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_visible(False)
    for spine in ax.spines.values():
        spine.set_visible(False)
# Logo generating classes
# =======================
class LogoBase(object):
    """Base class holding a cropped logo image plus stencil masks and edges.

    Subclasses must define class attributes ``radius`` and ``origin`` and
    set ``self.image`` before calling ``__init__``.
    """

    def __init__(self):
        self.logo = scipy_logo.ScipyLogo(radius=self.radius)
        self.mask_1 = self.logo.get_mask(self.image.shape, 'upper left')
        self.mask_2 = self.logo.get_mask(self.image.shape, 'lower right')

        # Sobel-filter each channel, then stretch intensities to enhance
        # contrast.
        edges = np.array([sobel(img) for img in self.image.T]).T
        self.edges = rescale_intensity(edges, in_range=(0, 0.4))

    def _crop_image(self, image):
        """Crop a square window of side ``2 * radius`` anchored at ``origin``."""
        side = 2 * self.radius
        x0, y0 = self.origin
        return image[y0:y0 + side, x0:x0 + side]

    def plot_curve(self, **kwargs):
        """Draw the logo's snake curve on the current axes."""
        self.logo.plot_snake_curve(**kwargs)
class SnakeLogo(LogoBase):
    """Logo stencil built from the pixabay snake photograph."""

    radius = 250
    origin = (420, 0)

    def __init__(self):
        raw = sio.imread('data/snake_pixabay.jpg')
        raw = self._crop_image(raw)
        self.image = img_as_float(raw)
        LogoBase.__init__(self)
# Two stencil instances: one keeps the original RGB image, the other is
# converted to gray scale below.
snake_color = SnakeLogo()
snake = SnakeLogo()

# turn RGB image into gray image
snake.image = rgb2gray(snake.image)
snake.edges = rgb2gray(snake.edges)
# Demo plotting functions
# =======================
def plot_colorized_logo(logo, color, edges='light', whiten=False):
    """Plot an artificially-colored version of *logo* on the current axes.

    The upper-left half of the logo shows the Sobel edge image; the
    lower-right half shows the unfiltered image.

    Parameters
    ----------
    logo : LogoBase instance
    color : length-3 sequence of floats or 2 length-3 sequences
        RGB color spec with values between 0 and 1.  A pair applies the
        first color to the upper-left half and the second to the
        lower-right half.
    edges : {'light'|'dark'}
        Whether Sobel edges are drawn light or dark.
    whiten : bool or 2 bools
        If True, a color value less than 1 increases the image intensity.
    """
    if not hasattr(color[0], '__iter__'):
        color = [color] * 2  # same color for both halves
    if not hasattr(whiten, '__iter__'):
        whiten = [whiten] * 2  # same setting for both halves

    canvas = gray2rgb(np.ones_like(logo.image))
    lower_mask = gray2rgb(logo.mask_2)
    upper_mask = gray2rgb(logo.mask_1)

    # Colorize the edge image (upper-left) and the raw image (lower-right).
    edge_source = 1 - logo.edges if edges == 'dark' else logo.edges
    colored_edges = colorize(edge_source, color[0], whiten=whiten[0])
    colored_image = colorize(logo.image, color[1], whiten=whiten[1])

    canvas[lower_mask] = colored_image[lower_mask]
    canvas[upper_mask] = colored_edges[upper_mask]

    logo.plot_curve(lw=5, color='w')  # plot snake curve on current axes
    plt.imshow(canvas)
if __name__ == '__main__':
    # Colors to use for the logo:
    red = (1, 0, 0)
    blue = (0.35, 0.55, 0.85)
    green_orange = ((0.6, 0.8, 0.3), (1, 0.5, 0.1))

    def plot_all():
        # Grid of every color/edge-style combination for comparison.
        color_list = [red, blue, green_orange]
        edge_list = ['light', 'dark']
        f, axes = plt.subplots(nrows=len(edge_list), ncols=len(color_list))
        for axes_row, edges in zip(axes, edge_list):
            for ax, color in zip(axes_row, color_list):
                prepare_axes(ax)
                plot_colorized_logo(snake, color, edges=edges)
        plt.tight_layout()

    def plot_official_logo():
        # The green/orange dark-edged variant is the official logo; save it.
        f, ax = plt.subplots()
        prepare_axes(ax)
        plot_colorized_logo(snake, green_orange, edges='dark',
                            whiten=(False, True))
        plt.savefig('green_orange_snake.png', bbox_inches='tight')

    plot_all()
    plot_official_logo()
    plt.show()
| {
"content_hash": "8ad129a4501e63c64088e80adad4cb86",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 78,
"avg_line_length": 30.606060606060606,
"alnum_prop": 0.6259405940594059,
"repo_name": "chintak/scikit-image",
"id": "85ba51991a7cf08b3ab41c9bbdc68c8c91895fd5",
"size": "5050",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "doc/logo/scikit_image_logo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "70225"
},
{
"name": "CSS",
"bytes": "3629"
},
{
"name": "JavaScript",
"bytes": "777"
},
{
"name": "Python",
"bytes": "2115723"
},
{
"name": "Shell",
"bytes": "3346"
}
],
"symlink_target": ""
} |
from collections import Iterable
import logging
import itertools
from pulp import LpProblem, lpSum, LpVariable, LpMinimize, LpMaximize
def _add_elements(seq, *elements):
for element in elements:
seq.append(element)
def _reciprocal(func):
def _wrapper(*args, **kw):
result = func(*args, **kw)
if isinstance(result, Iterable):
return [1.0 / x for x in result]
else:
return 1.0 / result
return _wrapper
def _energy_min(energies, capitals, productions, turn_overs, co2s, dmu_right):
    """Solve the energy-minimisation LP for one right-hand-side DMU.

    Minimises total energy over the reference DMUs subject to capital,
    turn-over, production and CO2 constraints taken from *dmu_right*,
    and returns objective / dmu_right energy (the efficiency ratio).
    Raises UserWarning when the LP solver does not report optimality.
    """
    # Right-hand-side values extracted from the evaluated DMU.
    energy_right = dmu_right.energy.total
    capital_right = dmu_right.capital.capital
    production_right = dmu_right.production.production
    turn_over_right = dmu_right.turn_over.turn_over
    co2_right = dmu_right.co2.total
    prob = LpProblem("lambda_min", LpMinimize)
    variables_count = len(energies)
    # One intensity variable per reference DMU, named "x_1" .. "x_n".
    ingredients = [str(symbols + 1) for symbols in range(variables_count)]
    symbols = LpVariable.dict("x_%s", ingredients, lowBound=0)
    cost = dict(zip(ingredients, energies))
    # Objective: minimise the energy of the composite reference unit.
    prob += lpSum([cost[i] * symbols[i] for i in ingredients])
    capital_dict = dict(zip(ingredients, capitals))
    turn_over_dict = dict(zip(ingredients, turn_overs))
    production_dict = dict(zip(ingredients, productions))
    co2_dict = dict(zip(ingredients, co2s))
    # Inputs no greater, outputs no smaller, CO2 exactly equal.
    prob += lpSum([capital_dict[i] * symbols[i]
                   for i in ingredients]) <= capital_right
    prob += lpSum([turn_over_dict[i] * symbols[i]
                   for i in ingredients]) >= turn_over_right
    prob += lpSum([production_dict[i] * symbols[i]
                   for i in ingredients]) >= production_right
    prob += lpSum([co2_dict[i] * symbols[i]
                   for i in ingredients]) == co2_right
    if prob.solve() != 1:
        # pulp returns status 1 for "Optimal".
        logging.error("psi minimize unsolved situation")
        raise UserWarning
    else:
        values = prob.objective.value()
        return values / energy_right
@_reciprocal
def contemporaneous_energy_min(dmus, dmu_right):
    """Contemporaneous energy-efficiency score of *dmu_right* against *dmus*."""
    energy_in = [d.energy.total for d in dmus]
    capital_in = [d.capital.capital for d in dmus]
    turn_over_in = [d.turn_over.turn_over for d in dmus]
    production_in = [d.production.production for d in dmus]
    co2_in = [d.co2.total for d in dmus]
    return _energy_min(energy_in, capital_in, production_in,
                       turn_over_in, co2_in, dmu_right)
@_reciprocal
def global_energy_min(dmus_s, *dmus_right):
    """Global-frontier energy-efficiency scores for each DMU in *dmus_right*.

    Pools every period's DMUs (*dmus_s* is a sequence of DMU lists) into a
    single reference set, then solves the energy-minimisation LP once per
    right-hand-side DMU.  Returns one score per element of *dmus_right*.
    """
    def _gather(extract):
        # Flatten [[dmu, ...], ...] into one list of extracted attributes.
        return [extract(dmu) for dmus in dmus_s for dmu in dmus]

    energies = _gather(lambda d: d.energy.total)
    capitals = _gather(lambda d: d.capital.capital)
    turn_overs = _gather(lambda d: d.turn_over.turn_over)
    productions = _gather(lambda d: d.production.production)
    co2s = _gather(lambda d: d.co2.total)
    return [_energy_min(energies, capitals, productions, turn_overs, co2s, dmu_right)
            for dmu_right in dmus_right]
def _production_max(energies, capitals, productions, turn_overs, co2s, dmu_right):
    """Solve the production-maximisation LP for one right-hand-side DMU.

    Maximises production over the reference DMUs subject to energy,
    capital, turn-over and CO2 constraints taken from *dmu_right*, and
    returns objective / dmu_right production.  Raises UserWarning when
    the LP solver does not report optimality.
    """
    # Right-hand-side values extracted from the evaluated DMU.
    energy_right = dmu_right.energy.total
    capital_right = dmu_right.capital.capital
    turn_over_right = dmu_right.turn_over.turn_over
    production_right = dmu_right.production.production
    co2_right = dmu_right.co2.total
    prob = LpProblem("eta max", LpMaximize)
    variables_count = len(productions)
    # One intensity variable per reference DMU, named "x_1" .. "x_n".
    ingredients = [str(symbol + 1) for symbol in range(variables_count)]
    symbols = LpVariable.dict('x_%s', ingredients, lowBound=0)
    cost = dict(zip(ingredients, productions))
    # Objective: maximise the production of the composite reference unit.
    prob += lpSum([cost[i] * symbols[i] for i in ingredients])
    energy_dict = dict(zip(ingredients, energies))
    capital_dict = dict(zip(ingredients, capitals))
    turn_over_dict = dict(zip(ingredients, turn_overs))
    co2_dict = dict(zip(ingredients, co2s))
    # Inputs no greater, turn-over no smaller, CO2 exactly equal.
    prob += lpSum([energy_dict[i] * symbols[i]
                   for i in ingredients]) <= energy_right
    prob += lpSum([capital_dict[i] * symbols[i]
                   for i in ingredients]) <= capital_right
    prob += lpSum([turn_over_dict[i] * symbols[i]
                   for i in ingredients]) >= turn_over_right
    prob += lpSum([co2_dict[i] * symbols[i]
                   for i in ingredients]) == co2_right
    if prob.solve() != 1:
        # pulp returns status 1 for "Optimal".
        logging.error("eta max unsolved situation")
        raise UserWarning
    else:
        return prob.objective.value() / production_right
@_reciprocal
def contemporaneous_production_max(dmus, dmu_right):
    """Contemporaneous production-efficiency score of *dmu_right* against *dmus*."""
    energy_in = [d.energy.total for d in dmus]
    capital_in = [d.capital.capital for d in dmus]
    turn_over_in = [d.turn_over.turn_over for d in dmus]
    production_in = [d.production.production for d in dmus]
    co2_in = [d.co2.total for d in dmus]
    return _production_max(energy_in, capital_in, production_in,
                           turn_over_in, co2_in, dmu_right)
@_reciprocal
def global_production_max(dmus_s, *dmus_right):
    """Global-frontier production-efficiency scores for each DMU in *dmus_right*.

    Pools every period's DMUs (*dmus_s* is a sequence of DMU lists) into a
    single reference set, then solves the production-maximisation LP once
    per right-hand-side DMU.  Returns one score per element of *dmus_right*.
    """
    def _gather(extract):
        # Flatten [[dmu, ...], ...] into one list of extracted attributes.
        return [extract(dmu) for dmus in dmus_s for dmu in dmus]

    energies = _gather(lambda d: d.energy.total)
    capitals = _gather(lambda d: d.capital.capital)
    turn_overs = _gather(lambda d: d.turn_over.turn_over)
    productions = _gather(lambda d: d.production.production)
    co2s = _gather(lambda d: d.co2.total)
    return [_production_max(energies, capitals, productions, turn_overs, co2s, dmu_right)
            for dmu_right in dmus_right]
| {
"content_hash": "f2568451a2a7184d80dcb99d59947d63",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 100,
"avg_line_length": 45.39568345323741,
"alnum_prop": 0.61743264659271,
"repo_name": "gaufung/LMDI",
"id": "f5e38d93ec251930d8684370b985ffbd320edeb7",
"size": "6335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PDA_Transport/optimization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "280166"
}
],
"symlink_target": ""
} |
def extractMoooriyelsmtladventureWordpressCom(item):
    """Parser for 'moooriyelsmtladventure.wordpress.com'."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items without a chapter/volume number, and preview posts.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    tag_to_release = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in tag_to_release.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "41278ae55d62fbe8173ecfbe89f6c9a8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.761904761904763,
"alnum_prop": 0.6500857632933105,
"repo_name": "fake-name/ReadableWebProxy",
"id": "ecb754675cee7c191d67f8f5bbca0d31c646395e",
"size": "584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractMoooriyelsmtladventureWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 1800, 1800],
min_off_durations=[12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=8,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-1,
learning_rate_changes_by_iteration={
2500: 1e-2
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
    """Build and return the Net for experiment 'a'.

    The architecture is: dimshuffle -> 1D convolution over time (stride 2)
    -> dimshuffle back -> three rectified dense layers -> a softplus output
    layer sized for every target appliance.

    :param name: experiment name stored in the net's config.
    """
    global source
    # NOTE: `source` is expected to exist at module level; the lines that
    # would (re)create it are intentionally commented out:
    # source_dict_copy = deepcopy(source_dict)
    # source = RealApplianceSource(**source_dict_copy)
    cfg = deepcopy(net_dict)
    cfg['experiment_name'] = name
    cfg['source'] = source
    n_units = 512
    num_filters = 10
    half = n_units // 2
    cfg['layers_config'] = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': num_filters,
            'filter_length': 10,
            'stride': 2,
            'nonlinearity': rectify,
            'W': Normal(std=1 / sqrt(n_units))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        },
        {
            'type': DenseLayer,
            'num_units': half,
            'W': Normal(std=1 / sqrt((n_units / 2) * num_filters)),
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': half,
            'W': Normal(std=1 / sqrt(n_units / 2)),
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': half,
            'W': Normal(std=1 / sqrt(n_units / 2)),
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs * half,
            'W': Normal(std=1 / sqrt(n_units / 2)),
            'nonlinearity': T.nnet.softplus
        }
    ]
    return Net(**cfg)
def main():
    """Initialise and run every configured experiment in sequence.

    Stops cleanly on Ctrl-C; any other exception is logged and re-raised.
    """
    # experiments = list('abcdefghijklmnopqrstuvwxyz')
    experiments = list('a')
    for experiment in experiments:
        full_exp_name = NAME + experiment
        # init_experiment returns a string such as "exp_a('<name>')" which is
        # evaluated below to build the net.
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception:
            logger.exception("Exception")
            raise
        finally:
            logging.shutdown()


if __name__ == "__main__":
    main()
| {
"content_hash": "1356209d77718efef967bc51cd05cef4",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 86,
"avg_line_length": 30.959798994974875,
"alnum_prop": 0.5700373316020126,
"repo_name": "mmottahedi/neuralnilm_prototype",
"id": "6537652057c3484f6b5fa9093b532c10d1488a08",
"size": "6161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e340.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
} |
from discord.ext import commands
import os.path
import json
class Battle():
    """Discord cog that handles posting a quest and collecting responses."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context = True)
    async def start_quest(self, ctx):
        """Starts the quest! Will fill in all it does as I go."""
        first_player = ctx.message.author.id
        players = {'First': first_player}
        print(players)
        await self.bot.say('Quest Posted! All who want to join, please step forward! \n:step')
        posplayer = await self.bot.wait_for_message(timeout=60.0)
        if posplayer is None:
            # wait_for_message returns None when the timeout elapses with no
            # message - bail out instead of crashing on attribute access.
            return
        # BUG FIX: the original inspected ctx.msg (a nonexistent attribute)
        # instead of the message returned by wait_for_message, so the
        # responder's text was never examined.
        if posplayer.content.startswith(':ready'):
            print('proceed')
        elif posplayer.content.startswith(':step'):
            print('add as new player')
        else:
            # BUG FIX: bot.say is a coroutine; the original call was never
            # awaited, so the message was never sent.
            await self.bot.say('How did you even get here,{}?'.format(ctx.message.author))
def setup(bot):
    """Extension entry point: register the Battle cog with *bot*."""
    battle_cog = Battle(bot)
    bot.add_cog(battle_cog)
"content_hash": "97fdca40d40284bf1f073bc8b0b2297e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 94,
"avg_line_length": 33.48148148148148,
"alnum_prop": 0.5907079646017699,
"repo_name": "padilin/Discord-RPG-Bot",
"id": "509a361f725015fb9d35be5ce67915b65ea30e45",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "to_battle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7074"
}
],
"symlink_target": ""
} |
from gemstone.core.modules import Module
import gemstone
class FirstModule(Module):
    """Gemstone module exposing a single greeting method over the service."""

    @gemstone.exposed_method("module1.say_hello")
    def say_hello(self):
        """Return a fixed greeting identifying this module."""
        greeting = "Hello from module 1!"
        return greeting
| {
"content_hash": "b4e48417eb21a3288d0ceb1e44629542",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 24.875,
"alnum_prop": 0.7236180904522613,
"repo_name": "vladcalin/pymicroservice",
"id": "0d5f87225693624a0a36908ee48e82206865b344",
"size": "199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/example_modules/module_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "32"
},
{
"name": "Python",
"bytes": "59141"
}
],
"symlink_target": ""
} |
"""A wrapper to have a portable SHA-256 tool."""
# TODO(dmarting): instead of this tool we should make SHA-256 of artifacts
# available in Skylark.
import hashlib
import sys
if __name__ == "__main__":
    if len(sys.argv) != 3:
        # pylint: disable=superfluous-parens
        print("Usage: %s input output" % sys.argv[0])
        sys.exit(-1)
    # Read the whole input, hash it, and write the hex digest to the output
    # file. The output file is opened first, matching the original behaviour.
    with open(sys.argv[2], "w") as outputfile, open(sys.argv[1], "rb") as inputfile:
        digest = hashlib.sha256(inputfile.read()).hexdigest()
        outputfile.write(digest)
| {
"content_hash": "ca79178867163688073297852cada879",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 33.13333333333333,
"alnum_prop": 0.6639839034205232,
"repo_name": "variac/bazel",
"id": "12c39dfdc27a5fce6348dd32f17feff4bf618850",
"size": "1099",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/build_defs/hash/sha256.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "24765"
},
{
"name": "C++",
"bytes": "796910"
},
{
"name": "HTML",
"bytes": "18128"
},
{
"name": "Java",
"bytes": "20748121"
},
{
"name": "JavaScript",
"bytes": "5159"
},
{
"name": "Makefile",
"bytes": "248"
},
{
"name": "PowerShell",
"bytes": "7559"
},
{
"name": "Protocol Buffer",
"bytes": "118764"
},
{
"name": "Python",
"bytes": "290195"
},
{
"name": "Shell",
"bytes": "753627"
}
],
"symlink_target": ""
} |
"""
This file is part of Ludolph: Erigones SDDC API plugin
Copyright (C) 2015-2017 Erigones, s. r. o.
See the LICENSE file for copying permission.
"""
import json
import time
import logging
from ludolph_erigones import __version__
from ludolph.command import CommandError, command
from ludolph.message import red, green, blue
from ludolph.plugins.plugin import LudolphPlugin
from erigones_sddc_api.client import Client
from erigones_sddc_api.exceptions import ESAPIError
logger = logging.getLogger(__name__)
class ErigonesApi(LudolphPlugin):
    """
    Erigones SDDC API commands. EXPERIMENTAL.

    Bridges XMPP chat commands to the Erigones SDDC REST API. Each chat user
    (JID) stores their own credentials via es-login and gets their own cached,
    logged-in API client.

    https://my.erigones.com/docs/api/
    """
    __version__ = __version__
    # Chat-command verbs accepted by the generic "es" command, mapped to the
    # HTTP method actually sent to the API.
    _actions = {
        'post': 'POST',
        'create': 'POST',
        'put': 'PUT',
        'set': 'PUT',
        'delete': 'DELETE',
        'get': 'GET',
    }
    # Attributes persisted across bot restarts by the Ludolph plugin machinery
    # (credentials survive; the client cache in _user_es is rebuilt lazily).
    persistent_attrs = ('_user_auth',)
    def __init__(self, *args, **kwargs):
        """Read Ludolph plugin configuration [ludolph_erigones.erigones_api]"""
        super(ErigonesApi, self).__init__(*args, **kwargs)
        config = self.config
        self._id = 0  # monotonically increasing request counter used to pair log lines
        self._user_es = {}  # JID -> cached, logged-in Client instance
        self._user_auth = {}  # JID -> (username_or_api_key, password or None)
        try:
            self._api_url = config['api_url'].rstrip('/')
        except KeyError:
            raise RuntimeError('api_url is not set in erigones_api plugin configuration')
    def _get_user_es(self, user, username_or_api_key, password):
        """Create :class:`erigones_sddc_api.Client` instance and perform Erigones SDDC API login.

        With a password the first argument is treated as a username and a real
        login call is made; without one it is treated as an API key and no
        login is needed. A client is returned either way - authentication
        success is only logged here, not enforced.
        """
        err = ''
        if password:
            logger.debug('Signing in to Erigones SDDC API using user "%s" credentials', user)
            es = Client(api_url=self._api_url)
            try:
                es.login(username_or_api_key, password).content
            except Exception as exc:
                err = str(exc)
        else:
            logger.debug('Using user "%s" api_key - skipping Erigones SDDC API login', user)
            es = Client(api_url=self._api_url, api_key=username_or_api_key)
        if es.is_authenticated():
            logger.info('User "%s" login successful at %s', user, es)
        else:
            logger.error('User "%s" login problem at %s: "%s"', user, es, err)
        return es
    def _es_request(self, msg, method, resource, _after_relogin=False, **params):
        """Wrapper for getting Erigones SDDC API response with cached content and checking Erigones SDDC API errors.

        Looks up (or lazily recreates) the per-user client, performs the
        request, and on a 403 "credentials not provided" error retries exactly
        once after re-login (password users only - API-key sessions cannot
        re-login). Other API errors are turned into CommandError for the chat
        layer.
        """
        user = self.xmpp.get_jid(msg)
        if user not in self._user_auth:
            logger.error('Erigones SDDC API is not available for user "%s"', user)
            raise CommandError('Erigones SDDC API is not available - use __es-login__ '
                               'to enable API access for your account (%s)' % user)
        try:
            es = self._user_es[user]
        except KeyError:
            # No cached client (e.g. after a bot restart) - recreate it from
            # the persisted credentials; that already performs a fresh login.
            es = self._user_es[user] = self._get_user_es(user, *self._user_auth[user])
            _after_relogin = True
        self._id += 1
        start_time = time.time()
        logger.info('[%s-%05d] User "%s" is calling Erigones SDDC API function: "%s %s"',
                    start_time, self._id, user, method, resource)
        response = es.request(method, resource, **params)
        if response.stream:
            # Long-running task - tell the chat user what we are waiting for.
            self.xmpp.msg_reply(msg, 'Waiting for pending task %s ...' % blue(response.task_id), preserve_msg=True)
        try:
            response.content
        except ESAPIError as exc:
            if (exc.status_code == 403 and exc.detail == 'Authentication credentials were not provided.'
                    and self._user_auth[user][1] and not _after_relogin):
                logger.warning('Performing user "%s" re-login to Erigones SDDC API at %s', user, es)
                if es.login(*self._user_auth[user]).ok:
                    return self._es_request(msg, method, resource, _after_relogin=True, **params)
            if isinstance(exc.detail, (dict, list)):  # Create a bit nicer output
                try:
                    err = json.dumps(exc.detail, indent=4)
                except ValueError:
                    err = str(exc.detail)
            else:
                err = str(exc.detail)
            raise CommandError('%s %s: %s' % (exc.__class__.__name__, exc.status_code, err))
        finally:
            logger.info('[%s-%05d] Erigones SDDC API function "%s %s" called by user "%s" finished in %g seconds',
                        start_time, self._id, method, resource, user, (time.time() - start_time))
        return response
    @staticmethod
    def _parse_es_parameters(parameters):
        """The es command parameters parser.

        Grammar: "-key" starts a parameter (value defaults to True); the next
        token becomes its value. The strings "true"/"false"/"null" (any case)
        map to Python constants, and a "json::" prefix marks a JSON-encoded
        value. Tokens before the first "-key" are ignored.
        """
        params = {}
        key = None
        val_next = False
        for i in parameters:
            if i and i.startswith('-'):
                _key = i[1:]
                if _key and _key[0].isalnum():
                    key = _key
                    params[key] = True
                    val_next = True
                continue
            if val_next and key:
                _i = str(i).lower()
                if _i == 'false':
                    params[key] = False
                elif _i == 'true':
                    params[key] = True
                elif _i == 'null':
                    params[key] = None
                elif i.startswith('json::'):
                    i = i[6:]
                    try:
                        i = json.loads(i)
                    except ValueError as e:
                        raise CommandError('Invalid json parameter %s (%s)' % (key, e))
                    else:
                        params[key] = i
                else:
                    params[key] = i
        return params
    @staticmethod
    def _vm_status_color(status):
        # Colorize well-known VM states for chat output; unrecognized states
        # are returned unchanged.
        if status.startswith('running'):
            color = green
        elif status.startswith('stopped') or status == 'stopping':
            color = red
        elif status == 'pending':
            color = blue
        else:
            return status
        return color(status)
    @command
    def es_login(self, msg, username_or_api_key, password=None):
        """
        Sign in to Erigones SDDC API and save your custom api_key or username/password.
        Usage: es-login <api_key>
        Usage: es-login <username> <password>
        """
        user = self.xmpp.get_jid(msg)
        es = self._get_user_es(user, username_or_api_key, password)
        # A quick GET /dc verifies the session really works before the
        # credentials are cached and persisted.
        if es and es.get('/dc').ok:
            self._user_es[user] = es
            self._user_auth[user] = (username_or_api_key, password)
            self._db_save()
            return 'Successfully signed in to Erigones SDDC API (%s) and saved your (%s) credentials' % (self._api_url,
                                                                                                         user)
        else:
            raise CommandError('User **%s** authentication against Erigones SDDC API (%s) failed' % (user,
                                                                                                     self._api_url))
    @command
    def es_logout(self, msg):
        """
        Sign out of Erigones SDDC API and delete your credentials.
        Usage: es-logout
        """
        user = self.xmpp.get_jid(msg)
        es = self._user_es.pop(user, None)
        auth = self._user_auth.pop(user, None)
        if auth:
            logger.debug('Signing user "%s" out of Erigones SDDC API', user)
            self._db_save()
            if es and auth[1]:  # username/password authentication
                try:
                    es.logout().content
                except Exception as exc:
                    logger.warn('User "%s" logout problem at %s: "%s"', user, es, exc)
                else:
                    logger.info('User "%s" logout successful at %s', user, es)
            else:
                # API-key sessions have no server-side logout.
                logger.info('User "%s" is using api_key or was never logged in - skipping logout at %s', user, es)
            return 'Successfully signed out of to Erigones SDDC API (%s) and removed your (%s) credentials' % (
                self._api_url, user)
        raise CommandError('User **%s** logout from Erigones SDDC API (%s) failed: user was never logged in' % (
            user, self._api_url))
    @command(admin_required=True)
    def es(self, msg, action, resource, *parameters):
        """
        es - Swiss Army Knife for Erigones SDDC API (EXPERIMENTAL)
        Usage: es action </resource> [parameters]
        action:\t{get|create|set|delete|options}
        resource:\t/some/resource/in/api
        parameters:\t-foo baz -bar qux ...
        """
        try:
            method = self._actions[action.lower()]
        except (KeyError, AttributeError):
            raise CommandError('Invalid action or method: **%s**' % action)
        if not resource.startswith('/'):
            raise CommandError('Invalid resource: **%s**' % resource)
        res = self._es_request(msg, method, resource, **self._parse_es_parameters(parameters))
        out = {
            'action': action,
            'resource': resource,
            'dc': res.dc,
            'task_id': res.task_id,
            '**status**': res.status_code,
            '**result**': res.content.result
        }
        return json.dumps(out, indent=4)
    @command(admin_required=True)
    def vm(self, msg, dc=None):
        """
        Show a list of all servers.
        Usage: vm [dc name]
        """
        res = self._es_request(msg, 'GET', '/vm', dc=dc, full=True).content
        out = []
        for vm in res.result:
            out.append('**%s** (%s)\t%s' % (vm['hostname'], vm['alias'], self._vm_status_color(vm['status'])))
        out.append('\n**%d** servers are shown in __%s__ datacenter.' % (len(res.result), res.dc))
        return '\n'.join(out)
    @command(admin_required=True, stream_output=True)
    def tasklog_report(self, msg, last=86400, dc=None):
        """
        Show task log statistics from one or all datacenters. Default time period is 1 day.
        Usage: tasklog-report [time period in seconds] [dc name]
        """
        try:
            last = int(last)
        except (ValueError, TypeError):
            raise CommandError('Invalid integer: __%s__' % last)
        if dc:
            dcs = [dc]
        else:
            # No datacenter given - report on every datacenter the user can see.
            dcs = self._es_request(msg, 'GET', '/dc').content.result
        out = 'Task log activity in **%(dc)s** datacenter for last **' + str(last) + '** seconds:\n' \
              'Pending: %(pending)s\tSucceeded: %(succeeded)s\tFailed: %(failed)s\tRevoked: %(revoked)s\n--'
        for dc_name in dcs:
            res = self._es_request(msg, 'GET', '/task/log/report', last=last, dc=dc_name).content
            stats = res.result
            stats['dc'] = res.dc
            if stats['failed']:
                stats['failed'] = red(stats['failed'])
            yield out % stats
| {
"content_hash": "9bc8050c06829f7cba5861aaaf6a852f",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 119,
"avg_line_length": 35.86229508196721,
"alnum_prop": 0.5259645273358932,
"repo_name": "erigones/ludolph-erigones",
"id": "1b75b6368015634e9c24ce6c86d94bb4e868c8c2",
"size": "10938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ludolph_erigones/erigones_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12635"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import Direction
from resource_management.libraries.functions.format import format
from resource_management.libraries.script.script import Script
class KafkaUpgrade(Script):
    """Ambari custom command that patches Kafka's launcher during an upgrade."""

    def copy_kerberos_param(self, env):
        """Append $KAFKA_KERBEROS_PARAMS to the java invocation in kafka-run-class.sh.

        Only runs when the stack operation is an upgrade (not a downgrade).
        """
        import params
        kafka_run_path = "/usr/iop/4.1.0.0/kafka/bin/kafka-run-class.sh"
        is_upgrade = (params.upgrade_direction is not None
                      and params.upgrade_direction == Direction.UPGRADE)
        if is_upgrade:
            # In-place sed: the "\$" escapes keep the dollar signs literal for sed.
            sed_cmd = ("sed", "-i",
                       "s/\$CLASSPATH \$KAFKA_OPTS/\$CLASSPATH \$KAFKA_OPTS \$KAFKA_KERBEROS_PARAMS/",
                       kafka_run_path)
            Execute(sed_cmd, logoutput=True)


if __name__ == "__main__":
    KafkaUpgrade().execute()
| {
"content_hash": "38bb7cd8a2c4c2c8e037380eeaecf4b7",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 140,
"avg_line_length": 41.888888888888886,
"alnum_prop": 0.7738726790450928,
"repo_name": "arenadata/ambari",
"id": "734e9ec8260299817a1b5d7f00acfff675c02832",
"size": "1531",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_upgrade.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
"""Enable/disable ejector."""
from mpf.devices.ball_device.ball_device_ejector import BallDeviceEjector
from mpf.core.delays import DelayManager
from mpf.devices.ball_device.default_ball_search import DefaultBallSearch
class EnableCoilEjector(DefaultBallSearch, BallDeviceEjector):
    """Enable a coil to eject one ball.

    Unlike a pulse-based ejector, this one enables the coil and schedules a
    delayed disable, so the enable time can vary with the number of balls in
    the device.
    """
    __slots__ = ["delay"]
    def __init__(self, config, ball_device, machine):
        """Initialise ejector.

        Missing eject_coil / eject_coil_enable_time settings are inherited
        from the owning ball device's config before validation.
        """
        for option in ["eject_coil", "eject_coil_enable_time"]:
            if option not in config and option in ball_device.config:
                config[option] = ball_device.config[option]
        super().__init__(config, ball_device, machine)
        self.delay = DelayManager(self.ball_device.machine)
        self.config = self.machine.config_validator.validate_config("ball_device_ejector_enable", self.config)
    async def eject_one_ball(self, is_jammed, eject_try, balls_in_device):
        """Enable eject coil, then disable it after the configured enable time.

        jam/retry information is unused by this ejector.
        """
        del is_jammed
        del eject_try
        # If multiple eject_coil_enable_time values, they correspond to the # of balls
        # NOTE(review): the bounds check uses ball_device.balls but the index
        # uses balls_in_device - 1; assumes the two agree - confirm.
        if self.ball_device.balls <= len(self.config['eject_coil_enable_time']):
            eject_time = self.config['eject_coil_enable_time'][balls_in_device - 1]
        else:
            eject_time = self.config['eject_coil_enable_time'][-1]
        # default pulse
        self.ball_device.debug_log("Enabling eject coil for %sms, Current balls: %s.",
                                   eject_time,
                                   self.ball_device.balls)
        self.config['eject_coil'].enable(max_wait_ms=self.config['eject_coil_max_wait_ms'])
        # Schedule the disable; reset() replaces any still-pending disable.
        self.delay.reset(name="disable", callback=self._disable_coil,
                         ms=eject_time)
    async def reorder_balls(self):
        """Reordering balls is not supported."""
        # TODO: implement
        self.ball_device.log.warning("Reordering balls is not implemented in enable ejector")
    def _disable_coil(self):
        """Disable the coil."""
        self.config['eject_coil'].disable()
    def _fire_coil_for_search(self, full_power):
        # Ball-search hook: short pulse at low power, full enable otherwise.
        if not full_power:
            self.config['eject_coil'].pulse()
        else:
            self.config['eject_coil'].enable()
        # A disable is scheduled in both branches; harmless after a pulse.
        self.delay.reset(name="disable", callback=self._disable_coil,
                         ms=self.config['eject_coil_enable_time'][0])
        return True
| {
"content_hash": "3eef7cfe16fbfedd997f26e17d91c7a0",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 110,
"avg_line_length": 40.61666666666667,
"alnum_prop": 0.616741895773492,
"repo_name": "missionpinball/mpf",
"id": "0c896a9aa6102a46a6dc3961240188e01a3951e6",
"size": "2437",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpf/devices/ball_device/enable_coil_ejector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "C++",
"bytes": "4019"
},
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "4532953"
}
],
"symlink_target": ""
} |
from participantCollection import ParticipantCollection
import string
import re
import datetime
import pyperclip
# Edit Me!
# Remember, this is during signup, so current month is not March, it's February.
# Per-month settings edited by hand before each signup window (Python 2 script).
currentMonthTotalDays = 30
currentMonthURL = "https://www.reddit.com/r/pornfree/comments/6xecnc/stay_clean_september_this_thread_updated_daily/"
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
# The challenge month is the month after the signup month (wraps December -> January).
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
uppercaseMonth = string.upper(nextMonthName)
currentDayOfMonthIndex = datetime.date.today().day
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
# TODO: testing
# currentDayOfMonthIndex = 28
participants = ParticipantCollection()
initialNumber = participants.size()
def templateForParticipants():
    """Render the roster of already-signed-up participants as markdown."""
    pieces = ["Here are the **INITIAL_NUMBER participants** who have already signed up:\n\n"]
    for participant in participants.participants:
        pieces.append("/u/" + participant.name)
        pieces.append("\n\n")
    return "".join(pieces)
def templateForTooEarly():
    """Placeholder text shown before the signup window opens."""
    openingDay = currentMonthTotalDays - 6
    return "(Too early. Come back on CURRENT_MONTH_NAME " + str(openingDay) + ")\n"
def templateForFirstSignupDay():
    """Post body for the day the signup thread opens."""
    parts = [
        "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n",
        "Hey everybody, we had a great turnout for [Stay Clean CURRENT_MONTH_NAME](CURRENT_MONTH_URL) - let's see if we can knock it out of the park for NEXT_MONTH_NAME. Have you been clean for the month of CURRENT_MONTH_NAME? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n",
        "\n",
        "If you would like to be included in this challenge, please post a brief comment to this thread, and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.",
    ]
    return "".join(parts)
def templateForMiddleSignupDays():
    """Post body for the days between the opening and the final signup day."""
    parts = [
        "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n",
        "Hey everybody, so far **INITIAL_NUMBER participants** have signed up. Have you been clean for **[the month of CURRENT_MONTH_NAME](CURRENT_MONTH_URL)**? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n",
        "\n",
        "If you would like to be included in this challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.\n",
        "\n",
        templateForParticipants(),
    ]
    return "".join(parts)
def templateForLastSignupDay():
    """Post body for the final day of the signup window."""
    parts = [
        "LAST CHANCE TO SIGN UP FOR STAY CLEAN UPPERCASE_MONTH! Sign up here!\n",
        "The Stay Clean NEXT_MONTH_NAME challenge **begins tomorrow**! So far, we have **INITIAL_NUMBER participants** signed up. If you would like to be included in the challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and we will include you. After midnight tonight, we will not be accepting any more participants. I will create the official update post tomorrow.\n",
        "\n",
        templateForParticipants(),
    ]
    return "".join(parts)
def templateToUse():
    """Pick the template matching today's position in the signup window.

    The window opens 6 days before the end of the month and closes on the
    last day of the month.
    """
    firstSignupDay = currentMonthTotalDays - 6
    if currentDayOfMonthIndex < firstSignupDay:
        return templateForTooEarly()
    if currentDayOfMonthIndex == firstSignupDay:
        return templateForFirstSignupDay()
    if currentDayOfMonthIndex < currentMonthTotalDays:
        return templateForMiddleSignupDays()
    if currentDayOfMonthIndex == currentMonthTotalDays:
        return templateForLastSignupDay()
def stringToPrint():
    """Fill in every placeholder token in the chosen template and return it."""
    # (placeholder, replacement) pairs, applied in the original order.
    substitutions = [
        ('INITIAL_NUMBER', str(initialNumber)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('CURRENT_MONTH_URL', currentMonthURL),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
        ('UPPERCASE_MONTH', uppercaseMonth),
    ]
    answer = templateToUse()
    for placeholder, replacement in substitutions:
        answer = re.sub(placeholder, replacement, answer)
    return answer
# Render the post once, echo it to the console between separators, and copy it
# to the system clipboard for pasting into reddit.
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| {
"content_hash": "e7bebdee89d0c96fa0be4df5e07b3942",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 569,
"avg_line_length": 62.857142857142854,
"alnum_prop": 0.7022727272727273,
"repo_name": "foobarbazblarg/stayclean",
"id": "953e26b1c67bd5eb5c7010e25dcea1ea21bea247",
"size": "6726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stayclean-2017-october/display-during-signup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
} |
import atexit
from functools import wraps
import json
import os
import requests
import time
from pyvcloud import vcloudair
from pyvcloud.schema.vcd.v1_5.schemas.vcloud import taskType
from cloudify import ctx
from cloudify import context
from cloudify import exceptions as cfy_exc
# Polling interval (seconds) when waiting on a vCloud task.
TASK_RECHECK_TIMEOUT = 2
# Terminal task states reported by the vCloud task API.
TASK_STATUS_SUCCESS = 'success'
TASK_STATUS_ERROR = 'error'
# Numeric vApp/VM status codes as defined by the vCloud API.
STATUS_COULD_NOT_BE_CREATED = -1
STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_DEPLOYED = 2
STATUS_SUSPENDED = 3
STATUS_POWERED_ON = 4
STATUS_POWERED_OFF = 8
STATUS_WAITING_FOR_USER_INPUT = 5
STATUS_UNKNOWN_STATE = 6
STATUS_UNRECOGNIZED_STATE = 7
STATUS_INCONSISTENT_STATE = 9
# Human-readable descriptions for the status codes above (and a few upload
# states not mirrored as named constants).
VCLOUD_STATUS_MAP = {
    -1 : "Could not be created",
    0 : "Unresolved",
    1 : "Resolved",
    2 : "Deployed",
    3 : "Suspended",
    4 : "Powered on",
    5 : "Waiting for user input",
    6 : "Unknown state",
    7 : "Unrecognized state",
    8 : "Powered off",
    9 : "Inconsistent state",
    10 : "Children do not all have the same status",
    11 : "Upload initiated, OVF descriptor pending",
    12 : "Upload initiated, copying contents",
    13 : "Upload initiated , disk contents pending",
    14 : "Upload has been quarantined",
    15 : "Upload quarantine period has expired"
}
# The two vCloud Air account flavours this plugin can log in to.
SUBSCRIPTION_SERVICE_TYPE = 'subscription'
ONDEMAND_SERVICE_TYPE = 'ondemand'
def transform_resource_name(res, ctx):
    """Apply the bootstrap resources_prefix to a resource name.

    *res* may be a plain name or a dict with a 'name' key; a dict is mutated
    in place. Returns the (possibly prefixed) name. The prefix is always
    applied - a name that already carries it only triggers a warning.
    """
    if isinstance(res, basestring):
        res = {'name': res}
    if not isinstance(res, dict):
        raise ValueError("transform_resource_name() expects either string or "
                         "dict as the first parameter")
    prefix = ctx.bootstrap_context.resources_prefix
    if not prefix:
        return res['name']
    original = res['name']
    res['name'] = prefix + original
    if original.startswith(prefix):
        ctx.logger.warn("Prefixing resource '{0}' with '{1}' but it "
                        "already has this prefix".format(original, prefix))
    else:
        ctx.logger.info("Transformed resource name '{0}' to '{1}'".format(
            original, res['name']))
    return res['name']
class Config(object):
    """Loads the static vCloud configuration from a JSON file.

    The file path comes from the VCLOUD_CONFIG_PATH environment variable,
    falling back to ~/vcloud_config.json.
    """

    VCLOUD_CONFIG_PATH_ENV_VAR = 'VCLOUD_CONFIG_PATH'
    VCLOUD_CONFIG_PATH_DEFAULT = '~/vcloud_config.json'

    def get(self):
        """Return the parsed config dict, or {} when the file is unreadable."""
        fallback = os.path.expanduser(self.VCLOUD_CONFIG_PATH_DEFAULT)
        path = os.getenv(self.VCLOUD_CONFIG_PATH_ENV_VAR, fallback)
        result = {}
        try:
            with open(path) as source:
                result = json.loads(source.read())
        except IOError:
            # Missing/unreadable file is not an error - just no static config.
            pass
        return result
class VcloudAirClient(object):
    """Builds authenticated vcloudair.VCA clients from merged configuration.

    Credentials come from the static JSON file (see ``Config``) merged with a
    per-call config dict. Every login step is retried because the vCloud Air
    endpoints can fail transiently.
    """

    config = Config
    LOGIN_RETRY_NUM = 5  # attempts for each individual login step

    def get(self, config=None, *args, **kw):
        """Return a logged-in VCA client.

        The static file configuration acts as a base and is overridden by the
        optional *config* dict.
        """
        static_config = self.__class__.config().get()
        cfg = {}
        cfg.update(static_config)
        if config:
            cfg.update(config)
        return self.connect(cfg)

    def connect(self, cfg):
        """Validate *cfg* and dispatch to the matching login flow.

        Raises NonRecoverableError when credentials, service/vdc, or the
        service type are missing or invalid.
        """
        url = cfg.get('url')
        username = cfg.get('username')
        password = cfg.get('password')
        token = cfg.get('token')
        service = cfg.get('service')
        vdc = cfg.get('vdc')
        service_type = cfg.get('service_type', SUBSCRIPTION_SERVICE_TYPE)
        region = cfg.get('region')
        # Either url+token or url+username+password must be supplied.
        if not (all([url, token]) or all([url, username, password])):
            raise cfy_exc.NonRecoverableError(
                "Login credentials must be specified")
        if not (service and vdc):
            raise cfy_exc.NonRecoverableError(
                "vCloud service and vDC must be specified")

        if service_type == SUBSCRIPTION_SERVICE_TYPE:
            vcloud_air = self._subscription_login(
                url, username, password, token, service, vdc)
        elif service_type == ONDEMAND_SERVICE_TYPE:
            vcloud_air = self._ondemand_login(
                url, username, password, token, region)
        else:
            # BUG FIX: this exception was previously constructed but never
            # raised, so execution fell through to the return below and died
            # with an UnboundLocalError instead of reporting the bad type.
            raise cfy_exc.NonRecoverableError(
                "Unrecognized service type: {0}".format(service_type))
        return vcloud_air

    def _subscription_login(self, url, username, password, token, service,
                            vdc):
        """Log in to a subscription-type vCloud Air service.

        Tries the session token first, then the password, then logs in to the
        requested org/VDC; each step is retried LOGIN_RETRY_NUM times.
        """
        logined = False
        vdc_logined = False
        vca = vcloudair.VCA(
            url, username, service_type=SUBSCRIPTION_SERVICE_TYPE,
            version='5.6')
        if token:
            for _ in range(self.LOGIN_RETRY_NUM):
                success = vca.login(token=token)
                if success is False:
                    ctx.logger.info("Login using token failed.")
                    continue
                else:
                    logined = True
                    ctx.logger.info("Login using token successful.")
                    break
        # Fall back to password login only if the token did not work.
        if logined is False and password:
            for _ in range(self.LOGIN_RETRY_NUM):
                success = vca.login(password)
                if success is False:
                    ctx.logger.info("Login using password failed. Retrying...")
                    continue
                else:
                    logined = True
                    ctx.logger.info("Login using password successful.")
                    break
        for _ in range(self.LOGIN_RETRY_NUM):
            success = vca.login_to_org(service, vdc)
            if success is False:
                ctx.logger.info("Login to VDC failed. Retrying...")
                continue
            else:
                vdc_logined = True
                ctx.logger.info("Login to VDC successful.")
                break
        if logined is False:
            raise cfy_exc.NonRecoverableError("Invalid login credentials")
        if vdc_logined is False:
            raise cfy_exc.NonRecoverableError("Could not login to VDC")
        atexit.register(vca.logout)  # close the session when the process exits
        return vca

    def _ondemand_login(self, url, username, password, token, region):
        """Log in to an ondemand-type vCloud Air service in *region*."""
        if region is None:
            raise cfy_exc.NonRecoverableError(
                "Region should be specified for OnDemand login")
        logined = False
        instance_logined = False
        vca = vcloudair.VCA(
            url, username, service_type=ONDEMAND_SERVICE_TYPE, version='5.7')
        if token:
            for _ in range(self.LOGIN_RETRY_NUM):
                success = vca.login(token=token)
                if success is False:
                    ctx.logger.info("Login using token failed.")
                    continue
                else:
                    logined = True
                    ctx.logger.info("Login using token successful.")
                    break
        # Fall back to password login only if the token did not work.
        if logined is False and password:
            for _ in range(self.LOGIN_RETRY_NUM):
                success = vca.login(password)
                if success is False:
                    ctx.logger.info("Login using password failed. Retrying...")
                    continue
                else:
                    logined = True
                    ctx.logger.info("Login using password successful.")
                    break
        for _ in range(self.LOGIN_RETRY_NUM):
            all_instances = vca.get_instances() or []
            instances = [instance for instance in all_instances
                         if instance['region'] == region]
            if len(instances) == 0:
                # BUG FIX: 'raise' was missing here; the code previously fell
                # through to instances[0] and crashed with an IndexError.
                raise cfy_exc.NonRecoverableError("No instances to login to.")
            instance = instances[0]
            success = vca.login_to_instance(instance['id'], password, token,
                                            None)
            if success is False:
                ctx.logger.info("Login to instance failed. Retrying...")
                continue
            else:
                instance_logined = True
                ctx.logger.info("Login to instance successful.")
                break
        # NOTE(review): a second login pass using the freshly obtained session
        # token and org URL follows; presumably required by the ondemand API
        # to bind the session to the organisation - confirm before removing.
        for _ in range(self.LOGIN_RETRY_NUM):
            instance = vca.get_instances()[0]
            success = vca.login_to_instance(instance['id'], None,
                                            vca.vcloud_session.token,
                                            vca.vcloud_session.org_url)
            if success is False:
                ctx.logger.info("Login to instance failed. Retrying...")
                continue
            else:
                instance_logined = True
                ctx.logger.info("Login to instance successful.")
                break
        if logined is False:
            raise cfy_exc.NonRecoverableError("Invalid login credentials")
        if instance_logined is False:
            raise cfy_exc.NonRecoverableError("Could not login to instance")
        atexit.register(vca.logout)  # close the session when the process exits
        return vca
def with_vca_client(f):
    """Decorator that injects a logged-in vCloud Air client.

    Resolves the ``vcloud_config`` property from the current operation
    context (node or relationship) and passes the resulting client to the
    wrapped function as the ``vca_client`` keyword argument.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if ctx.type == context.NODE_INSTANCE:
            properties = ctx.node.properties
        elif ctx.type == context.RELATIONSHIP_INSTANCE:
            properties = ctx.source.node.properties
        else:
            raise cfy_exc.NonRecoverableError("Unsupported context")
        kwargs['vca_client'] = VcloudAirClient().get(
            config=properties.get('vcloud_config'))
        return f(*args, **kwargs)
    return wrapper
def wait_for_task(vca_client, task):
    """Block until the given vCloud task finishes successfully.

    Polls the task's href every TASK_RECHECK_TIMEOUT seconds and re-parses
    the task document on each iteration.

    NOTE(review): there is no upper bound on the wait — a task that never
    reaches SUCCESS or ERROR blocks forever; confirm whether a timeout is
    wanted here.

    :param vca_client: logged-in VCA client (provides the session headers).
    :param task: task object exposing get_status()/get_href()/get_Error().
    :raises cfy_exc.NonRecoverableError: if the task reports an error.
    """
    status = task.get_status()
    while status != TASK_STATUS_SUCCESS:
        if status == TASK_STATUS_ERROR:
            error = task.get_Error()
            raise cfy_exc.NonRecoverableError(
                "Error during task execution: {0}".format(error.get_message()))
        else:
            time.sleep(TASK_RECHECK_TIMEOUT)
            # Re-fetch the task document and parse a fresh task object; the
            # status of the original object never changes on its own.
            response = requests.get(
                task.get_href(),
                headers=vca_client.vcloud_session.get_vcloud_headers())
            task = taskType.parseString(response.content, True)
            status = task.get_status()
def get_vcloud_config():
    """Return the effective vCloud configuration for the current operation.

    Starts from the static plugin configuration and overlays the
    ``vcloud_config`` property of the current node (or, for relationship
    operations, the source node).

    :raises cfy_exc.NonRecoverableError: for unsupported context types.
    """
    if ctx.type == context.NODE_INSTANCE:
        node_config = ctx.node.properties.get('vcloud_config')
    elif ctx.type == context.RELATIONSHIP_INSTANCE:
        node_config = ctx.source.node.properties.get('vcloud_config')
    else:
        raise cfy_exc.NonRecoverableError("Unsupported context")
    effective = Config().get()
    if node_config:
        effective.update(node_config)
    return effective
| {
"content_hash": "2700786be3d46b3af6b0895b0a76ac57",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 79,
"avg_line_length": 33.99009900990099,
"alnum_prop": 0.5649092144868434,
"repo_name": "geokala/tosca-vcloud-plugin",
"id": "e8abef2c2cd430b0f5cbfdaa0c2a153f9c0eb4be",
"size": "10927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vcloud_plugin_common/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "70567"
}
],
"symlink_target": ""
} |
"""
Start the GEOS server from command line.
"""
from geos import app
import argparse
import geos.mapsource
import pkg_resources
from geos.kml import URLFormatter
# Directory shipped inside the ``geos`` package holding the built-in
# map source definitions.
MAPSOURCES = pkg_resources.resource_filename("geos", "mapsources")
# Load the default Flask settings before command-line overrides are applied.
app.config.from_object('geos.default_settings')
def run_app(default_host=app.config['HOST'], default_port=app.config['PORT']):
    """Parse the command line and start the GEOS Flask application.

    :param default_host: hostname used when ``--host`` is not supplied
    :param default_port: port used when ``--port`` is not supplied
    """
    parser = argparse.ArgumentParser("geos")
    parser.add_argument(
        "-m", "--mapsource", required=False, default=None,
        help="path to the directory containing the mapsource files. [default: integrated mapsources]")
    parser.add_argument(
        "-H", "--host", required=False, default=default_host,
        help="Hostname of the Flask app [default {}]".format(default_host))
    parser.add_argument(
        "-P", "--port", required=False, default=default_port,
        help="Port for the Flask app [default {}]".format(default_port))
    parser.add_argument(
        "--display-host", required=False, default=None,
        help="Hostname used for self-referencing links [defaults to Flask hostname]")
    parser.add_argument(
        "--display-port", required=False, default=None,
        help="Port used for self-referencing links [defaults to Flask port]")
    parser.add_argument(
        "--display-scheme", required=False, default=None,
        help="URI-scheme used for self-referencing links [default {}]".format(app.config["PREFERRED_URL_SCHEME"]))
    cli = parser.parse_args()

    # Self-referencing links fall back to the serving host/port/scheme.
    app.config['url_formatter'] = URLFormatter(
        host=cli.display_host or cli.host,
        port=cli.display_port or cli.port,
        url_scheme=cli.display_scheme or app.config["PREFERRED_URL_SCHEME"],
    )

    # Built-in map sources first; a user-supplied directory may extend or
    # override them.
    app.config['mapsources'] = geos.mapsource.load_maps(MAPSOURCES)
    if cli.mapsource is not None:
        app.config["mapsources"].update(geos.mapsource.load_maps(cli.mapsource))

    app.run(
        host=cli.host,
        port=int(cli.port)
    )
# Entry point for direct execution (``python runserver.py``).
if __name__ == "__main__":
    run_app()
| {
"content_hash": "be914e590f4d78fb7825d735969748cc",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 128,
"avg_line_length": 40.94444444444444,
"alnum_prop": 0.6246042514699232,
"repo_name": "grst/geos",
"id": "ebf466a658d8d1cbb862c3a4bd0cd31f63a67406",
"size": "2235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geos/scripts/runserver.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1460"
},
{
"name": "Dockerfile",
"bytes": "136"
},
{
"name": "HTML",
"bytes": "5154"
},
{
"name": "JavaScript",
"bytes": "13013"
},
{
"name": "Python",
"bytes": "101031"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """South forward migration: create the analytics_talentsaturation table."""
        # Adding model 'TalentSaturation'
        db.create_table(u'analytics_talentsaturation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('year', self.gf('django.db.models.fields.IntegerField')(unique=True)),
            ('headcount', self.gf('django.db.models.fields.IntegerField')()),
            ('attrition_pc', self.gf('django.db.models.fields.DecimalField')(default=5.0, max_digits=5, decimal_places=2)),
            ('cagr_pc', self.gf('django.db.models.fields.DecimalField')(default=8.6, max_digits=5, decimal_places=2)),
            ('fresher_hiring_pc', self.gf('django.db.models.fields.DecimalField')(default=95.0, max_digits=5, decimal_places=2)),
            ('need_for_experience_pc', self.gf('django.db.models.fields.DecimalField')(default=45.0, max_digits=5, decimal_places=2)),
        ))
        db.send_create_signal(u'analytics', ['TalentSaturation'])
    def backwards(self, orm):
        """South backward migration: drop the analytics_talentsaturation table."""
        # Deleting model 'TalentSaturation'
        db.delete_table(u'analytics_talentsaturation')
models = {
'admin.company': {
'Meta': {'object_name': 'Company'},
'company_type': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'nasscom_membership_number': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '20'}),
'training_provider': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
'admin.institution': {
'Meta': {'object_name': 'Institution'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analytics.City']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_university': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'university_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
'admin.occupation': {
'Meta': {'object_name': 'Occupation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'tracks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.Track']", 'null': 'True', 'blank': 'True'})
},
'admin.sector': {
'Meta': {'object_name': 'Sector', 'index_together': "[['name']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '9', 'db_index': 'True'})
},
'admin.subsector': {
'Meta': {'unique_together': "(('sector', 'name'),)", 'object_name': 'SubSector', 'index_together': "[['name', 'sector']]"},
'career_guide': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobility_map': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"})
},
'admin.track': {
'Meta': {'object_name': 'Track'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
u'analytics.city': {
'Meta': {'unique_together': "(('name', 'state'),)", 'object_name': 'City'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analytics.State']"})
},
u'analytics.companyyeardata': {
'Meta': {'unique_together': "(('year', 'company'),)", 'object_name': 'CompanyYearData'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revenue': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.demanddata': {
'Meta': {'unique_together': "(('year', 'city', 'occupation', 'company'),)", 'object_name': 'DemandData'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analytics.City']"}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Company']"}),
'demand': ('django.db.models.fields.IntegerField', [], {}),
'headcount': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Occupation']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.diversityratiolevel': {
'Meta': {'object_name': 'DiversityRatioLevel'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male_entry': ('django.db.models.fields.IntegerField', [], {}),
'male_leadership': ('django.db.models.fields.IntegerField', [], {}),
'male_middle': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {'unique': 'True'})
},
u'analytics.diversityratiosubsector': {
'Meta': {'unique_together': "(('year', 'subsector'),)", 'object_name': 'DiversityRatioSubsector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'subsector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.genderdiversity': {
'Meta': {'unique_together': "(('year', 'category'),)", 'object_name': 'GenderDiversity'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.itspend': {
'Meta': {'unique_together': "(('year', 'sub_sector'),)", 'object_name': 'ITSpend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'india_revenue': ('django.db.models.fields.IntegerField', [], {}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'world_spend': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.revenueoccupation': {
'Meta': {'unique_together': "(('year', 'occupation'),)", 'object_name': 'RevenueOccupation'},
'cagr_next_7_years': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Occupation']"}),
'revenue': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.revenuesubsector': {
'Meta': {'unique_together': "(('year', 'sub_sector'),)", 'object_name': 'RevenueSubsector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revenue': ('django.db.models.fields.IntegerField', [], {}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.revenuetotal': {
'Meta': {'object_name': 'RevenueTotal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'most_likely_growth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'optimistic_growth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'revenue': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {'unique': 'True'})
},
u'analytics.state': {
'Meta': {'unique_together': "(('name', 'region'),)", 'object_name': 'State'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
u'analytics.supplybase': {
'Meta': {'unique_together': "(('year', 'city', 'occupation', 'institution', 'degree'),)", 'object_name': 'SupplyBase'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analytics.City']"}),
'degree': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Institution']"}),
'occupation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Occupation']"}),
'supply': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'analytics.talentsaturation': {
'Meta': {'object_name': 'TalentSaturation'},
'attrition_pc': ('django.db.models.fields.DecimalField', [], {'default': '5.0', 'max_digits': '5', 'decimal_places': '2'}),
'cagr_pc': ('django.db.models.fields.DecimalField', [], {'default': '8.6', 'max_digits': '5', 'decimal_places': '2'}),
'fresher_hiring_pc': ('django.db.models.fields.DecimalField', [], {'default': '95.0', 'max_digits': '5', 'decimal_places': '2'}),
'headcount': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'need_for_experience_pc': ('django.db.models.fields.DecimalField', [], {'default': '45.0', 'max_digits': '5', 'decimal_places': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {'unique': 'True'})
}
}
complete_apps = ['analytics'] | {
"content_hash": "2deb9929137bac18b79f5affe3d6287a",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 164,
"avg_line_length": 70.96067415730337,
"alnum_prop": 0.5469875702636371,
"repo_name": "arpitprogressive/arpittest",
"id": "c6fafe0f0bc5907a50d748df534ce8394873bf26",
"size": "12655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/analytics/migrations/0004_auto__add_talentsaturation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133532"
},
{
"name": "JavaScript",
"bytes": "227983"
},
{
"name": "Python",
"bytes": "782274"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
} |
"""Support for statistics for sensor values."""
from collections import deque
import logging
import statistics
import voluptuous as vol
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import execute, session_scope
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
async_track_point_in_utc_time,
async_track_state_change_event,
)
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)

# Keys used in the sensor's extra state attributes.
ATTR_AVERAGE_CHANGE = "average_change"
ATTR_CHANGE = "change"
ATTR_CHANGE_RATE = "change_rate"
ATTR_COUNT = "count"
ATTR_MAX_AGE = "max_age"
ATTR_MAX_VALUE = "max_value"
ATTR_MEAN = "mean"
ATTR_MEDIAN = "median"
ATTR_MIN_AGE = "min_age"
ATTR_MIN_VALUE = "min_value"
ATTR_SAMPLING_SIZE = "sampling_size"
ATTR_STANDARD_DEVIATION = "standard_deviation"
ATTR_TOTAL = "total"
ATTR_VARIANCE = "variance"

# Platform configuration option names and their defaults.
CONF_SAMPLING_SIZE = "sampling_size"
CONF_MAX_AGE = "max_age"
CONF_PRECISION = "precision"

DEFAULT_NAME = "Stats"
DEFAULT_SIZE = 20
DEFAULT_PRECISION = 2

ICON = "mdi:calculator"

# Extend the base sensor platform schema with this platform's options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_SAMPLING_SIZE, default=DEFAULT_SIZE): vol.All(
            vol.Coerce(int), vol.Range(min=1)
        ),
        vol.Optional(CONF_MAX_AGE): cv.time_period,
        vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Statistics sensor from YAML configuration."""
    sensor = StatisticsSensor(
        config.get(CONF_ENTITY_ID),
        config.get(CONF_NAME),
        config.get(CONF_SAMPLING_SIZE),
        config.get(CONF_MAX_AGE),
        config.get(CONF_PRECISION),
    )
    # True: request an initial state update right after adding.
    async_add_entities([sensor], True)
    return True
class StatisticsSensor(Entity):
    """Representation of a Statistics sensor."""

    def __init__(self, entity_id, name, sampling_size, max_age, precision):
        """Initialize the Statistics sensor."""
        self._entity_id = entity_id
        # Binary source sensors are counted instead of averaged (see state).
        self.is_binary = self._entity_id.split(".")[0] == "binary_sensor"
        self._name = name
        self._sampling_size = sampling_size
        self._max_age = max_age
        self._precision = precision
        self._unit_of_measurement = None
        # Rolling windows of sampled values and their timestamps; bounded by
        # sampling_size (deque silently drops the oldest entry when full).
        self.states = deque(maxlen=self._sampling_size)
        self.ages = deque(maxlen=self._sampling_size)
        self.count = 0
        self.mean = self.median = self.stdev = self.variance = None
        self.total = self.min = self.max = None
        self.min_age = self.max_age = None
        self.change = self.average_change = self.change_rate = None
        # Cancel-callback for the timer scheduled in async_update().
        self._update_listener = None

    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def async_stats_sensor_state_listener(event):
            """Handle the sensor state changes."""
            new_state = event.data.get("new_state")
            if new_state is None:
                return
            self._unit_of_measurement = new_state.attributes.get(
                ATTR_UNIT_OF_MEASUREMENT
            )
            self._add_state_to_queue(new_state)
            self.async_schedule_update_ha_state(True)

        @callback
        def async_stats_sensor_startup(_):
            """Add listener and get recorded state."""
            _LOGGER.debug("Startup for %s", self.entity_id)
            async_track_state_change_event(
                self.hass, [self._entity_id], async_stats_sensor_state_listener
            )
            if "recorder" in self.hass.config.components:
                # Only use the database if it's configured
                self.hass.async_create_task(self._async_initialize_from_database())

        # Defer listener registration until Home Assistant has started.
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, async_stats_sensor_startup
        )

    def _add_state_to_queue(self, new_state):
        """Add the state to the queue."""
        # Unknown/unavailable states carry no usable value.
        if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
            return
        try:
            if self.is_binary:
                self.states.append(new_state.state)
            else:
                self.states.append(float(new_state.state))
            self.ages.append(new_state.last_updated)
        except ValueError:
            _LOGGER.error(
                "%s: parsing error, expected number and received %s",
                self.entity_id,
                new_state.state,
            )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        # Numeric sources report the mean; binary sources report the count.
        return self.mean if not self.is_binary else self.count

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement if not self.is_binary else None

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        # NOTE: implicitly returns None for binary source sensors.
        if not self.is_binary:
            return {
                ATTR_SAMPLING_SIZE: self._sampling_size,
                ATTR_COUNT: self.count,
                ATTR_MEAN: self.mean,
                ATTR_MEDIAN: self.median,
                ATTR_STANDARD_DEVIATION: self.stdev,
                ATTR_VARIANCE: self.variance,
                ATTR_TOTAL: self.total,
                ATTR_MIN_VALUE: self.min,
                ATTR_MAX_VALUE: self.max,
                ATTR_MIN_AGE: self.min_age,
                ATTR_MAX_AGE: self.max_age,
                ATTR_CHANGE: self.change,
                ATTR_AVERAGE_CHANGE: self.average_change,
                ATTR_CHANGE_RATE: self.change_rate,
            }

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    def _purge_old(self):
        """Remove states which are older than self._max_age."""
        now = dt_util.utcnow()
        _LOGGER.debug(
            "%s: purging records older then %s(%s)",
            self.entity_id,
            dt_util.as_local(now - self._max_age),
            self._max_age,
        )
        # ages is ordered oldest-first, so pop from the left until the
        # oldest remaining sample is young enough.
        while self.ages and (now - self.ages[0]) > self._max_age:
            _LOGGER.debug(
                "%s: purging record with datetime %s(%s)",
                self.entity_id,
                dt_util.as_local(self.ages[0]),
                (now - self.ages[0]),
            )
            self.ages.popleft()
            self.states.popleft()

    def _next_to_purge_timestamp(self):
        """Find the timestamp when the next purge would occur."""
        if self.ages and self._max_age:
            # Take the oldest entry from the ages list and add the configured max_age.
            # If executed after purging old states, the result is the next timestamp
            # in the future when the oldest state will expire.
            return self.ages[0] + self._max_age
        return None

    async def async_update(self):
        """Get the latest data and updates the states."""
        _LOGGER.debug("%s: updating statistics", self.entity_id)
        if self._max_age is not None:
            self._purge_old()
        self.count = len(self.states)
        if not self.is_binary:
            try:  # require only one data point
                self.mean = round(statistics.mean(self.states), self._precision)
                self.median = round(statistics.median(self.states), self._precision)
            except statistics.StatisticsError as err:
                _LOGGER.debug("%s: %s", self.entity_id, err)
                self.mean = self.median = STATE_UNKNOWN
            try:  # require at least two data points
                self.stdev = round(statistics.stdev(self.states), self._precision)
                self.variance = round(statistics.variance(self.states), self._precision)
            except statistics.StatisticsError as err:
                _LOGGER.debug("%s: %s", self.entity_id, err)
                self.stdev = self.variance = STATE_UNKNOWN
            if self.states:
                self.total = round(sum(self.states), self._precision)
                self.min = round(min(self.states), self._precision)
                self.max = round(max(self.states), self._precision)
                self.min_age = self.ages[0]
                self.max_age = self.ages[-1]
                self.change = self.states[-1] - self.states[0]
                self.average_change = self.change
                self.change_rate = 0
                if len(self.states) > 1:
                    self.average_change /= len(self.states) - 1
                    time_diff = (self.max_age - self.min_age).total_seconds()
                    if time_diff > 0:
                        self.change_rate = self.change / time_diff
                self.change = round(self.change, self._precision)
                self.average_change = round(self.average_change, self._precision)
                self.change_rate = round(self.change_rate, self._precision)
            else:
                self.total = self.min = self.max = STATE_UNKNOWN
                self.min_age = self.max_age = dt_util.utcnow()
                self.change = self.average_change = STATE_UNKNOWN
                self.change_rate = STATE_UNKNOWN
        # If max_age is set, ensure to update again after the defined interval.
        next_to_purge_timestamp = self._next_to_purge_timestamp()
        if next_to_purge_timestamp:
            _LOGGER.debug(
                "%s: scheduling update at %s", self.entity_id, next_to_purge_timestamp
            )
            # Cancel any previously scheduled update before rescheduling.
            if self._update_listener:
                self._update_listener()
                self._update_listener = None

            @callback
            def _scheduled_update(now):
                """Timer callback for sensor update."""
                _LOGGER.debug("%s: executing scheduled update", self.entity_id)
                self.async_schedule_update_ha_state(True)
                self._update_listener = None

            self._update_listener = async_track_point_in_utc_time(
                self.hass, _scheduled_update, next_to_purge_timestamp
            )

    async def _async_initialize_from_database(self):
        """Initialize the list of states from the database.

        The query will get the list of states in DESCENDING order so that we
        can limit the result to self._sample_size. Afterwards reverse the
        list so that we get it in the right order again.

        If MaxAge is provided then query will restrict to entries younger then
        current datetime - MaxAge.
        """
        _LOGGER.debug("%s: initializing values from the database", self.entity_id)
        with session_scope(hass=self.hass) as session:
            query = session.query(States).filter(
                States.entity_id == self._entity_id.lower()
            )
            if self._max_age is not None:
                records_older_then = dt_util.utcnow() - self._max_age
                _LOGGER.debug(
                    "%s: retrieve records not older then %s",
                    self.entity_id,
                    records_older_then,
                )
                query = query.filter(States.last_updated >= records_older_then)
            else:
                _LOGGER.debug("%s: retrieving all records", self.entity_id)
            query = query.order_by(States.last_updated.desc()).limit(
                self._sampling_size
            )
            states = execute(query, to_native=True, validate_entity_ids=False)
        # Oldest first, so the deques end up in chronological order.
        for state in reversed(states):
            self._add_state_to_queue(state)
        self.async_schedule_update_ha_state(True)
        _LOGGER.debug("%s: initializing from database completed", self.entity_id)
| {
"content_hash": "29676833a1ccc7e92cd93b3c09396cc6",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 88,
"avg_line_length": 35.88728323699422,
"alnum_prop": 0.5866956591769349,
"repo_name": "mKeRix/home-assistant",
"id": "945e5ff89d16f99ca3bf12d082fd3c8686e0dfd6",
"size": "12417",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/statistics/sensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1466026"
},
{
"name": "Python",
"bytes": "4770710"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "12407"
}
],
"symlink_target": ""
} |
from pywps.dblog import store_status
from pywps.response.status import WPS_STATUS
from jinja2 import Environment, PackageLoader
import os
class RelEnvironment(Environment):
    """Jinja2 environment that resolves template names relative to the
    referencing template rather than the loader root."""

    def join_path(self, template, parent):
        # Anchor the requested template next to its parent template.
        parent_dir = os.path.dirname(parent)
        return "{0}/{1}".format(parent_dir, template)
def get_response(operation):
    """Return the WPS response class for *operation*.

    :param operation: one of "capabilities", "describe" or "execute".
    :return: the matching response class, or None for unknown operations.
    """
    from .capabilities import CapabilitiesResponse
    from .describe import DescribeResponse
    from .execute import ExecuteResponse

    dispatch = {
        "capabilities": CapabilitiesResponse,
        "describe": DescribeResponse,
        "execute": ExecuteResponse,
    }
    # dict.get keeps the original implicit-None behaviour for unknown ops.
    return dispatch.get(operation)
class WPSResponse(object):
    """Base class for WPS responses.

    Subclasses are expected to implement _construct_doc() which builds the
    actual response document; see get_response_doc().
    """

    def __init__(self, wps_request, uuid=None, version="1.0.0"):
        self.wps_request = wps_request
        self.uuid = uuid
        self.message = ''
        self.status = WPS_STATUS.ACCEPTED
        self.status_percentage = 0
        self.doc = None
        self.version = version
        # Template environment with parent-relative template resolution.
        self.template_env = RelEnvironment(
            loader=PackageLoader('pywps', 'templates'),
            trim_blocks=True, lstrip_blocks=True,
            autoescape=True,
        )

    def _update_status(self, status, message, status_percentage):
        """
        Update status report of currently running process instance

        :param str message: Message you need to share with the client
        :param int status_percentage: Percent done (number between <0-100>)
        :param pywps.response.status.WPS_STATUS status: process status - user should usually
            omit this parameter
        """
        self.message = message
        self.status = status
        self.status_percentage = status_percentage
        # Persist the status so it survives across requests/processes.
        store_status(self.uuid, self.status, self.message, self.status_percentage)

    def get_response_doc(self):
        """Build the response document, recording FAILED/SUCCEEDED status."""
        try:
            self.doc = self._construct_doc()
        except Exception as e:
            # Werkzeug HTTP exceptions carry a human-readable .description.
            if hasattr(e, "description"):
                msg = e.description
            else:
                msg = e
            self._update_status(WPS_STATUS.FAILED, msg, 100)
            raise e
        else:
            self._update_status(WPS_STATUS.SUCCEEDED, u"Response generated", 100)
        return self.doc
| {
"content_hash": "efa1fa1bd2b4a536a652327117485ffc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 92,
"avg_line_length": 31.805555555555557,
"alnum_prop": 0.6327510917030568,
"repo_name": "tomkralidis/pywps",
"id": "3ecfa222cd1ede842da11142514062956294b691",
"size": "2290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywps/response/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "310546"
}
],
"symlink_target": ""
} |
"""This script reads our requirements.txt file and removes the pinned versions"""
import re
import os
from os.path import abspath, dirname
# Repository root: three directory levels above this script.
TOP_LEVEL = abspath(dirname(dirname(dirname(__file__))))
# The two requirements files whose package names are collected below.
REQUIREMENTS = os.path.join(TOP_LEVEL, 'requirements.txt')
BUILD_REQUIREMENTS = os.path.join(TOP_LEVEL, 'build_tools', 'build_requirements.txt')
def find_latest_dependencies(*requirements_files):
    """Given one or more requirements.txt files, strip off any pinned versions

    Blank lines, comment lines and lines that do not start with a package
    name are skipped.  Names are lower-cased and de-duplicated while
    preserving first-seen order.

    Parameters
    ----------
    *requirements_files : str
        One or more paths to requirements.txt files to parse

    Returns
    -------
    requirements : list
        List of parsed dependencies without their pinned versions
    """
    requirements = []
    for requirements_file in requirements_files:
        with open(requirements_file) as fh:
            for line in fh:
                requirement = line.strip()
                # Check the *stripped* line so blank lines and indented
                # comments are skipped too (previously a blank line made
                # re.match return None and crashed on .group()).
                if not requirement or requirement.startswith('#'):
                    continue
                match = re.match(r'^([A-Za-z\-0-9]+)', requirement)
                if match is None:
                    # e.g. pip options such as "-r other.txt"
                    continue
                name = match.group(0).lower()
                if name not in requirements:
                    requirements.append(name)
    return requirements
# Collect unpinned package names from both requirements files.
requirements = find_latest_dependencies(REQUIREMENTS, BUILD_REQUIREMENTS)
# We print because this is called from a bash script and we need to return a
# space-separated list
print(' '.join(requirements))
| {
"content_hash": "f1e073788321dbb429f805d634acc547",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 85,
"avg_line_length": 33.476190476190474,
"alnum_prop": 0.6557610241820768,
"repo_name": "alkaline-ml/pmdarima",
"id": "1528e45631e7ec54080d0c82be516d20b2f950f9",
"size": "1406",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build_tools/github/get_latest_dependencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "277"
},
{
"name": "Cython",
"bytes": "9659"
},
{
"name": "Jupyter Notebook",
"bytes": "19652"
},
{
"name": "Makefile",
"bytes": "2159"
},
{
"name": "Python",
"bytes": "554553"
},
{
"name": "Shell",
"bytes": "10301"
}
],
"symlink_target": ""
} |
def main(j, args, params, tags, tasklet):
    """Wiki macro: render the list of actor templates.

    Uses the repo named by the ``ayspath`` tag when given, otherwise the
    global actor templates.  Rendering errors are shown in the document
    instead of propagating.

    :param j: jumpscale root object
    :param args: macro arguments (provides ``doc`` and ``getTag``)
    :param params: result container; ``params.result`` is set to (doc, doc)
    :param tags: unused
    :param tasklet: unused
    :return: params
    """
    doc = args.doc
    ayspath = args.getTag('ayspath') or ''
    if ayspath:
        repo = j.atyourservice.repoGet(ayspath)
        templates = repo.templates
    else:
        templates = j.atyourservice.actorTemplates
    # Removed unused local `out = []` from the original implementation.
    try:
        reponame = j.sal.fs.getBaseName(ayspath)
        args.doc.applyTemplate({'templates': list(templates.values()), 'aysrepo': ayspath, 'reponame': reponame})
    except Exception as e:
        # Best effort: surface the error inside the rendered page.
        args.doc.applyTemplate({'error': str(e)})
    params.result = (doc, doc)
    return params
| {
"content_hash": "7a43005ee7d5bfd70af5bcca0a619999",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 113,
"avg_line_length": 30.42105263157895,
"alnum_prop": 0.6262975778546713,
"repo_name": "Jumpscale/jumpscale_portal8",
"id": "618b9c24d5b9cc88d977a101ab47f70a065a4806",
"size": "580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/portalbase/AYS81/.macros/wiki/actortemplates/3_actortemplates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "482591"
},
{
"name": "HTML",
"bytes": "313255"
},
{
"name": "JavaScript",
"bytes": "8815099"
},
{
"name": "PHP",
"bytes": "205758"
},
{
"name": "Python",
"bytes": "974012"
},
{
"name": "Ruby",
"bytes": "28925"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
} |
"""
Created on Tue Sep 15 20:25:53 2015
@author: Wasit
"""
import numpy as np
import pandas as pd
# Load the course roster CSV (dates parsed where detected).
df = pd.read_csv('cs401.csv',delimiter=",",parse_dates=True,
        infer_datetime_format=True,dayfirst=False,encoding='utf8')
myheader=list(df.columns.values)

# Build the sorted list of unique advisor names (8th column of the sheet).
advisors={'name':[]}
for ad in df[myheader[7]]:
    if ad not in advisors['name']:
        advisors['name'].append(ad)
advisors['name'].sort()
# Per-advisor counters of normal vs. special students.
advisors['normal']=np.zeros(len(advisors['name']))
advisors['special']=np.zeros(len(advisors['name']))

matching={}
for record in df.values:
    # Columns 1-2 may each hold a student id (or NaN when absent).
    for student_id in record[1:3]:
        # NaN != NaN, so this self-comparison filters out missing ids.
        if student_id==student_id:
            # Students whose id digits 3-6 are '0965' count as "special".
            if str(int(student_id))[2:6] =='0965':
                advisors['special'][advisors['name'].index(record[7])]+=1
            else:
                advisors['normal'][advisors['name'].index(record[7])]+=1

# Write the original sheet plus the per-advisor summary to Excel.
writer = pd.ExcelWriter('cs401_out.xlsx')
df.to_excel(writer,'Sheet1')
pd.DataFrame(advisors).to_excel(writer,'advisors')
writer.save()
"content_hash": "de3218a6d6236b0377ff95e2b42b3611",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 30.38235294117647,
"alnum_prop": 0.6292352371732817,
"repo_name": "wasit7/cs426",
"id": "d9e598c1827654c8d5d48cd96efb79fd5b5cde76",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lectures/week05_profile&Matrix/pandas/cs401.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "115"
},
{
"name": "C",
"bytes": "1930"
},
{
"name": "C++",
"bytes": "11607"
},
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "79351"
},
{
"name": "Shell",
"bytes": "1943"
}
],
"symlink_target": ""
} |
import json
import requests
from .device import Device
from .channel import Channel
from .contact import Contact
from .invalid_key_error import InvalidKeyError
from .filetype import get_file_type
class PushBullet(object):
    """Minimal client for the PushBullet v2 REST API.

    Keeps an authenticated ``requests`` session for the object's lifetime
    and caches the account's devices, contacts, channels and user profile
    (populated by :meth:`refresh`).

    Most mutating methods return a ``(success, payload)`` tuple where
    ``payload`` is the created/updated object on success or the raw JSON
    error response on failure.
    """

    DEVICES_URL = "https://api.pushbullet.com/v2/devices"
    CONTACTS_URL = "https://api.pushbullet.com/v2/contacts"
    CHANNELS_URL = "https://api.pushbullet.com/v2/channels"
    ME_URL = "https://api.pushbullet.com/v2/users/me"
    PUSH_URL = "https://api.pushbullet.com/v2/pushes"
    UPLOAD_REQUEST_URL = "https://api.pushbullet.com/v2/upload-request"

    def __init__(self, api_key):
        """Create an authenticated session and load the account data.

        :param api_key: PushBullet API key (sent as the basic-auth user)
        """
        self.api_key = api_key
        self._json_header = {'Content-Type': 'application/json'}
        self._session = requests.Session()
        # The API key is the basic-auth user name; the password is empty.
        self._session.auth = (self.api_key, "")
        self._session.headers.update(self._json_header)
        self.refresh()

    def _get_data(self, url):
        """GET *url* and return the decoded JSON body.

        :raises InvalidKeyError: when the server answers 401.
        """
        resp = self._session.get(url)
        if resp.status_code == 401:
            raise InvalidKeyError()
        return resp.json()

    def _load_devices(self):
        """Cache the account's active devices in ``self.devices``."""
        self.devices = []
        resp_dict = self._get_data(self.DEVICES_URL)
        device_list = resp_dict.get("devices", [])
        for device_info in device_list:
            # Only keep entries the API marks as active.
            if device_info.get("active"):
                d = Device(self, device_info)
                self.devices.append(d)

    def _load_contacts(self):
        """Cache the account's active contacts in ``self.contacts``."""
        self.contacts = []
        resp_dict = self._get_data(self.CONTACTS_URL)
        contacts_list = resp_dict.get("contacts", [])
        for contact_info in contacts_list:
            if contact_info.get("active"):
                c = Contact(self, contact_info)
                self.contacts.append(c)

    def _load_user_info(self):
        """Cache the account profile dict in ``self.user_info``."""
        self.user_info = self._get_data(self.ME_URL)

    def _load_channels(self):
        """Cache the account's active channels in ``self.channels``."""
        self.channels = []
        resp_dict = self._get_data(self.CHANNELS_URL)
        channel_list = resp_dict.get("channels", [])
        for channel_info in channel_list:
            if channel_info.get("active"):
                c = Channel(self, channel_info)
                self.channels.append(c)

    @staticmethod
    def _recipient(device=None, contact=None, email=None, channel=None):
        """Build the recipient fields of a push payload.

        Precedence is device, then contact, then raw email, then channel;
        with no recipient given an empty dict is returned.
        """
        data = dict()
        if device:
            data["device_iden"] = device.device_iden
        elif contact:
            data["email"] = contact.email
        elif email:
            data["email"] = email
        elif channel:
            data["channel_tag"] = channel.channel_tag
        return data

    def new_device(self, nickname):
        """Create a new stream device; return (success, Device-or-None)."""
        data = {"nickname": nickname, "type": "stream"}
        r = self._session.post(self.DEVICES_URL, data=json.dumps(data))
        if r.status_code == requests.codes.ok:
            new_device = Device(self, r.json())
            self.devices.append(new_device)
            return True, new_device
        else:
            return False, None

    def new_contact(self, name, email):
        """Create a new contact; return (success, Contact-or-None)."""
        data = {"name": name, "email": email}
        r = self._session.post(self.CONTACTS_URL, data=json.dumps(data))
        if r.status_code == requests.codes.ok:
            new_contact = Contact(self, r.json())
            self.contacts.append(new_contact)
            return True, new_contact
        else:
            return False, None

    def edit_device(self, device, nickname=None, model=None, manufacturer=None):
        """Rename a device; return (success, Device).

        NOTE(review): only ``nickname`` is sent to the API -- the
        ``model`` and ``manufacturer`` parameters are currently ignored.
        """
        data = {"nickname": nickname}
        iden = device.device_iden
        r = self._session.post("{}/{}".format(self.DEVICES_URL, iden), data=json.dumps(data))
        if r.status_code == requests.codes.ok:
            new_device = Device(self, r.json())
            # Replace the stale cached entry with the updated object.
            self.devices[self.devices.index(device)] = new_device
            return True, new_device
        else:
            return False, device

    def edit_contact(self, contact, name):
        """Rename a contact; return (success, Contact)."""
        data = {"name": name}
        iden = contact.iden
        r = self._session.post("{}/{}".format(self.CONTACTS_URL, iden),
                               data=json.dumps(data))
        if r.status_code == requests.codes.ok:
            new_contact = Contact(self, r.json())
            self.contacts[self.contacts.index(contact)] = new_contact
            return True, new_contact
        else:
            return False, contact

    def remove_device(self, device):
        """Delete a device; return (success, JSON response)."""
        iden = device.device_iden
        r = self._session.delete("{}/{}".format(self.DEVICES_URL, iden))
        if r.status_code == requests.codes.ok:
            self.devices.remove(device)
            return True, r.json()
        else:
            return False, r.json()

    def remove_contact(self, contact):
        """Delete a contact; return (success, JSON response)."""
        iden = contact.iden
        r = self._session.delete("{}/{}".format(self.CONTACTS_URL, iden))
        if r.status_code == requests.codes.ok:
            self.contacts.remove(contact)
            return True, r.json()
        else:
            return False, r.json()

    def get_pushes(self, modified_after=None, limit=None):
        """Fetch pushes, following pagination cursors.

        :param modified_after: only pushes modified after this timestamp
        :param limit: stop once at least this many pushes were collected
            (None fetches all pages)
        :return: (True, list-of-push-dicts) or (False, JSON error)
        """
        data = {"modified_after": modified_after, "limit": limit}
        pushes_list = []
        get_more_pushes = True
        while get_more_pushes:
            r = self._session.get(self.PUSH_URL, params=data)
            if r.status_code != requests.codes.ok:
                return False, r.json()
            pushes_list += r.json().get("pushes")
            # A 'cursor' in the response means another page is available.
            if 'cursor' in r.json() and (not limit or len(pushes_list) < limit):
                data['cursor'] = r.json()['cursor']
            else:
                get_more_pushes = False
        return True, pushes_list

    def dismiss_push(self, iden):
        """Mark a push as dismissed; return (success, JSON response)."""
        data = {"dismissed": True}
        r = self._session.post("{}/{}".format(self.PUSH_URL, iden), data=json.dumps(data))
        if r.status_code == requests.codes.ok:
            return True, r.json()
        else:
            return False, r.json()

    def delete_push(self, iden):
        """Delete a push; return (success, JSON response)."""
        r = self._session.delete("{}/{}".format(self.PUSH_URL, iden))
        if r.status_code == requests.codes.ok:
            return True, r.json()
        else:
            return False, r.json()

    def upload_file(self, f, file_name, file_type=None):
        """Upload a file object; return (success, file-descriptor dict).

        The MIME type is guessed from the file when not supplied.
        NOTE(review): the status of the final upload POST is not checked;
        only the upload-request step is verified.
        """
        if not file_type:
            file_type = get_file_type(f, file_name)
        data = {"file_name": file_name, "file_type": file_type}
        # Request url for file upload
        r = self._session.post(self.UPLOAD_REQUEST_URL, data=json.dumps(data))
        if r.status_code != requests.codes.ok:
            return False, r.json()
        upload_data = r.json().get("data")
        file_url = r.json().get("file_url")
        upload_url = r.json().get("upload_url")
        # The upload itself uses a bare requests.post (outside the
        # authenticated session, to the URL the API handed back).
        upload = requests.post(upload_url, data=upload_data, files={"file": f})
        return True, {"file_type": file_type, "file_url": file_url, "file_name": file_name}

    def push_file(self, file_name, file_url, file_type, body=None, device=None, contact=None, email=None, channel=None):
        """Push a previously uploaded file to a recipient."""
        data = {"type": "file", "file_type": file_type, "file_url": file_url, "file_name": file_name}
        if body:
            data["body"] = body
        data.update(PushBullet._recipient(device, contact, email, channel))
        return self._push(data)

    def push_note(self, title, body, device=None, contact=None, email=None):
        """Push a text note."""
        data = {"type": "note", "title": title, "body": body}
        data.update(PushBullet._recipient(device, contact, email))
        return self._push(data)

    def push_address(self, name, address, device=None, contact=None, email=None):
        """Push a named address."""
        data = {"type": "address", "name": name, "address": address}
        data.update(PushBullet._recipient(device, contact, email))
        return self._push(data)

    def push_list(self, title, items, device=None, contact=None, email=None):
        """Push a checklist of items."""
        data = {"type": "list", "title": title, "items": items}
        data.update(PushBullet._recipient(device, contact, email))
        return self._push(data)

    def push_link(self, title, url, body=None, device=None, contact=None, email=None):
        """Push a hyperlink with an optional body text."""
        data = {"type": "link", "title": title, "url": url, "body": body}
        data.update(PushBullet._recipient(device, contact, email))
        return self._push(data)

    def _push(self, data):
        """POST a push payload; return (success, JSON response)."""
        r = self._session.post(self.PUSH_URL, data=json.dumps(data))
        if r.status_code == requests.codes.ok:
            return True, r.json()
        else:
            return False, r.json()

    def refresh(self):
        """Re-fetch devices, contacts, user profile and channels."""
        self._load_devices()
        self._load_contacts()
        self._load_user_info()
        self._load_channels()
| {
"content_hash": "7d65664cfa35243cfeb351225913621d",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 120,
"avg_line_length": 33.201550387596896,
"alnum_prop": 0.5754144291384543,
"repo_name": "aerobit/pushbullet.py",
"id": "83daf5a48d6d78d6ccbf633070aa6c484fb65592",
"size": "8566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pushbullet/pushbullet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22827"
}
],
"symlink_target": ""
} |
import struct
import Config
from Util import Util
class MsgHead:
    """Fixed-size binary message header (mirrors the C ``struct msghead``).

    Field layout: pos, len, sent, mode, id (IDLEN+2 bytes), time,
    frompid, topid -- packed/unpacked via a pre-compiled struct format.
    """
    #struct msghead
    parser = struct.Struct('=iibb%dsiii' % (Config.IDLEN + 2))
    _fields = ['pos', 'len', 'sent', 'mode', ['id', 1, Config.IDLEN + 2], 'time', 'frompid', 'topid']
    # Size in bytes of one packed header.
    size = parser.size

    def unpack(self, str):
        # NOTE: parameter name `str` shadows the builtin; kept for
        # backward compatibility with positional/keyword callers.
        Util.Unpack(self, MsgHead.parser.unpack(str))

    def pack(self):
        """Return the packed binary representation of this header."""
        return MsgHead.parser.pack(*Util.Pack(self))

    def __init__(self, str=None):
        # Fixed `== None` comparison to the idiomatic `is None` (PEP 8).
        if str is None:
            Util.InitStruct(self)
        else:
            self.unpack(str)
| {
"content_hash": "f12ad801ebaf4c4d65a63198594a310a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 101,
"avg_line_length": 24.041666666666668,
"alnum_prop": 0.5736568457538995,
"repo_name": "HenryHu/pybbs",
"id": "c1a31e210fb245d7267313337a6aa11ec0c7741e",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MsgHead.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "18422"
},
{
"name": "Python",
"bytes": "446796"
}
],
"symlink_target": ""
} |
import datetime
import time
from flask import g, session, request, redirect
from rrd import app, config
from rrd.view.utils import get_usertoken_from_session, get_current_user_profile
@app.template_filter('fmt_time')
def fmt_time_filter(value, pattern="%Y-%m-%d %H:%M"):
    """Jinja filter: format a unix timestamp with *pattern*.

    Falsy values (None, 0, '') render as an empty string.
    """
    if value:
        return datetime.datetime.fromtimestamp(value).strftime(pattern)
    return ''
@app.template_filter('time_duration')
def time_duration(v):
    """Jinja filter: render a datetime as a human-readable 'ago' string."""
    elapsed = time.time() - time.mktime(v.timetuple())
    if elapsed <= 60:
        return "just now"
    elif elapsed <= 120:
        return "1 minute ago"
    elif elapsed <= 3600:
        return "%d minutes ago" % (elapsed / 60)
    elif elapsed <= 7200:
        return "1 hour ago"
    elif elapsed <= 3600 * 24:
        return "%d hours ago" % (elapsed / 3600)
    elif elapsed <= 3600 * 24 * 2:
        return "1 day ago"
    return "%d days ago" % (elapsed / 3600 / 24)
@app.teardown_request
def app_teardown(exception):
    """Commit both database connections at the end of every request.

    :param exception: unhandled exception from the request, if any
        (ignored -- commits happen regardless).
    """
    # Imported here rather than at module top -- presumably to avoid an
    # import cycle with rrd.store; TODO confirm.
    from rrd.store import db, alarm_db
    db.commit()
    alarm_db.commit()
@app.before_request
def app_before():
    """Per-request setup: resolve the current user, pick the locale,
    enforce login for protected paths and choose the active nav menu.
    """
    g.user_token = get_usertoken_from_session(session)
    g.user = get_current_user_profile(g.user_token)
    g.locale = request.accept_languages.best_match(config.LANGUAGES.keys())

    path = request.path
    # Paths reachable without being logged in.
    anonymous_ok = (path.startswith("/auth/login")
                    or path.startswith("/static/")
                    or path.startswith("/portal/links/")
                    or path.startswith("/auth/register"))
    if not g.user and not anonymous_ok:
        return redirect("/auth/login")

    # Highlight the matching navigation entry for the current path.
    if path.startswith("/screen"):
        g.nav_menu = "nav_screen"
    elif path.startswith(("/portal/hostgroup", "/portal/group")):
        g.nav_menu = "p_hostgroup"
    elif path.startswith("/portal/template"):
        g.nav_menu = "p_template"
    elif path.startswith("/portal/expression"):
        g.nav_menu = "p_expression"
    elif path.startswith("/portal/nodata"):
        g.nav_menu = "p_nodata"
    elif path.startswith("/portal/alarm-dash"):
        g.nav_menu = "p_alarm-dash"
    else:
        g.nav_menu = ""
| {
"content_hash": "5a63a7cde192136770acc18d641538fa",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 82,
"avg_line_length": 31.06153846153846,
"alnum_prop": 0.6220901436354631,
"repo_name": "open-falcon/dashboard",
"id": "6fa04eb88c9a253bb756dc3bf968f1ea3c5f99ca",
"size": "2619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rrd/view/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42940"
},
{
"name": "Dockerfile",
"bytes": "510"
},
{
"name": "HTML",
"bytes": "179072"
},
{
"name": "JavaScript",
"bytes": "848694"
},
{
"name": "Python",
"bytes": "195616"
},
{
"name": "Shell",
"bytes": "2972"
}
],
"symlink_target": ""
} |
from locust import HttpUser, TaskSet, task
import random
def index(l):
    """Load-test task: GET the web UI root page."""
    l.client.get("/")
def stats(l):
    """Load-test task: GET the request-statistics endpoint."""
    l.client.get("/stats/requests")
class UserTasks(TaskSet):
    """Task set mixing module-level tasks with a decorated one."""
    # one can specify tasks like this
    tasks = [index, stats]

    # but it might be convenient to use the @task decorator
    @task
    def page404(self):
        # Deliberately requests a URL that does not exist.
        self.client.get("/does_not_exist")
class WebsiteUser(HttpUser):
    """
    User class that does requests to the locust web server running on localhost
    """
    host = "http://127.0.0.1:8089"
    # Most task inter-arrival times approximate to exponential distributions
    # We will model this wait time as exponentially distributed with a mean of 1 second
    wait_time = lambda self: random.expovariate(1)
    tasks = [UserTasks]
def strictExp(min_wait, max_wait, mu=1):
    """
    Returns an exponentially distributed time strictly between two bounds.
    """
    # Scale factor is loop-invariant, so compute it once.
    increment = (max_wait - min_wait) / (mu * 6.0)
    # Redraw until the sample lands below the upper bound.
    result = min_wait + random.expovariate(mu) * increment
    while result >= max_wait:
        result = min_wait + random.expovariate(mu) * increment
    return result
class StrictWebsiteUser(HttpUser):
    """
    User class that makes exponential requests but strictly between two bounds.
    """
    host = "http://127.0.0.1:8089"
    # Waits are drawn from strictExp so they stay within [3, 7) seconds.
    wait_time = lambda self: strictExp(3, 7)
    tasks = [UserTasks]
| {
"content_hash": "6ff985ab334653325a8980b4eb8afa37",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 87,
"avg_line_length": 24.87272727272727,
"alnum_prop": 0.6476608187134503,
"repo_name": "locustio/locust",
"id": "05a5ff0501cac87404c675959c23f1eda1605cb2",
"size": "1368",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/custom_wait_function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "733"
},
{
"name": "HTML",
"bytes": "33145"
},
{
"name": "JavaScript",
"bytes": "17309"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Python",
"bytes": "914443"
},
{
"name": "Sass",
"bytes": "10379"
},
{
"name": "Shell",
"bytes": "3452"
}
],
"symlink_target": ""
} |
from pylcp.crud import base as crud
class PaymentAuth(crud.LCPCrud):
    """CRUD resource for payment authorizations (delegates to the base)."""

    def create(self, path, payload):
        # Pure pass-through; kept so the auth resource has an explicit API.
        return super(PaymentAuth, self).create(path, payload)
class PaymentCapture(crud.LCPCrud):
    """CRUD resource for payment captures (no request body needed)."""

    def create(self, path):
        # Captures carry no data, so an empty JSON object is posted.
        return super(PaymentCapture, self).create(path, '{}')
| {
"content_hash": "9872a075e5caa823c93dd01519d3e942",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 61,
"avg_line_length": 27.09090909090909,
"alnum_prop": 0.7013422818791947,
"repo_name": "Points/PyLCP",
"id": "bc304a4e11ee51d0d1745c887103d8ec18142edc",
"size": "298",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pylcp/crud/payment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "83023"
},
{
"name": "Shell",
"bytes": "1026"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from .views import dashboard
urlpatterns = patterns('',
    # List the courses belonging to one source.
    url(r'^source/(?P<pk>\d+)/course/list/$', dashboard.SourceCourseListView.as_view(), name='source-course-list'),
    # Edit an existing course by primary key.
    url(r'^course/(?P<pk>\d+)/edit/$', dashboard.CourseFormEditView.as_view(), name='course-edit'),
    # View a course by its hash-based link rather than by pk.
    url(r'^course/(?P<linkhash>\w+)/$', dashboard.CourseDetailView.as_view(lookup_field='linkhash'), name='course-view'),
    # Add a course to a source, authorized by the source's edit key.
    url(r'^source/(?P<edit_key>\w+)/add/$', dashboard.CourseFormAddView.as_view(), name='course-add'),
) | {
"content_hash": "ef6ffc9bc67915633c67f724da077b88",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 121,
"avg_line_length": 42,
"alnum_prop": 0.6831501831501832,
"repo_name": "ocwc/ocwc-data",
"id": "3d483cb58010541ec7c11be1edc3af66f3669cac",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search/web/urls_dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16337"
},
{
"name": "HTML",
"bytes": "5468"
},
{
"name": "JavaScript",
"bytes": "67"
},
{
"name": "Python",
"bytes": "353428"
}
],
"symlink_target": ""
} |
'''
Module for interfacing with SysFS
.. seealso:: https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
.. versionadded:: 2016.3.0
'''
# Import python libs
from __future__ import absolute_import
import logging
import os
import stat
# Import external libs
import salt.ext.six as six
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only work on Linux
    '''
    # SysFS is a Linux-kernel filesystem, so this module is Linux-only.
    return salt.utils.is_linux()
def attr(key, value=None):
    '''
    Access/write a SysFS attribute.
    If the attribute is a symlink, it's destination is returned

    :return: value or bool

    CLI example:
     .. code-block:: bash

        salt '*' sysfs.attr block/sda/queue/logical_block_size
    '''
    resolved = target(key)
    if resolved is False:
        return False
    if os.path.isdir(resolved):
        # Directories are returned as-is rather than read recursively.
        return resolved
    if value is not None:
        return write(resolved, value)
    return read(resolved)
def write(key, value):
    '''
    Write a SysFS attribute/action

    CLI example:
     .. code-block:: bash

        salt '*' sysfs.write devices/system/cpu/cpu0/cpufreq/scaling_governor 'performance'
    '''
    try:
        path = target(key)
        log.trace('Writing {0} to {1}'.format(value, path))
        with salt.utils.fopen(path, 'w') as attr_file:
            attr_file.write('{0}\n'.format(value))
        return True
    except:  # pylint: disable=bare-except
        # Deliberate best-effort: any failure simply reports False.
        return False
def read(key, root=''):
    '''
    Read from SysFS

    :param key: file or path in SysFS; if key is a list then root will be prefixed on each key
    :param root: optional prefix joined in front of (each) key
    :return: the full (tree of) SysFS attributes under key, or False

    CLI example:
     .. code-block:: bash

        salt '*' sysfs.read class/net/em1/statistics
    '''
    # A list of keys: read each one (root-prefixed) into a dict.
    if not isinstance(key, six.string_types):
        res = {}
        for akey in key:
            ares = read(os.path.join(root, akey))
            if ares is not False:
                res[akey] = ares
        return res

    key = target(os.path.join(root, key))
    if key is False:
        return False
    elif os.path.isdir(key):
        # Directory: recursively read every readable attribute below it,
        # rebuilding the nested structure from the relative sub-paths.
        keys = interfaces(key)
        result = {}
        for subkey in keys['r'] + keys['rw']:
            subval = read(os.path.join(key, subkey))
            if subval is not False:
                subkeys = subkey.split('/')
                subkey = subkeys.pop()
                subresult = result
                if len(subkeys):
                    # Create intermediate dicts for nested attribute paths.
                    for skey in subkeys:
                        if skey not in subresult:
                            subresult[skey] = {}
                        subresult = subresult[skey]
                subresult[subkey] = subval
        return result
    else:
        try:
            log.trace('Reading {0}...'.format(key))
            # Certain things in SysFS are pipes 'n such.
            # This opens it non-blocking, which prevents indefinite blocking
            with os.fdopen(os.open(key, os.O_RDONLY | os.O_NONBLOCK)) as treader:
                # alternative method for the same idea, but only works for completely empty pipes
                # treader = select.select([treader], [], [], 1)[0][0]
                val = treader.read().strip()
                if not val:
                    return False
                # Coerce numeric-looking values: try int first, then float.
                try:
                    val = int(val)
                except:  # pylint: disable=bare-except
                    try:
                        val = float(val)
                    except:  # pylint: disable=bare-except
                        pass
                return val
        except:  # pylint: disable=bare-except
            # Unreadable attribute (permissions, transient file): report False.
            return False
def target(key, full=True):
    '''
    Return the basename of a SysFS key path

    :param key: the location to resolve within SysFS
    :param full: full path instead of basename
    :return: fullpath or basename of path, or False when the key does not exist

    CLI example:
     .. code-block:: bash

        salt '*' sysfs.read class/ttyS0
    '''
    if not key.startswith('/sys'):
        key = os.path.join('/sys', key)
    # Resolve symlinks so callers always work with the canonical path.
    key = os.path.realpath(key)

    if not os.path.exists(key):
        # (typo fix: was 'Unkown')
        log.debug('Unknown SysFS key {0}'.format(key))
        return False
    elif full:
        return key
    else:
        return os.path.basename(key)
def interfaces(root):
    '''
    Generate a dictionary with all available interfaces relative to root.
    Symlinks are not followed.

    CLI example:
     .. code-block:: bash

        salt '*' sysfs.interfaces block/bcache0/bcache

    Output example:
     .. code-block:: json

       {
          "r": [
            "state",
            "partial_stripes_expensive",
            "writeback_rate_debug",
            "stripe_size",
            "dirty_data",
            "stats_total/cache_hits",
            "stats_total/cache_bypass_misses",
            "stats_total/bypassed",
            "stats_total/cache_readaheads",
            "stats_total/cache_hit_ratio",
            "stats_total/cache_miss_collisions",
            "stats_total/cache_misses",
            "stats_total/cache_bypass_hits",
          ],
          "rw": [
            "writeback_rate",
            "writeback_rate_update_seconds",
            "cache_mode",
            "writeback_delay",
            "label",
            "writeback_running",
            "writeback_metadata",
            "running",
            "writeback_rate_p_term_inverse",
            "sequential_cutoff",
            "writeback_percent",
            "writeback_rate_d_term",
            "readahead"
          ],
          "w": [
            "stop",
            "clear_stats",
            "attach",
            "detach"
          ]
       }

    .. note::
       * 'r' interfaces are read-only
       * 'w' interfaces are write-only (e.g. actions)
       * 'rw' are interfaces that can both be read or written
    '''
    root = target(root)
    if root is False or not os.path.isdir(root):
        log.error('SysFS {0} not a dir'.format(root))
        return False

    readwrites = []
    reads = []
    writes = []
    for path, _, files in os.walk(root, followlinks=False):
        for afile in files:
            canpath = os.path.join(path, afile)
            if not os.path.isfile(canpath):
                continue
            # Classify each attribute by its owner permission bits.
            stat_mode = os.stat(canpath).st_mode
            is_r = bool(stat.S_IRUSR & stat_mode)
            is_w = bool(stat.S_IWUSR & stat_mode)
            relpath = os.path.relpath(canpath, root)
            if is_w:
                if is_r:
                    readwrites.append(relpath)
                else:
                    writes.append(relpath)
            elif is_r:
                reads.append(relpath)
            else:
                # log.warn is a deprecated alias for warning(); use warning.
                log.warning('Unable to find any interfaces in {0}'.format(canpath))

    return {
        'r': reads,
        'w': writes,
        'rw': readwrites
    }
| {
"content_hash": "c9a6cf2853514af4d977aef51dc34423",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 97,
"avg_line_length": 26.540856031128406,
"alnum_prop": 0.525142940917754,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "4c1eb675d31d53f59bc5628fc2f3f10dd9ee921c",
"size": "6845",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/salt/modules/sysfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
import pathofexile.api
import pathofexile.ladder.analytics
def main():
    """Produce a ladder analytics report for every available league."""
    # Fallback leagues, used when the API can't be reached.
    leagues = ['Standard', 'Hardcore']

    # try to get a list of updated leagues from the API
    try:
        leagues = [entry.get('id') for entry in pathofexile.api.get_leagues()]
    except:
        # Best-effort: keep the defaults on any API failure.
        pass

    for league in leagues:
        pathofexile.ladder.analytics.report(league)
# Run the report generation when executed as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "7a0fe5bb71e7daa7234ce61e029011de",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 80,
"avg_line_length": 25.11764705882353,
"alnum_prop": 0.6416861826697893,
"repo_name": "willroberts/pathofexile",
"id": "9cc82de2d05aacfe4b231339edcf3da742f168da",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analytics_report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1259"
},
{
"name": "HTML",
"bytes": "5031"
},
{
"name": "JavaScript",
"bytes": "6504"
},
{
"name": "Python",
"bytes": "33838"
},
{
"name": "Shell",
"bytes": "338"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib import messages
from getraenkewart.forms import RegistrationForm
# User-facing flash messages (German).
REGISTRATION_SUCCESS = "Neuen Benutzer erfolgreich angelegt. Bitte warte auf deine Aktivierung."
LOGIN_SUCCESFUL = "Erfolgreich eingeloggt!"
LOGOUT_SUCCESFUL = "Erfolgreich ausgeloggt!"
WRONG_PASSWORD_ERROR = "Dein Passwort oder dein Benutzername ist falsch."
NOT_ACTIVE_ERROR = "Du bist noch nicht aktiviert! Frag mal nach."
def index(request):
    """Render the landing page."""
    return render (request, "getraenkewart/index.html")
def login_view(request):
    """Authenticate the POSTed credentials and redirect to the index.

    Flashes an error for wrong credentials or an inactive account,
    a success message otherwise.
    """
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(username=username, password=password)
    if user is None:
        messages.error(request, WRONG_PASSWORD_ERROR)
    elif not user.is_active:
        messages.error(request, NOT_ACTIVE_ERROR)
    else:
        login(request, user)
        messages.success(request, LOGIN_SUCCESFUL)
    return redirect(index)
def logout_view(request):
    """Log the current user out, flash a confirmation and redirect home."""
    logout(request)
    messages.success(request, LOGOUT_SUCCESFUL)
    return redirect(index)
def register(request):
    """Show the registration form (GET) or create an inactive user (POST).

    New accounts are created deactivated; an admin must activate them.
    """
    if request.method == "GET":
        form = RegistrationForm()
        return render(request, "getraenkewart/register.html", {"form": form})

    form = RegistrationForm(request.POST)
    form.full_clean()
    if not form.is_valid():
        # Flash every validation error and re-render the form.
        for _, error in form.errors.items():
            messages.error(request, error)
        return render(request, "getraenkewart/register.html", {"form": form})

    data = form.cleaned_data
    user = User.objects.create_user(data['username'], data['email'], data['password'])
    user.first_name = data['first_name']
    user.last_name = data['last_name']
    # Accounts start deactivated until approved.
    user.is_active = False
    user.save()
    messages.success(request, REGISTRATION_SUCCESS)
    return redirect(index)
| {
"content_hash": "ae7057471a8a3368313ba1c60de5ce4e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 96,
"avg_line_length": 38.95161290322581,
"alnum_prop": 0.6525879917184265,
"repo_name": "Faerbit/getraenkewart.de",
"id": "3ca468e0177927dd9e658d42471ea7e4486f7c00",
"size": "2415",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "getraenkewart/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11845"
},
{
"name": "JavaScript",
"bytes": "612"
},
{
"name": "Python",
"bytes": "46176"
}
],
"symlink_target": ""
} |
__author__ = 'kra869'

# Sections the `show` command can display (argparse choices).
available_sections = ['modules', 'mods', 'configuration', 'config']
def init(context):
    """Register the `show` command and its `sections` argument.

    :param context: armory application context providing command registration
    :return: None
    """
    parser = context.register_command('show', command_show, help='Show information about armory repository')
    # `available_sections` is only read here, so no `global` declaration
    # is needed (the original declared it global unnecessarily).
    parser.add_argument('sections', nargs='+', choices=available_sections)
    return None
def command_show(args, context):
    """Dispatch every requested section to its display function.

    Fixes two defects: the loop used to return after the first section,
    ignoring the rest despite `nargs='+'`, and the configuration branch
    tested 'configs', which is not among the argparse choices ('config'
    is), so configuration could never be shown.
    """
    for section in args.sections:
        if section == 'mods' or section == 'modules':
            command_show_mods(args, context)
        elif section == 'config' or section == 'configuration':
            command_show_config(args, context)
    return None
def command_show_mods(args, context):
    """Print a status table of all modules known to the context."""
    modules = context.modules.from_context(context)
    header_fmt = "{status:8} {name:30} {version:20} {description:50}"
    print(header_fmt.format(name="NAME", version="VERSION", status="STATUS", description="DESCRIPTION"))
    row_fmt = "[{status:^6}] {name:30} {version:20} {description:50}"
    for _, module in modules.items():
        print(row_fmt.format(name=module.name, version=module.version, status=module.status, description=module.short_description))
    return None
def command_show_config(args, context):
    """Placeholder: configuration display is not implemented yet."""
    return None
| {
"content_hash": "1b0c346aea9002209744fdea8c4c0750",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 179,
"avg_line_length": 35,
"alnum_prop": 0.6857142857142857,
"repo_name": "mikaelbrandin/armory",
"id": "1fbcebcd327ee4f347cc718f1ab37212ae4ffb99",
"size": "1190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/show.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "90815"
}
],
"symlink_target": ""
} |
from pylons import tmpl_context as c
from tg import request, url
import json
import logging
from formencode import validators as fev
from webhelpers import paginate
import ew as ew_core
import ew.jinja2_ew as ew
log = logging.getLogger(__name__)
def onready(text):
    """Wrap *text* in a jQuery document-ready handler as a JS resource."""
    return ew.JSScript('$(function () {%s});' % text)
class LabelList(fev.UnicodeString):
    """Validator converting between a comma-separated string and a list."""

    def __init__(self, *args, **kwargs):
        # Empty input validates to an empty list instead of None.
        kwargs.setdefault('if_empty', [])
        super(LabelList, self).__init__(*args, **kwargs)

    def _to_python(self, value, state):
        # Comma-separated string -> list of label strings.
        value = super(LabelList, self)._to_python(value, state)
        return value.split(',')

    def _from_python(self, value, state):
        # List of labels -> comma-separated string.
        value = ','.join(value)
        value = super(LabelList, self)._from_python(value, state)
        return value
class LabelEdit(ew.InputField):
    """Tag-style input widget for editing a comma-separated label list."""
    template = 'jinja:allura:templates/widgets/label_edit.html'
    validator = LabelList(if_empty=[])
    defaults = dict(
        ew.InputField.defaults,
        name=None,
        value=None,
        className='',
        show_label=True,
        placeholder=None)

    def from_python(self, value, state=None):
        # Accept either a pre-joined string or a list of labels.
        if isinstance(value, basestring):
            return value
        else:
            return ','.join(value)

    def resources(self):
        """JS/CSS resources: jquery-ui + tagsInput, plus the setup script."""
        yield ew.JSLink('allura/js/jquery-ui.min.js', location='body_top_js')
        yield ew.JSLink('js/jquery.tagsinput.js')
        yield ew.CSSLink('css/jquery.tagsinput.css')
        # Wire up tagsInput with app-relative autocomplete, and commit any
        # half-typed tag when the input loses focus.
        yield onready('''
          $('input.label_edit').tagsInput({
              'height':'100%%',
              'width':'100%%',
              'autocomplete_url':'%(url)stags'
          });
          $('form').on('blur', '.ui-autocomplete-input', function() {
              setTimeout(function(){
                  var clicked = $(document.activeElement); // This is the element that has focus
                  if (clicked.is('#ui-active-menuitem')) {
                      return false;
                  } else {
                      var value = $('div.tagsinput div input').val();
                      var exists = $('input.label_edit').tagExist(value);
                      var default_value = $('div.tagsinput div input').attr('data-default');
                      if ((value !== default_value) && (!exists) && value !== '') {
                          $('input.label_edit').addTag(value);
                      }
                      $('input[type=submit]', this).prop('disabled', true);
                  }
              }, 1);
          });
        ''' % dict(url=c.app.url))
class ProjectUserSelect(ew.InputField):
    """Input with ajax autocomplete of users in the current project."""
    template = 'jinja:allura:templates/widgets/project_user_select.html'
    defaults = dict(
        ew.InputField.defaults,
        name=None,
        value=None,
        show_label=True,
        className=None)

    def __init__(self, **kw):
        super(ProjectUserSelect, self).__init__(**kw)
        # Template iterates the value, so normalize scalars to a list.
        if not isinstance(self.value, list):
            self.value = [self.value]

    def from_python(self, value, state=None):
        # Value is rendered as-is; no conversion needed.
        return value

    def resources(self):
        for r in super(ProjectUserSelect, self).resources():
            yield r
        yield ew.JSLink('allura/js/jquery-ui.min.js', location='body_top_js')
        yield ew.CSSLink('css/autocomplete.css')  # customized in [6b78ed] so we can't just use jquery-ui.min.css
        # Autocomplete against the project's user_search endpoint.
        yield onready('''
        $('input.project_user_select').autocomplete({
            source: function (request, response) {
                $.ajax({
                    url: "%suser_search",
                    dataType: "json",
                    data: {
                        term: request.term
                    },
                    success: function (data) {
                        response(data.users);
                    }
                });
            },
            minLength: 2
        });''' % c.project.url())
class ProjectUserCombo(ew.SingleSelectField):
    """Combobox of project users, populated lazily via ajax."""
    template = 'jinja:allura:templates/widgets/project_user_combo.html'
    # No options for widget initially.
    # It'll be populated later via ajax call.
    options = []

    def to_python(self, value, state):
        # Skipping validation, 'cause widget has no values initially.
        # All values loaded later via ajax.
        return value

    def resources(self):
        for r in super(ProjectUserCombo, self).resources():
            yield r
        yield ew.JSLink('allura/js/jquery-ui.min.js', location='body_top_js')
        yield ew.CSSLink('css/autocomplete.css')  # customized in [6b78ed] so we can't just use jquery-ui.min.css
        yield ew.CSSLink('css/combobox.css')
        yield ew.JSLink('js/combobox.js')
        # Combobox pulls its option list from the project's users endpoint.
        yield onready('''
        $('select.project-user-combobox').combobox({
            source_url: "%susers"
        });''' % c.project.url())
class NeighborhoodProjectSelect(ew.InputField):
    """Input with ajax autocomplete of projects, querying a supplied URL."""
    template = 'jinja:allura:templates/widgets/neighborhood_project_select.html'
    defaults = dict(
        ew.InputField.defaults,
        name=None,
        value=None,
        show_label=True,
        className=None)

    def __init__(self, url, **kw):
        """:param url: autocomplete endpoint returning {'projects': [...]}"""
        super(NeighborhoodProjectSelect, self).__init__(**kw)
        # Template iterates the value, so normalize scalars to a list.
        if not isinstance(self.value, list):
            self.value = [self.value]
        self.url = url

    def from_python(self, value, state=None):
        # Value is rendered as-is; no conversion needed.
        return value

    def resources(self):
        for r in super(NeighborhoodProjectSelect, self).resources():
            yield r
        yield ew.JSLink('allura/js/jquery-ui.min.js', location='body_top_js')
        yield ew.CSSLink('css/autocomplete.css')  # customized in [6b78ed] so we can't just use jquery-ui.min.css
        yield onready('''
        $('input.neighborhood-project-select').autocomplete({
            source: function (request, response) {
                $.ajax({
                    url: "%s",
                    dataType: "json",
                    data: {
                        term: request.term
                    },
                    success: function (data) {
                        response(data.projects);
                    }
                });
            },
            minLength: 3
        });''' % self.url)
class AttachmentList(ew_core.Widget):
    """Widget rendering a list of attachments (see attachment_list.html)."""
    template = 'jinja:allura:templates/widgets/attachment_list.html'
    defaults = dict(ew_core.Widget.defaults,
                    attachments=None,
                    edit_mode=None)
class AttachmentAdd(ew_core.Widget):
    """Attachment upload form, initially collapsed behind an 'add' button."""
    template = 'jinja:allura:templates/widgets/attachment_add.html'
    defaults = dict(
        ew_core.Widget.defaults,
        action=None,
        name=None)

    def resources(self):
        for r in super(AttachmentAdd, self).resources():
            yield r
        # Clicking the add button swaps it out for the real form fields.
        yield onready('''
            $(".attachment_form_add_button").click(function (evt) {
                $(this).hide();
                $(".attachment_form_fields", this.parentNode).show();
                evt.preventDefault();
            });
        ''')
class SubmitButton(ew.SubmitButton):
    """Submit button pre-styled with jQuery-UI button classes."""
    attrs = {'class': 'ui-state-default ui-button ui-button-text'}
class Radio(ew.InputField):
    """Radio input rendered from an inline jinja2 snippet.

    A truthy ``value`` renders the ``checked`` attribute.
    """
    template = ew_core.render.Snippet('''<input {% if value %} checked{% endif %} {{widget.j2_attrs({
        'id':id,
        'type':field_type,
        'name':rendered_name,
        'class':css_class,
        'readonly':readonly,
        'value':value},
        attrs)}}>''', 'jinja2')
    defaults = dict(
        ew.InputField.defaults,
        field_type='radio')
class AutoResizeTextarea(ew.TextArea):
    """Textarea that grows with its content via jquery.autosize."""
    defaults = dict(
        ew.TextArea.defaults,
        name=None,
        value=None,
        css_class='auto_resize')

    def resources(self):
        yield ew.JSLink('js/jquery.autosize-min.js')
        # Attach autosize lazily, on first focus.
        yield onready('''
        $('textarea.auto_resize').focus(function(){$(this).autosize();});
        ''')
class MarkdownEdit(ew.TextArea):
    """Markdown editor textarea (SimpleMDE plus site-specific glue)."""
    template = 'jinja:allura:templates/widgets/markdown_edit.html'
    validator = fev.UnicodeString()
    defaults = dict(
        ew.TextArea.defaults,
        name=None,
        value=None,
        show_label=True)

    def from_python(self, value, state=None):
        # Raw markdown text passes straight through to the template.
        return value

    def resources(self):
        for r in super(MarkdownEdit, self).resources():
            yield r
        yield ew.JSLink('js/jquery.lightbox_me.js')
        yield ew.CSSLink('css/simplemde.min.css', compress=False)
        yield ew.CSSLink('css/markitup_sf.css')
        yield ew.JSLink('js/simplemde.min.js')
        yield ew.JSLink('js/sf_markitup.js')
class PageList(ew_core.Widget):
    """Pagination control rendering page links for a result set."""
    template = 'jinja:allura:templates/widgets/page_list.html'
    defaults = dict(
        ew_core.Widget.defaults,
        name=None,
        limit=None,
        count=0,
        page=0,
        show_label=True,
        show_if_single_page=False,
        force_next=False)

    def paginator(self, count, page, limit, zero_based_pages=True):
        """Build a webhelpers paginate.Page.

        Converts between the app's 0-based page numbers and webhelpers'
        1-based ones via page_offset; limit defaults to 10.
        """
        page_offset = 1 if zero_based_pages else 0
        limit = 10 if limit is None else limit
        def page_url(page):
            # Preserve the current query string, swapping only 'page'.
            params = request.GET.copy()
            params['page'] = page - page_offset
            return url(request.path, params)
        return paginate.Page(range(count), page + page_offset, int(limit),
                             url=page_url)

    def prepare_context(self, context):
        context = super(PageList, self).prepare_context(context)
        count = context['count']
        page = context['page']
        limit = context['limit']
        context['paginator'] = self.paginator(count, page, limit)
        if context['force_next']:
            # Caller asserts more results exist even if count says otherwise.
            context['paginator'].next_page = context['paginator'].page + 1
        return context

    def resources(self):
        yield ew.CSSLink('css/page_list.css')

    @property
    def url_params(self, **kw):
        # Current request params minus the pagination keys.
        # NOTE(review): **kw on a property getter can never receive
        # arguments; kept as-is for byte-compatibility.
        url_params = dict()
        for k, v in request.params.iteritems():
            if k not in ['limit', 'count', 'page']:
                url_params[k] = v
        return url_params
class PageSize(ew_core.Widget):
    """Drop-down choosing how many results to show per page."""
    template = 'jinja:allura:templates/widgets/page_size.html'
    defaults = dict(
        ew_core.Widget.defaults,
        limit=None,
        name=None,
        count=0,
        show_label=False)

    @property
    def url_params(self, **kw):
        # Current request params minus the pagination keys (duplicated
        # from PageList.url_params; **kw is likewise unusable here).
        url_params = dict()
        for k, v in request.params.iteritems():
            if k not in ['limit', 'count', 'page']:
                url_params[k] = v
        return url_params

    def resources(self):
        # Auto-submit the surrounding form when the page size changes.
        yield onready('''
        $('select.results_per_page').change(function () {
            this.form.submit();});''')
class JQueryMixin(object):
    """Mixin wiring a widget to a jQuery plugin.

    Subclasses set js_widget_name / js_plugin_file and list in js_params
    the attribute names to pass to the plugin as JSON options.
    """
    js_widget_name = None
    js_plugin_file = None
    js_params = [
        'container_cls'
    ]
    defaults = dict(
        container_cls='container')

    def resources(self):
        for r in super(JQueryMixin, self).resources():
            yield r
        if self.js_plugin_file is not None:
            yield self.js_plugin_file
        opts = dict(
            (k, getattr(self, k))
            for k in self.js_params)
        # Re-run plugin setup on every 'clone' event so widgets added to
        # the DOM later get initialized too; trigger once at load.
        yield onready('''
$(document).bind('clone', function () {
    $('.%s').%s(%s); });
$(document).trigger('clone');
''' % (self.container_cls, self.js_widget_name, json.dumps(opts)))
class SortableRepeatedMixin(JQueryMixin):
    """JQueryMixin flavor for drag-and-drop sortable repeated fields."""
    js_widget_name = 'SortableRepeatedField'
    js_plugin_file = ew.JSLink('js/sortable_repeated_field.js')
    js_params = JQueryMixin.js_params + [
        'field_cls',
        'flist_cls',
        'stub_cls',
        'msg_cls',
        'append_to',
        'extra_field_on_focus_name',
    ]
    defaults = dict(
        container_cls='sortable-repeated-field',
        field_cls='sortable-field',
        flist_cls='sortable-field-list',
        stub_cls='sortable-field-stub',
        msg_cls='sortable-field-message',
        append_to='top',
        empty_msg='No fields have been defined',
        nonempty_msg='Drag and drop the fields to reorder',
        show_msg=True,
        show_button=True,
        extra_field_on_focus_name=None,
        repetitions=0)
    # "Add a new row" button shared by the templates.
    button = ew.InputField(
        css_class='add', field_type='button', value='New Field')
class SortableRepeatedField(SortableRepeatedMixin, ew.RepeatedField):
    """Sortable list of repeated form fields (Genshi template)."""
    template = 'genshi:allura.templates.widgets.sortable_repeated_field'
    defaults = dict(
        ew.RepeatedField.defaults,
        **SortableRepeatedMixin.defaults)
class SortableTable(SortableRepeatedMixin, ew.TableField):
    """Sortable table of repeated form rows (Genshi template)."""
    template = 'genshi:allura.templates.widgets.sortable_table'
    defaults = dict(
        ew.TableField.defaults,
        **SortableRepeatedMixin.defaults)
class StateField(JQueryMixin, ew.CompoundField):
    """Compound field whose visible sub-fields depend on a selector value."""
    template = 'genshi:allura.templates.widgets.state_field'
    js_widget_name = 'StateField'
    js_plugin_file = ew.JSLink('js/state_field.js')
    js_params = JQueryMixin.js_params + [
        'selector_cls',
        'field_cls',
    ]
    defaults = dict(
        ew.CompoundField.defaults,
        js_params=js_params,
        container_cls='state-field-container',
        selector_cls='state-field-selector',
        field_cls='state-field',
        show_label=False,
        selector=None,
        # NOTE(review): shared mutable default dict; presumably replaced
        # per-instance by the widget framework -- confirm before mutating.
        states={},
    )

    @property
    def fields(self):
        # Selector first, then one field per state.
        return [self.selector] + self.states.values()
class DateField(JQueryMixin, ew.TextField):
    """Text field backed by the jQuery-UI datepicker."""
    js_widget_name = 'datepicker'
    js_params = JQueryMixin.js_params
    container_cls = 'ui-date-field'
    defaults = dict(
        ew.TextField.defaults,
        container_cls='ui-date-field',
        css_class='ui-date-field')

    def resources(self):
        for r in super(DateField, self).resources():
            yield r
        yield ew.JSLink('allura/js/jquery-ui.min.js', location='body_top_js')
        yield ew.CSSLink('allura/css/smoothness/jquery-ui.min.css', compress=False)  # compress will also serve from a different location, breaking image refs
class FieldCluster(ew.CompoundField):
    """Compound field rendered with the field-cluster Genshi template."""
    template = 'genshi:allura.templates.widgets.field_cluster'
class AdminField(ew.InputField):
    '''Field with the correct layout/etc for an admin page'''
    template = 'jinja:allura:templates/widgets/admin_field.html'
    defaults = dict(
        ew.InputField.defaults,
        field=None,
        css_class=None,
        errors=None)

    def __init__(self, **kw):
        super(AdminField, self).__init__(**kw)
        # Mirror the wrapped field's params onto this wrapper so the
        # admin template can render them directly.
        for p in self.field.get_params():
            setattr(self, p, getattr(self.field, p))

    def resources(self):
        # Delegate resource loading to the wrapped field.
        for r in self.field.resources():
            yield r
class Lightbox(ew_core.Widget):
    """Modal lightbox opened by clicking a trigger selector
    (jquery.lightbox_me); elements with class 'close' dismiss it."""
    template = 'jinja:allura:templates/widgets/lightbox.html'
    defaults = dict(
        name=None,
        trigger=None,
        options='',
        content='',
        content_template=None)

    def resources(self):
        yield ew.JSLink('js/jquery.lightbox_me.js')
        yield onready('''
        var $lightbox = $('#lightbox_%s');
        $('body').on('click', '%s', function(e) {
            e.preventDefault();
            $lightbox.lightbox_me(%s);
        });
        $lightbox.on('click', '.close', function(e) {
            e.preventDefault();
            $lightbox.trigger('close');
        });
        ''' % (self.name, self.trigger, self.options))
class DisplayOnlyField(ew.HiddenField):
    '''
    Render a field as plain text, optionally with a hidden field to preserve the value.

    The hidden input is emitted when with_hidden_input is True, or when it
    is None (the default) and a field name is set.
    '''
    template = ew.Snippet('''{{ (text or value or attrs.value)|e }}
        {%- if with_hidden_input is none and name or with_hidden_input -%}
            <input {{
                widget.j2_attrs({
                    'type':'hidden',
                    'name':name,
                    'value':value,
                    'class':css_class}, attrs)
                }}>
        {%- endif %}''', 'jinja2')
    defaults = dict(
        ew.HiddenField.defaults,
        text=None,
        value=None,
        with_hidden_input=None)
| {
"content_hash": "5d12b7270f3ccc0d85ed4cfc38178a89",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 158,
"avg_line_length": 31.24557956777996,
"alnum_prop": 0.5685990945674044,
"repo_name": "heiths/allura",
"id": "900e46eb6f2853fa7906897ae84c7b59d100fe60",
"size": "16774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Allura/allura/lib/widgets/form_fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "173671"
},
{
"name": "HTML",
"bytes": "751039"
},
{
"name": "JavaScript",
"bytes": "1136845"
},
{
"name": "Makefile",
"bytes": "7788"
},
{
"name": "Puppet",
"bytes": "6872"
},
{
"name": "Python",
"bytes": "4238265"
},
{
"name": "RAML",
"bytes": "26153"
},
{
"name": "Ruby",
"bytes": "7006"
},
{
"name": "Shell",
"bytes": "131827"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
"""Commands for managing billing accounts and associate them with projects."""
from googlecloudsdk.api_lib.billing import utils
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Billing(base.Group):
    """Manage billing accounts and associate them with projects."""

    def Filter(self, context, _):
        """Stash the billing API client and messages on the command context."""
        client = utils.GetClient()
        messages = utils.GetMessages()
        context['billing_client'] = client
        context['billing_messages'] = messages
| {
"content_hash": "80997bf3f791ed456e952029d429baab",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 34.38461538461539,
"alnum_prop": 0.7606263982102909,
"repo_name": "KaranToor/MA450",
"id": "673199d930cbc0943da17cd5da3d965f0577d85e",
"size": "1042",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/billing/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
import osv/power.hh
def __request_rebuild( reboots, sleep_seconds ):
    # Reboot the OSv VM up to `reboots` times, tracking the attempt count
    # in /reboot.log; once the limit is reached the VM is powered off.
    # NOTE(review): this is Pythia dialect, not plain Python --
    # `osv::reboot()` and `inline(...)` are translator constructs.
    counter = 0
    try:
        print 'trying to readfile...'
        d = readfile( open('/reboot.log', 'r' ) )
        print d
        #counter = int( d )
        # Digit value of the first character (ASCII code minus 48).
        counter = inline('(int)(d[0])') - 48
        print counter
        print '--------'
    except:
        # /reboot.log missing: this is the very first boot.
        print 'first bootup...'
    counter += 1
    print counter
    # NOTE(review): the counter is never written back to /reboot.log
    # (write code commented out below), so it presumably never advances
    # past the first increment -- confirm intent.
    #f = open('/reboot.log', 'w')
    #s=cstr(str(counter))
    #f.close()
    if counter < reboots:
        print 'sleeping...'
        #sleep( sleep_seconds )
        print 'rebooting...'
        osv::reboot()
    else:
        print 'vm shutdown...'
osv::poweroff() | {
"content_hash": "38655f517a889993ad8ac9f5ad5f9450",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 48,
"avg_line_length": 18.766666666666666,
"alnum_prop": 0.6003552397868561,
"repo_name": "secureosv/pythia",
"id": "c15aa087f43951bf9e9e4cc6684c72f0c31633bb",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pythia/runtime/osv_builtins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "23667"
},
{
"name": "HTML",
"bytes": "44433"
},
{
"name": "Perl",
"bytes": "66040"
},
{
"name": "Python",
"bytes": "464271"
},
{
"name": "Shell",
"bytes": "1274"
}
],
"symlink_target": ""
} |
"""
pyexcel_io.readers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
file readers
:copyright: (c) 2014-2022 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
from pyexcel_io.plugins import IOPluginInfoChainV2
# Register the CSV/TSV reader family with the plugin chain: one reader per
# source location (file / content / memory), plus the zipped csvz/tsvz
# variants, which share implementations across locations.
IOPluginInfoChainV2(__name__).add_a_reader(
    relative_plugin_class_path="csv_in_file.FileReader",
    file_types=["csv", "tsv"],
    locations=["file"],
    stream_type="text",
).add_a_reader(
    relative_plugin_class_path="csv_content.ContentReader",
    file_types=["csv", "tsv"],
    locations=["content"],
    stream_type="text",
).add_a_reader(
    relative_plugin_class_path="csv_in_memory.MemoryReader",
    file_types=["csv", "tsv"],
    locations=["memory"],
    stream_type="text",
).add_a_reader(
    relative_plugin_class_path="csvz.FileReader",
    file_types=["csvz", "tsvz"],
    locations=["file", "memory"],
    stream_type="binary",
).add_a_reader(
    relative_plugin_class_path="csvz.ContentReader",
    file_types=["csvz", "tsvz"],
    locations=["content"],
    stream_type="binary",
)
| {
"content_hash": "3242fb2be5990284aeb1c4b5306c0cb7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 60,
"avg_line_length": 28.783783783783782,
"alnum_prop": 0.6262910798122066,
"repo_name": "chfw/pyexcel-io",
"id": "c597292422fb188174fc49e77bdbd3afd2cb02d6",
"size": "1065",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyexcel_io/readers/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "129"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "109782"
},
{
"name": "Shell",
"bytes": "152"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from ..utils import ignoring
from .core import (Array, stack, concatenate, take, tensordot, transpose,
from_array, choose, where, coarsen, insert, broadcast_to, ravel,
reshape, fromfunction, unique, store, squeeze, topk, bincount,
digitize, histogram, map_blocks, atop, to_hdf5, dot, cov, array,
dstack, vstack, hstack, to_npy_stack, from_npy_stack, compress,
from_delayed)
from .core import (logaddexp, logaddexp2, conj, exp, log, log2, log10, log1p,
expm1, sqrt, square, sin, cos, tan, arcsin, arccos, arctan, arctan2,
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg,
logical_and, logical_or, logical_xor, logical_not, maximum, minimum,
fmax, fmin, isreal, iscomplex, isfinite, isinf, isnan, signbit,
copysign, nextafter, ldexp, fmod, floor, ceil, trunc, degrees, radians,
rint, fix, angle, real, imag, clip, fabs, sign, absolute, frexp, modf,
around, isnull, notnull, isclose, eye, triu, tril, diag, corrcoef)
from .reductions import (sum, prod, mean, std, var, any, all, min, max, vnorm,
moment,
argmin, argmax,
nansum, nanmean, nanstd, nanvar, nanmin,
nanmax, nanargmin, nanargmax,
cumsum, cumprod)
from .percentile import percentile
with ignoring(ImportError):
from .reductions import nanprod, nancumprod, nancumsum
from . import random, linalg, ghost, learn, fft
from .wrap import ones, zeros, empty, full
from .rechunk import rechunk
from ..context import set_options
from ..base import compute
from .optimization import optimize
from .creation import arange, linspace
| {
"content_hash": "737ee31c111fd1baec98f70586e77e01",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 54.21212121212121,
"alnum_prop": 0.6629401900503075,
"repo_name": "cowlicks/dask",
"id": "3b87004933569320826ca6be4441289560098cac",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/array/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1366400"
}
],
"symlink_target": ""
} |
"""Provides functionality to notify people."""
from __future__ import annotations
import voluptuous as vol
import homeassistant.components.persistent_notification as pn
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import ( # noqa: F401
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
DOMAIN,
NOTIFY_SERVICE_SCHEMA,
PERSISTENT_NOTIFICATION_SERVICE_SCHEMA,
SERVICE_NOTIFY,
SERVICE_PERSISTENT_NOTIFICATION,
)
from .legacy import ( # noqa: F401
BaseNotificationService,
async_reload,
async_reset_platform,
async_setup_legacy,
check_templates_warn,
)
# Platform specific data
ATTR_TITLE_DEFAULT = "Home Assistant"  # fallback notification title
# Base schema for legacy notify platform configs; unknown keys pass
# through (ALLOW_EXTRA) so each platform can define its own options.
PLATFORM_SCHEMA = vol.Schema(
    {vol.Required(CONF_PLATFORM): cv.string, vol.Optional(CONF_NAME): cv.string},
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the notify services."""
    await async_setup_legacy(hass, config)

    async def persistent_notification(service: ServiceCall) -> None:
        """Send a notification via the built-in persistent_notification integration."""
        message = service.data[ATTR_MESSAGE]
        message.hass = hass
        check_templates_warn(hass, message)

        # Title is optional; render it only when supplied.
        title = None
        title_template = service.data.get(ATTR_TITLE)
        if title_template:
            check_templates_warn(hass, title_template)
            title_template.hass = hass
            title = title_template.async_render(parse_result=False)

        pn.async_create(hass, message.async_render(parse_result=False), title)

    hass.services.async_register(
        DOMAIN,
        SERVICE_PERSISTENT_NOTIFICATION,
        persistent_notification,
        schema=PERSISTENT_NOTIFICATION_SERVICE_SCHEMA,
    )

    return True
| {
"content_hash": "fe0bb6d72e3f555c9ad3375c86dfb506",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 81,
"avg_line_length": 29.70769230769231,
"alnum_prop": 0.7027446918694976,
"repo_name": "GenericStudent/home-assistant",
"id": "bc8a7bffa952db9866f4af1d59fd38e262dff5ed",
"size": "1931",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.contrib.auth.models import BaseUserManager
class AccountManager(BaseUserManager):
    """Manager creating Account rows; username, password and email required."""

    def create_account(self, username, password, **extra_fields):
        """Create and save an active account; raises ValueError on missing data."""
        if not username:
            raise ValueError('Users must have a valid username')
        if not password:
            raise ValueError('Users must have a valid password.')
        email = extra_fields.get('email')
        if not email:
            raise ValueError('Users must have a valid email.')
        account = self.model(
            username=username, email=self.normalize_email(email)
        )
        account.is_active = True
        account.set_password(password)
        account.save()
        return account

    def create_superuser(self, username, password, **extra_fields):
        """Create a regular account, then promote it to superuser."""
        superuser = self.create_account(username, password, **extra_fields)
        superuser.is_superuser = True
        superuser.save()
        return superuser
class Account(AbstractBaseUser, PermissionsMixin):
    """Custom user model keyed on a unique username.

    Email is required but intentionally not unique, so several accounts
    may share one address.
    """
    username = models.CharField(max_length=40, unique=True)
    email = models.EmailField()  # users can share email
    is_gadget = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    objects = AccountManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    def __str__(self):
        return self.username

    def get_full_name(self):
        """Django auth contract: long-form display name (the username)."""
        return self.username

    def get_short_name(self):
        """Django auth contract: short display name (the username)."""
        return self.username

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        return self.is_superuser
"content_hash": "57208aac14c494533ea0f795bb711884",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 27.107692307692307,
"alnum_prop": 0.6617480136208853,
"repo_name": "mik4el/gadget-board",
"id": "d4c077539127a8587cde3926a7050d5d0ff32e17",
"size": "1762",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/authentication/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50562"
},
{
"name": "Dockerfile",
"bytes": "544"
},
{
"name": "HTML",
"bytes": "13639"
},
{
"name": "JavaScript",
"bytes": "98722"
},
{
"name": "Python",
"bytes": "38075"
},
{
"name": "Shell",
"bytes": "1837"
},
{
"name": "TypeScript",
"bytes": "40719"
}
],
"symlink_target": ""
} |
import re
from setuptools import setup, find_packages
# Extract __version__ from the package source without importing it.
with open('dronedemo/__init__.py', 'r') as fd:
    _version_match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                               fd.read(), re.MULTILINE)

# BUGFIX: the original called .group(1) directly on the re.search result,
# so a missing __version__ raised AttributeError before the guard below
# could run. Check for no-match explicitly first.
if _version_match is None:
    raise RuntimeError('Cannot find version information')
version = _version_match.group(1)

if not version:
    raise RuntimeError('Cannot find version information')
# Read the long description under a context manager so the file handle is
# closed deterministically (the original left it to the GC).
with open('README.rst') as _readme:
    _long_description = _readme.read()

setup(
    name="dronedemo",
    version=version,
    author="Greg Taylor",
    author_email="gtaylor@gc-taylor.com",
    description="Drone + Python demo package.",
    long_description=_long_description,
    license="Apache 2.0 License",
    # BUGFIX: keyword typo 'examplei' -> 'example'.
    keywords="drone ci example",
    url='https://github.com/drone-demos/drone-with-python',
    install_requires=['flask'],
    entry_points={
        'console_scripts': [
            'dronedemo = dronedemo.scripts.dronedemo:cli_entry',
        ]
    },
    classifiers=[
        'Development Status :: 6 - Mature',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    packages=find_packages(exclude=['tests']),
    package_data={'': ['LICENSE', '*.txt', '*.rst']},
    tests_require=['nose'],
    test_suite='nose.collector',
)
| {
"content_hash": "2322467658f81bc411e7ff3eaf158095",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 33.23809523809524,
"alnum_prop": 0.5959885386819485,
"repo_name": "drone-demos/drone-with-python",
"id": "6c8b8bb78a9d5b2c93e3e411edd511f7aaace6fe",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1608"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy import stats,special
import scipy as sp
from . import link_functions
from ..util.misc import chain_1, chain_2, chain_3, blockify_dhess_dtheta, blockify_third, blockify_hessian, safe_exp
from ..util.quad_integrate import quadgk_int
from scipy.integrate import quad
from functools import partial
import warnings
from ..core.parameterization import Parameterized
class Likelihood(Parameterized):
"""
Likelihood base class, used to defing p(y|f).
All instances use _inverse_ link functions, which can be swapped out. It is
expected that inheriting classes define a default inverse link function
To use this class, inherit and define missing functionality.
Inheriting classes *must* implement:
pdf_link : a bound method which turns the output of the link function into the pdf
logpdf_link : the logarithm of the above
To enable use with EP, inheriting classes *must* define:
TODO: a suitable derivative function for any parameters of the class
It is also desirable to define:
moments_match_ep : a function to compute the EP moments If this isn't defined, the moments will be computed using 1D quadrature.
To enable use with Laplace approximation, inheriting classes *must* define:
Some derivative functions *AS TODO*
For exact Gaussian inference, define *JH TODO*
"""
    def __init__(self, gp_link, name):
        """Store the inverse-link function and initialize likelihood flags.

        :param gp_link: inverse link function (a GPTransformation instance)
        :param str name: name for the parameterized object
        """
        super(Likelihood, self).__init__(name)
        assert isinstance(gp_link,link_functions.GPTransformation), "gp_link is not a valid GPTransformation."
        self.gp_link = gp_link
        self.log_concave = False  # subclasses set True when the likelihood is log-concave
        self.not_block_really = False
        self.name = name
    def to_dict(self):
        # Serialization must be provided by each concrete likelihood subclass.
        raise NotImplementedError
def _save_to_input_dict(self):
input_dict = {}
input_dict["name"] = self.name
input_dict["gp_link_dict"] = self.gp_link.to_dict()
return input_dict
    @staticmethod
    def from_dict(input_dict):
        """
        Instantiate an object of a derived class using the information
        in input_dict (built by the to_dict method of the derived class).

        More specifically, after reading the derived class from input_dict,
        it calls the method _build_from_input_dict of the derived class.

        Note: This method should not be overridden in the derived class. If
        needed, override _build_from_input_dict instead.

        :param dict input_dict: Dictionary with all the information needed to
           instantiate the object.
        """
        import copy
        input_dict = copy.deepcopy(input_dict)
        likelihood_class = input_dict.pop('class')
        input_dict["name"] = str(input_dict["name"])
        name = input_dict.pop('name')
        import GPy
        # SECURITY NOTE(review): eval() of the stored class name -- only
        # deserialize dicts from trusted sources.
        likelihood_class = eval(likelihood_class)
        return likelihood_class._build_from_input_dict(likelihood_class, input_dict)
@staticmethod
def _build_from_input_dict(likelihood_class, input_dict):
import copy
input_dict = copy.deepcopy(input_dict)
gp_link_dict = input_dict.pop('gp_link_dict')
import GPy
gp_link = GPy.likelihoods.link_functions.GPTransformation.from_dict(gp_link_dict)
input_dict["gp_link"] = gp_link
return likelihood_class(**input_dict)
def request_num_latent_functions(self, Y):
"""
The likelihood should infer how many latent functions are needed for the likelihood
Default is the number of outputs
"""
return Y.shape[1]
    def exact_inference_gradients(self, dL_dKdiag,Y_metadata=None):
        # Default: no gradient w.r.t. likelihood parameters (one zero per param).
        return np.zeros(self.size)
    def update_gradients(self, partial):
        # Likelihoods with tunable parameters must override this; a
        # parameter-free likelihood (size == 0) is a silent no-op.
        if self.size > 0:
            raise NotImplementedError('Must be implemented for likelihoods with parameters to be optimized')
    def _preprocess_values(self,Y):
        """
        In case it is needed, this function assess the output values or makes any pertinent transformation on them.

        :param Y: observed output
        :type Y: Nx1 numpy.darray
        """
        # Default: identity (no transformation).
        return Y
    def conditional_mean(self, gp):
        """
        The mean of the random variable conditioned on one value of the GP

        :param gp: latent function value(s) to condition on
        """
        raise NotImplementedError
    def conditional_variance(self, gp):
        """
        The variance of the random variable conditioned on one value of the GP

        :param gp: latent function value(s) to condition on
        """
        raise NotImplementedError
def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None):
"""
Calculation of the log predictive density
.. math:
p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
:type mu_star: (Nx1) array
:param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
:type var_star: (Nx1) array
"""
assert y_test.shape==mu_star.shape
assert y_test.shape==var_star.shape
assert y_test.shape[1] == 1
flat_y_test = y_test.flatten()
flat_mu_star = mu_star.flatten()
flat_var_star = var_star.flatten()
if Y_metadata is not None:
#Need to zip individual elements of Y_metadata aswell
Y_metadata_flat = {}
if Y_metadata is not None:
for key, val in Y_metadata.items():
Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1,1)
zipped_values = []
for i in range(y_test.shape[0]):
y_m = {}
for key, val in Y_metadata_flat.items():
if np.isscalar(val) or val.shape[0] == 1:
y_m[key] = val
else:
#Won't broadcast yet
y_m[key] = val[i]
zipped_values.append((flat_y_test[i], flat_mu_star[i], flat_var_star[i], y_m))
else:
#Otherwise just pass along None's
zipped_values = zip(flat_y_test, flat_mu_star, flat_var_star, [None]*y_test.shape[0])
def integral_generator(yi, mi, vi, yi_m):
"""Generate a function which can be integrated
to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*"""
def f(fi_star):
#exponent = np.exp(-(1./(2*vi))*np.square(mi-fi_star))
#from GPy.util.misc import safe_exp
#exponent = safe_exp(exponent)
#res = safe_exp(self.logpdf(fi_star, yi, yi_m))*exponent
#More stable in the log space
res = np.exp(self.logpdf(fi_star, yi, yi_m)
- 0.5*np.log(2*np.pi*vi)
- 0.5*np.square(fi_star-mi)/vi)
if not np.isfinite(res):
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
return res
return f
p_ystar, _ = zip(*[quad(integral_generator(yi, mi, vi, yi_m), -np.inf, np.inf)
for yi, mi, vi, yi_m in zipped_values])
p_ystar = np.array(p_ystar).reshape(*y_test.shape)
return np.log(p_ystar)
    def log_predictive_density_sampling(self, y_test, mu_star, var_star, Y_metadata=None, num_samples=1000):
        """
        Calculation of the log predictive density via sampling

        .. math:
            log p(y_{*}|D) = log 1/num_samples prod^{S}_{s=1} p(y_{*}|f_{*s})
            f_{*s} ~ p(f_{*}|\mu_{*}\\sigma^{2}_{*})

        :param y_test: test observations (y_{*})
        :type y_test: (Nx1) array
        :param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
        :type mu_star: (Nx1) array
        :param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
        :type var_star: (Nx1) array
        :param num_samples: num samples of p(f_{*}|mu_{*}, var_{*}) to take
        :type num_samples: int
        """
        assert y_test.shape==mu_star.shape
        assert y_test.shape==var_star.shape
        assert y_test.shape[1] == 1
        #Take samples of p(f*|y)
        #fi_samples = np.random.randn(num_samples)*np.sqrt(var_star) + mu_star
        fi_samples = np.random.normal(mu_star, np.sqrt(var_star), size=(mu_star.shape[0], num_samples))
        from scipy.special import logsumexp
        # Average in log space: log-mean-exp over samples per test point.
        log_p_ystar = -np.log(num_samples) + logsumexp(self.logpdf(fi_samples, y_test, Y_metadata=Y_metadata), axis=1)
        log_p_ystar = np.array(log_p_ystar).reshape(*y_test.shape)
        return log_p_ystar
    def moments_match_ep(self,obs,tau,v,Y_metadata_i=None):
        """
        Calculation of moments using quadrature

        :param obs: observed output
        :param tau: cavity distribution 1st natural parameter (precision)
        :param v: cavity distribution 2nd natural parameter (mu*precision)
        :returns: (zeroth moment z, mean, variance) of the tilted distribution
        """
        #Compute first integral for zeroth moment.
        #NOTE constant np.sqrt(2*pi/tau) added at the end of the function
        mu = v/tau
        sigma2 = 1./tau
        #Lets do these for now based on the same idea as Gaussian quadrature
        # i.e. multiply anything by close to zero, and its zero.
        # Integration bounds: +/- 20 cavity std devs around the cavity mean.
        f_min = mu - 20*np.sqrt(sigma2)
        f_max = mu + 20*np.sqrt(sigma2)
        def int_1(f):
            return self.pdf(f, obs, Y_metadata=Y_metadata_i)*np.exp(-0.5*tau*np.square(mu-f))
        z_scaled, accuracy = quad(int_1, f_min, f_max)
        #Compute second integral for first moment
        def int_2(f):
            return f*self.pdf(f, obs, Y_metadata=Y_metadata_i)*np.exp(-0.5*tau*np.square(mu-f))
        mean, accuracy = quad(int_2, f_min, f_max)
        mean /= z_scaled
        #Compute integral for variance
        def int_3(f):
            return (f**2)*self.pdf(f, obs, Y_metadata=Y_metadata_i)*np.exp(-0.5*tau*np.square(mu-f))
        Ef2, accuracy = quad(int_3, f_min, f_max)
        Ef2 /= z_scaled
        variance = Ef2 - mean**2
        #Add constant to the zeroth moment
        #NOTE: this constant is not needed in the other moments because it cancells out.
        z = z_scaled/np.sqrt(2*np.pi/tau)
        return z, mean, variance
    #only compute gh points if required
    # Lazily-computed Gauss-Hermite nodes/weights cache (name-mangled class attribute).
    __gh_points = None
    def _gh_points(self, T=20):
        """
        Return cached Gauss-Hermite quadrature nodes and weights.
        :param T: number of quadrature points, used only on the first call.
        NOTE(review): once the cache is populated, T is ignored -- later calls
        with a different T still return the originally computed points.
        """
        if self.__gh_points is None:
            self.__gh_points = np.polynomial.hermite.hermgauss(T)
        return self.__gh_points
    def ep_gradients(self, Y, cav_tau, cav_v, dL_dKdiag, Y_metadata=None, quad_mode='gk', boost_grad=1.):
        """
        Gradients of the EP marginal-likelihood estimate wrt the likelihood
        parameters, integrating each site against its cavity distribution
        (given via natural parameters ``cav_tau``, ``cav_v``).
        :param Y: observed outputs
        :param cav_tau: cavity precisions
        :param cav_v: cavity precision-scaled means
        :param dL_dKdiag: NOTE(review): accepted but never read in this body
        :param quad_mode: 'gk' (Gauss-Kronrod) or 'gh' (Gauss-Hermite)
        :param boost_grad: scalar multiplier applied to the summed gradient
        :returns: gradient, one entry per likelihood parameter
        """
        if self.size > 0:
            shape = Y.shape
            tau,v,Y = cav_tau.flatten(), cav_v.flatten(),Y.flatten()
            mu = v/tau
            sigma2 = 1./tau
            # assert Y.shape == v.shape
            # NOTE(review): dlik_dtheta is allocated but never used below.
            dlik_dtheta = np.empty((self.size, Y.shape[0]))
            # for j in range(self.size):
            # Split the stacked per-datum metadata into one dict per data point.
            Y_metadata_list = []
            for index in range(len(Y)):
                Y_metadata_i = {}
                if Y_metadata is not None:
                    for key in Y_metadata.keys():
                        Y_metadata_i[key] = Y_metadata[key][index,:]
                Y_metadata_list.append(Y_metadata_i)
            if quad_mode == 'gk':
                f = partial(self.integrate_gk)
                quads = zip(*map(f, Y.flatten(), mu.flatten(), np.sqrt(sigma2.flatten()), Y_metadata_list))
                quads = np.vstack(quads)
                # NOTE(review): ndarray.reshape returns a NEW array and the
                # result is discarded -- this line is a no-op as written.
                quads.reshape(self.size, shape[0], shape[1])
            elif quad_mode == 'gh':
                f = partial(self.integrate_gh)
                quads = zip(*map(f, Y.flatten(), mu.flatten(), np.sqrt(sigma2.flatten())))
                quads = np.hstack(list(quads))
                quads = quads.T
            else:
                raise Exception("no other quadrature mode available")
            # do a gaussian-hermite integration
            # NOTE(review): dL_dtheta_avg is computed but never returned/used.
            dL_dtheta_avg = boost_grad * np.nanmean(quads, axis=1)
            dL_dtheta = boost_grad * np.nansum(quads, axis=1)
            # dL_dtheta = boost_grad * np.nansum(dlik_dtheta, axis=1)
        else:
            # No likelihood parameters: zero-length gradient.
            dL_dtheta = np.zeros(self.num_params)
        return dL_dtheta
def integrate_gk(self, Y, mu, sigma, Y_metadata_i=None):
# gaussian-kronrod integration.
fmin = -np.inf
fmax = np.inf
SQRT_2PI = np.sqrt(2.*np.pi)
def generate_integral(f):
a = np.exp(self.logpdf_link(f, Y, Y_metadata_i)) * np.exp(-0.5 * np.square((f - mu) / sigma)) / (
SQRT_2PI * sigma)
fn1 = a * self.dlogpdf_dtheta(f, Y, Y_metadata_i)
fn = fn1
return fn
dF_dtheta_i = quadgk_int(generate_integral, fmin=fmin, fmax=fmax)
return dF_dtheta_i
    def integrate_gh(self, Y, mu, sigma, Y_metadata_i=None, gh_points=None):
        """
        Gauss-Hermite quadrature of the likelihood-parameter gradient for a
        single site (expectation of dlogp/dtheta weighted by the likelihood,
        under N(f; mu, sigma^2)).
        """
        # gaussian-hermite quadrature.
        # "calculate site derivatives E_f{d logp(y_i|f_i)/da} where a is a likelihood parameter
        # and the expectation is over the exact marginal posterior, which is not gaussian- and is
        # unnormalised product of the cavity distribution(a Gaussian) and the exact likelihood term.
        #
        # calculate the expectation wrt the approximate marginal posterior, which should be approximately the same.
        # . This term is needed for evaluating the
        # gradients of the marginal likelihood estimate Z_EP wrt likelihood parameters."
        # "writing it explicitly "
        # use them for gaussian-hermite quadrature
        # NOTE(review): SQRT_2PI is defined but not used in this method.
        SQRT_2PI = np.sqrt(2.*np.pi)
        if gh_points is None:
            gh_x, gh_w = self._gh_points(32)
        else:
            gh_x, gh_w = gh_points
        # Change of variables for Gauss-Hermite: f = sqrt(2)*sigma*x + mu.
        X = gh_x[None,:]*np.sqrt(2.)*sigma + mu
        # Here X is a grid vector of possible fi values, while Y is just a single value which will be broadcasted.
        a = np.exp(self.logpdf_link(X, Y, Y_metadata_i))
        # Repeat the likelihood row once per parameter so shapes align with b.
        a = a.repeat(self.num_params,0)
        b = self.dlogpdf_dtheta(X, Y, Y_metadata_i)
        old_shape = b.shape
        # Element-wise product of a and b (done via flattened comprehension).
        fn = np.array([i*j for i,j in zip(a.flatten(), b.flatten())])
        fn = fn.reshape(old_shape)
        # 1/sqrt(pi) is the Gauss-Hermite normalisation constant.
        dF_dtheta_i = np.dot(fn, gh_w)/np.sqrt(np.pi)
        return dF_dtheta_i
def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
"""
Use Gauss-Hermite Quadrature to compute
E_p(f) [ log p(y|f) ]
d/dm E_p(f) [ log p(y|f) ]
d/dv E_p(f) [ log p(y|f) ]
where p(f) is a Gaussian with mean m and variance v. The shapes of Y, m and v should match.
if no gh_points are passed, we construct them using defualt options
"""
if gh_points is None:
gh_x, gh_w = self._gh_points()
else:
gh_x, gh_w = gh_points
shape = m.shape
m,v,Y = m.flatten(), v.flatten(), Y.flatten()
#make a grid of points
X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None]
#evaluate the likelhood for the grid. First ax indexes the data (and mu, var) and the second indexes the grid.
# broadcast needs to be handled carefully.
logp = self.logpdf(X,Y[:,None], Y_metadata=Y_metadata)
dlogp_dx = self.dlogpdf_df(X, Y[:,None], Y_metadata=Y_metadata)
d2logp_dx2 = self.d2logpdf_df2(X, Y[:,None], Y_metadata=Y_metadata)
#clipping for numerical stability
#logp = np.clip(logp,-1e9,1e9)
#dlogp_dx = np.clip(dlogp_dx,-1e9,1e9)
#d2logp_dx2 = np.clip(d2logp_dx2,-1e9,1e9)
#average over the gird to get derivatives of the Gaussian's parameters
#division by pi comes from fact that for each quadrature we need to scale by 1/sqrt(pi)
F = np.dot(logp, gh_w)/np.sqrt(np.pi)
dF_dm = np.dot(dlogp_dx, gh_w)/np.sqrt(np.pi)
dF_dv = np.dot(d2logp_dx2, gh_w)/np.sqrt(np.pi)
dF_dv /= 2.
if np.any(np.isnan(dF_dv)) or np.any(np.isinf(dF_dv)):
stop
if np.any(np.isnan(dF_dm)) or np.any(np.isinf(dF_dm)):
stop
if self.size:
dF_dtheta = self.dlogpdf_dtheta(X, Y[:,None], Y_metadata=Y_metadata) # Ntheta x (orig size) x N_{quad_points}
dF_dtheta = np.dot(dF_dtheta, gh_w)/np.sqrt(np.pi)
dF_dtheta = dF_dtheta.reshape(self.size, shape[0], shape[1])
else:
dF_dtheta = None # Not yet implemented
return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), dF_dtheta
def predictive_mean(self, mu, variance, Y_metadata=None):
"""
Quadrature calculation of the predictive mean: E(Y_star|Y) = E( E(Y_star|f_star, Y) )
:param mu: mean of posterior
:param sigma: standard deviation of posterior
"""
#conditional_mean: the edpected value of y given some f, under this likelihood
fmin = -np.inf
fmax = np.inf
def int_mean(f,m,v):
exponent = -(0.5/v)*np.square(f - m)
#If exponent is under -30 then exp(exponent) will be very small, so don't exp it!)
#If p is zero then conditional_mean will overflow
assert v.all() > 0
p = safe_exp(exponent)
#If p is zero then conditional_variance will overflow
if p < 1e-10:
return 0.
else:
return self.conditional_mean(f)*p
scaled_mean = [quad(int_mean, fmin, fmax,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
mean = np.array(scaled_mean)[:,None] / np.sqrt(2*np.pi*(variance))
return mean
def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None):
"""
Approximation to the predictive variance: V(Y_star)
The following variance decomposition is used:
V(Y_star) = E( V(Y_star|f_star)**2 ) + V( E(Y_star|f_star) )**2
:param mu: mean of posterior
:param sigma: standard deviation of posterior
:predictive_mean: output's predictive mean, if None _predictive_mean function will be called.
"""
#sigma2 = sigma**2
normalizer = np.sqrt(2*np.pi*variance)
fmin_v = -np.inf
fmin_m = np.inf
fmin = -np.inf
fmax = np.inf
from ..util.misc import safe_exp
# E( V(Y_star|f_star) )
def int_var(f,m,v):
exponent = -(0.5/v)*np.square(f - m)
p = safe_exp(exponent)
#If p is zero then conditional_variance will overflow
if p < 1e-10:
return 0.
else:
return self.conditional_variance(f)*p
scaled_exp_variance = [quad(int_var, fmin_v, fmax,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
exp_var = np.array(scaled_exp_variance)[:,None] / normalizer
#V( E(Y_star|f_star) ) = E( E(Y_star|f_star)**2 ) - E( E(Y_star|f_star) )**2
#E( E(Y_star|f_star) )**2
if predictive_mean is None:
predictive_mean = self.predictive_mean(mu,variance)
predictive_mean_sq = predictive_mean**2
#E( E(Y_star|f_star)**2 )
def int_pred_mean_sq(f,m,v,predictive_mean_sq):
exponent = -(0.5/v)*np.square(f - m)
p = np.exp(exponent)
#If p is zero then conditional_mean**2 will overflow
if p < 1e-10:
return 0.
else:
return self.conditional_mean(f)**2*p
scaled_exp_exp2 = [quad(int_pred_mean_sq, fmin_m, fmax,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)]
exp_exp2 = np.array(scaled_exp_exp2)[:,None] / normalizer
var_exp = exp_exp2 - predictive_mean_sq
# V(Y_star) = E[ V(Y_star|f_star) ] + V[ E(Y_star|f_star) ]
# V(Y_star) = E[ V(Y_star|f_star) ] + E(Y_star**2|f_star) - E[Y_star|f_star]**2
return exp_var + var_exp
    # ------------------------------------------------------------------
    # Abstract interface: subclasses express the likelihood and its
    # derivatives in terms of the link of f, lambda(f) = gp_link.transf(f).
    # ------------------------------------------------------------------
    def pdf_link(self, inv_link_f, y, Y_metadata=None):
        """p(y|lambda(f)) -- must be implemented by subclasses."""
        raise NotImplementedError
    def logpdf_link(self, inv_link_f, y, Y_metadata=None):
        """log p(y|lambda(f)) -- must be implemented by subclasses."""
        raise NotImplementedError
    def dlogpdf_dlink(self, inv_link_f, y, Y_metadata=None):
        """d log p / d lambda(f) -- must be implemented by subclasses."""
        raise NotImplementedError
    def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
        """d^2 log p / d lambda(f)^2 -- must be implemented by subclasses."""
        raise NotImplementedError
    def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
        """d^3 log p / d lambda(f)^3 -- must be implemented by subclasses."""
        raise NotImplementedError
    def dlogpdf_link_dtheta(self, inv_link_f, y, Y_metadata=None):
        """d log p / d theta (likelihood parameters) -- subclasses only."""
        raise NotImplementedError
    def dlogpdf_dlink_dtheta(self, inv_link_f, y, Y_metadata=None):
        """Mixed derivative d/dtheta (d log p / d lambda) -- subclasses only."""
        raise NotImplementedError
    def d2logpdf_dlink2_dtheta(self, inv_link_f, y, Y_metadata=None):
        """Mixed derivative d/dtheta (d^2 log p / d lambda^2) -- subclasses only."""
        raise NotImplementedError
def pdf(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the likelihood (pdf) using it
.. math:
p(y|\\lambda(f))
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution - not used
:returns: likelihood evaluated for this point
:rtype: float
"""
if isinstance(self.gp_link, link_functions.Identity):
return self.pdf_link(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
return self.pdf_link(inv_link_f, y, Y_metadata=Y_metadata)
def logpdf_sum(self, f, y, Y_metadata=None):
"""
Convenience function that can overridden for functions where this could
be computed more efficiently
"""
return np.sum(self.logpdf(f, y, Y_metadata=Y_metadata))
def logpdf(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the log likelihood (log pdf) using it
.. math:
\\log p(y|\\lambda(f))
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution - not used
:returns: log likelihood evaluated for this point
:rtype: float
"""
if isinstance(self.gp_link, link_functions.Identity):
return self.logpdf_link(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata)
def dlogpdf_df(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
\\frac{d\\log p(y|\\lambda(f))}{df} = \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d\\lambda(f)}{df}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution - not used
:returns: derivative of log likelihood evaluated for this point
:rtype: 1xN array
"""
if isinstance(self.gp_link, link_functions.Identity):
return self.dlogpdf_dlink(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
dlink_df = self.gp_link.dtransf_df(f)
return chain_1(dlogpdf_dlink, dlink_df)
    @blockify_hessian
    def d2logpdf_df2(self, f, y, Y_metadata=None):
        """
        Evaluates the link function link(f) then computes the second derivative of log likelihood using it
        Uses the Faa di Bruno's formula for the chain rule
        .. math::
            \\frac{d^{2}\\log p(y|\\lambda(f))}{df^{2}} = \\frac{d^{2}\\log p(y|\\lambda(f))}{d^{2}\\lambda(f)}\\left(\\frac{d\\lambda(f)}{df}\\right)^{2} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{2}\\lambda(f)}{df^{2}}
        :param f: latent variables f
        :type f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: metadata passed through to the subclass implementation
        :returns: second derivative of log likelihood evaluated for this point (diagonal only)
        :rtype: 1xN array
        """
        # Identity link: no chaining required.
        if isinstance(self.gp_link, link_functions.Identity):
            d2logpdf_df2 = self.d2logpdf_dlink2(f, y, Y_metadata=Y_metadata)
        else:
            inv_link_f = self.gp_link.transf(f)
            d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
            dlink_df = self.gp_link.dtransf_df(f)
            dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
            d2link_df2 = self.gp_link.d2transf_df2(f)
            # Second-order chain rule combining both link derivatives.
            d2logpdf_df2 = chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2)
        return d2logpdf_df2
    @blockify_third
    def d3logpdf_df3(self, f, y, Y_metadata=None):
        """
        Evaluates the link function link(f) then computes the third derivative of log likelihood using it
        Uses the Faa di Bruno's formula for the chain rule
        .. math::
            \\frac{d^{3}\\log p(y|\\lambda(f))}{df^{3}} = \\frac{d^{3}\\log p(y|\\lambda(f)}{d\\lambda(f)^{3}}\\left(\\frac{d\\lambda(f)}{df}\\right)^{3} + 3\\frac{d^{2}\\log p(y|\\lambda(f)}{d\\lambda(f)^{2}}\\frac{d\\lambda(f)}{df}\\frac{d^{2}\\lambda(f)}{df^{2}} + \\frac{d\\log p(y|\\lambda(f)}{d\\lambda(f)}\\frac{d^{3}\\lambda(f)}{df^{3}}
        :param f: latent variables f
        :type f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: metadata passed through to the subclass implementation
        :returns: third derivative of log likelihood evaluated for this point
        :rtype: float
        """
        # Identity link: no chaining required.
        if isinstance(self.gp_link, link_functions.Identity):
            d3logpdf_df3 = self.d3logpdf_dlink3(f, y, Y_metadata=Y_metadata)
        else:
            inv_link_f = self.gp_link.transf(f)
            d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata)
            dlink_df = self.gp_link.dtransf_df(f)
            d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
            d2link_df2 = self.gp_link.d2transf_df2(f)
            dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
            d3link_df3 = self.gp_link.d3transf_df3(f)
            # Third-order chain rule combining all three link derivatives.
            d3logpdf_df3 = chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2, d2link_df2, dlogpdf_dlink, d3link_df3)
        return d3logpdf_df3
def dlogpdf_dtheta(self, f, y, Y_metadata=None):
"""
TODO: Doc strings
"""
if self.size > 0:
if self.not_block_really:
raise NotImplementedError("Need to make a decorator for this!")
if isinstance(self.gp_link, link_functions.Identity):
return self.dlogpdf_link_dtheta(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
return self.dlogpdf_link_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
else:
# There are no parameters so return an empty array for derivatives
return np.zeros((0, f.shape[0], f.shape[1]))
def dlogpdf_df_dtheta(self, f, y, Y_metadata=None):
"""
TODO: Doc strings
"""
if self.size > 0:
if self.not_block_really:
raise NotImplementedError("Need to make a decorator for this!")
if isinstance(self.gp_link, link_functions.Identity):
return self.dlogpdf_dlink_dtheta(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
dlink_df = self.gp_link.dtransf_df(f)
dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
dlogpdf_df_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
#Chain each parameter of hte likelihood seperately
for p in range(self.size):
dlogpdf_df_dtheta[p, :, :] = chain_1(dlogpdf_dlink_dtheta[p,:,:], dlink_df)
return dlogpdf_df_dtheta
#return chain_1(dlogpdf_dlink_dtheta, dlink_df)
else:
# There are no parameters so return an empty array for derivatives
return np.zeros((0, f.shape[0], f.shape[1]))
def d2logpdf_df2_dtheta(self, f, y, Y_metadata=None):
"""
TODO: Doc strings
"""
if self.size > 0:
if self.not_block_really:
raise NotImplementedError("Need to make a decorator for this!")
if isinstance(self.gp_link, link_functions.Identity):
return self.d2logpdf_dlink2_dtheta(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
dlink_df = self.gp_link.dtransf_df(f)
d2link_df2 = self.gp_link.d2transf_df2(f)
d2logpdf_dlink2_dtheta = self.d2logpdf_dlink2_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
d2logpdf_df2_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
#Chain each parameter of hte likelihood seperately
for p in range(self.size):
d2logpdf_df2_dtheta[p, :, :] = chain_2(d2logpdf_dlink2_dtheta[p,:,:], dlink_df, dlogpdf_dlink_dtheta[p,:,:], d2link_df2)
return d2logpdf_df2_dtheta
#return chain_2(d2logpdf_dlink2_dtheta, dlink_df, dlogpdf_dlink_dtheta, d2link_df2)
else:
# There are no parameters so return an empty array for derivatives
return np.zeros((0, f.shape[0], f.shape[1]))
def _laplace_gradients(self, f, y, Y_metadata=None):
dlogpdf_dtheta = self.dlogpdf_dtheta(f, y, Y_metadata=Y_metadata)
dlogpdf_df_dtheta = self.dlogpdf_df_dtheta(f, y, Y_metadata=Y_metadata)
d2logpdf_df2_dtheta = self.d2logpdf_df2_dtheta(f, y, Y_metadata=Y_metadata)
#Parameters are stacked vertically. Must be listed in same order as 'get_param_names'
# ensure we have gradients for every parameter we want to optimize
assert dlogpdf_dtheta.shape[0] == self.size #num_param array x f, d
assert dlogpdf_df_dtheta.shape[0] == self.size #num_param x f x d x matrix or just num_param x f
assert d2logpdf_df2_dtheta.shape[0] == self.size #num_param x f matrix or num_param x f x d x matrix, num_param x f x f or num_param x f x f x d
return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta
    def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
        """
        Compute mean, variance of the predictive distribution.
        :param mu: mean of the latent variable, f, of posterior
        :param var: variance of the latent variable, f, of posterior
        :param full_cov: whether to use the full covariance or just the diagonal
        :type full_cov: Boolean
        :returns: (pred_mean, pred_var), each (Nx1)
        """
        try:
            # Preferred path: exact-ish quadrature implemented by the subclass.
            pred_mean = self.predictive_mean(mu, var, Y_metadata=Y_metadata)
            pred_var = self.predictive_variance(mu, var, pred_mean, Y_metadata=Y_metadata)
        except NotImplementedError:
            # Fallback: Monte-Carlo through samples() when quadrature is unavailable.
            print("Finding predictive mean and variance via sampling rather than quadrature")
            Nf_samp = 300
            Ny_samp = 1
            s = np.random.randn(mu.shape[0], Nf_samp)*np.sqrt(var) + mu
            ss_y = self.samples(s, Y_metadata, samples=Ny_samp)
            pred_mean = np.mean(ss_y, axis=1)[:, None]
            pred_var = np.var(ss_y, axis=1)[:, None]
        return pred_mean, pred_var
def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
#compute the quantiles by sampling!!!
Nf_samp = 300
Ny_samp = 1
s = np.random.randn(mu.shape[0], Nf_samp)*np.sqrt(var) + mu
ss_y = self.samples(s, Y_metadata)#, samples=Ny_samp)
#ss_y = ss_y.reshape(mu.shape[0], mu.shape[1], Nf_samp*Ny_samp)
pred_quantiles = [np.percentile(ss_y, q, axis=1)[:,None] for q in quantiles]
return pred_quantiles
    def samples(self, gp, Y_metadata=None, samples=1):
        """
        Returns a set of samples of observations based on a given value of the latent variable.
        :param gp: latent variable
        :param samples: number of samples to take for each f location
        :raises NotImplementedError: always -- subclasses must implement sampling.
        """
        raise NotImplementedError("""May be possible to use MCMC with user-tuning, see
                                  MCMC_pdf_samples in likelihood.py and write samples function
                                  using this, beware this is a simple implementation
                                  of Metropolis and will not work well for all likelihoods""")
    def MCMC_pdf_samples(self, fNew, num_samples=1000, starting_loc=None, stepsize=0.1, burn_in=1000, Y_metadata=None):
        """
        Simple implementation of Metropolis sampling algorithm
        Will run a parallel chain for each input dimension (treats each f independently)
        Thus assumes f*_1 independant of f*_2 etc.
        :param num_samples: Number of samples to take
        :param fNew: f at which to sample around
        :param starting_loc: Starting locations of the independant chains (usually will be conditional_mean of likelihood), often link_f
        :param stepsize: Stepsize for the normal proposal distribution (will need modifying)
        :param burnin: number of samples to use for burnin (will need modifying)
        :param Y_metadata: Y_metadata for pdf
        :returns: (par_chains x num_samples) array of post-burn-in samples.
        NOTE(review): the progress print below divides by
        int((burn_in+num_samples)*0.1), which is 0 when burn_in+num_samples < 10
        -- that would raise ZeroDivisionError for very small runs.
        """
        print("Warning, using MCMC for sampling y*, needs to be tuned!")
        if starting_loc is None:
            starting_loc = fNew
        from functools import partial
        # Target density: likelihood of candidate y* given the fixed fNew.
        logpdf = partial(self.logpdf, f=fNew, Y_metadata=Y_metadata)
        pdf = lambda y_star: np.exp(logpdf(y=y_star[:, None]))
        #Should be the link function of f is a good starting point
        #(i.e. the point before you corrupt it with the likelihood)
        par_chains = starting_loc.shape[0]
        chain_values = np.zeros((par_chains, num_samples))
        chain_values[:, 0][:,None] = starting_loc
        #Use same stepsize for all par_chains
        stepsize = np.ones(par_chains)*stepsize
        accepted = np.zeros((par_chains, num_samples+burn_in))
        accept_ratio = np.zeros(num_samples+burn_in)
        #Whilst burning in, only need to keep the previous lot
        burnin_cache = np.zeros(par_chains)
        burnin_cache[:] = starting_loc.flatten()
        burning_in = True
        for i in range(burn_in+num_samples):
            next_ind = i-burn_in
            if burning_in:
                old_y = burnin_cache
            else:
                old_y = chain_values[:,next_ind-1]
            old_lik = pdf(old_y)
            #Propose new y from Gaussian proposal
            new_y = np.random.normal(loc=old_y, scale=stepsize)
            new_lik = pdf(new_y)
            #Accept using Metropolis (not hastings) acceptance
            #Always accepts if new_lik > old_lik
            accept_probability = np.minimum(1, new_lik/old_lik)
            u = np.random.uniform(0,1,par_chains)
            #print "Accept prob: ", accept_probability
            accepts = u < accept_probability
            if burning_in:
                burnin_cache[accepts] = new_y[accepts]
                burnin_cache[~accepts] = old_y[~accepts]
                if i == burn_in:
                    # Burn-in finished: seed the first recorded sample.
                    burning_in = False
                    chain_values[:,0] = burnin_cache
            else:
                #If it was accepted then new_y becomes the latest sample
                chain_values[accepts, next_ind] = new_y[accepts]
                #Otherwise use old y as the sample
                chain_values[~accepts, next_ind] = old_y[~accepts]
            accepted[~accepts, i] = 0
            accepted[accepts, i] = 1
            accept_ratio[i] = np.sum(accepted[:,i])/float(par_chains)
            #Show progress
            if i % int((burn_in+num_samples)*0.1) == 0:
                print("{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i))
                print("Last run accept ratio: ", accept_ratio[i])
        print("Average accept ratio: ", np.mean(accept_ratio))
        return chain_values
| {
"content_hash": "cef990ee47a420b28bdbb339a3d77f25",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 344,
"avg_line_length": 42.96113074204947,
"alnum_prop": 0.5848823819707188,
"repo_name": "SheffieldML/GPy",
"id": "0f17a883b1d70a63af83668e2dad46f929dbac36",
"size": "36595",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "GPy/likelihoods/likelihood.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C",
"bytes": "2030"
},
{
"name": "C++",
"bytes": "1605"
},
{
"name": "Cython",
"bytes": "49903"
},
{
"name": "Python",
"bytes": "2344657"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import requests
from zerotest.utils.generator_helper import dict_to_param_style_code
from zerotest.utils.url_helper import urlparse
from zerotest.utils.encode_helper import ensure_unicode
class Request(object):
    """Record of an HTTP request that can be compared and replayed via ``requests``."""

    def __init__(self, scheme=None, method=None, params=None, host=None, path=None, headers=None, data=None,
                 endpoint=None):
        self.scheme = scheme
        self.method = method
        self.headers = headers
        self.host = host
        self.path = path
        self.params = ensure_unicode(params)
        self.data = ensure_unicode(data)
        # An explicit endpoint overrides scheme/host via the property setter.
        if endpoint:
            self.endpoint = endpoint

    @property
    def endpoint(self):
        """``scheme://host`` string, e.g. ``http://example.com:8080``."""
        return "{scheme}://{host}".format(**self.__dict__)

    @endpoint.setter
    def endpoint(self, endpoint):
        # Split an endpoint URL back into scheme and host (keeping the port).
        parsed = urlparse(endpoint)
        self.scheme = parsed.scheme
        if parsed.port:
            host = "{}:{}".format(parsed.hostname, parsed.port)
        else:
            host = parsed.hostname
        self.host = host

    @property
    def url(self):
        """Full URL: endpoint plus path."""
        return "{}{}".format(self.endpoint, self.path)

    def send_request(self, verify=True):
        """Replay the recorded request; returns a streamed, non-redirecting response."""
        return requests.request(self.method, self.url, headers=self.headers,
                                params=self.params, data=self.data,
                                stream=True, allow_redirects=False, verify=verify)

    def __eq__(self, other):
        # isinstance (rather than an exact type comparison) is the idiomatic
        # check; NotImplemented lets Python fall back for foreign types,
        # which still evaluates to False under ==.
        if not isinstance(other, Request):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __str__(self):
        return """[{method}]{url}
{headers}
{data}""".format(method=self.method, url=self.url, headers=self.headers, data=self.data)

    def __repr__(self):
        return '{}({})'.format(Request.__name__,
                               dict_to_param_style_code({k: v for k, v in self.__dict__.items() if v}))
| {
"content_hash": "db2fea1e590329bb8685992339febed9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 108,
"avg_line_length": 32.20338983050848,
"alnum_prop": 0.5868421052631579,
"repo_name": "jjyr/zerotest",
"id": "b976113441db51fea516b81d3ad030a0a3325968",
"size": "1900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerotest/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60381"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def smallcatRF():
    """Compare a 1-tree depth-1 random forest between H2O and scikit-learn on
    a 26-category alphabet dataset; H2O's AUC must not be worse."""
    # Training set has 26 categories from A to Z
    # Categories A, C, E, G, ... are perfect predictors of y = 1
    # Categories B, D, F, H, ... are perfect predictors of y = 0
    alphabet = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/alphabet_cattest.csv"))
    alphabet["y"] = alphabet["y"].asfactor()

    # Load the same CSV for scikit-learn; the quoted letter in column 0 is
    # mapped to its ordinal so it can be used as a single numeric feature.
    train = np.loadtxt(pyunit_utils.locate("smalldata/gbm_test/alphabet_cattest.csv"), delimiter=',', skiprows=1,
                       converters={0:lambda s: ord(s.split("\"")[1])})
    responses = train[:,1]
    features = train[:,0]

    # H2O random forest: single tree, single split.
    rf_h2o = H2ORandomForestEstimator(ntrees=1, max_depth=1, nbins=100)
    rf_h2o.train(x='X', y="y", training_frame=alphabet)

    # scikit-learn random forest with matching parameters.
    rf_sci = ensemble.RandomForestClassifier(n_estimators=1, criterion='entropy', max_depth=1)
    rf_sci.fit(features[:,np.newaxis], responses)

    # Compare AUCs on the training data.
    auc_h2o = rf_h2o.model_performance(alphabet).auc()
    auc_sci = roc_auc_score(responses, rf_sci.predict_proba(features[:,np.newaxis])[:,1])
    assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
pyunit_utils.standalone_test(smallcatRF)
else:
smallcatRF()
| {
"content_hash": "e887e38736e8772b95ffb11d6aedadf4",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 115,
"avg_line_length": 32.416666666666664,
"alnum_prop": 0.693573264781491,
"repo_name": "madmax983/h2o-3",
"id": "0d43988c4ecda8fe35ff5b9743468b231b5c9a47",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/rf/pyunit_smallcatRF.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162402"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "HTML",
"bytes": "139398"
},
{
"name": "Java",
"bytes": "5770492"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34048"
},
{
"name": "Python",
"bytes": "2721983"
},
{
"name": "R",
"bytes": "1611237"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22834"
},
{
"name": "Shell",
"bytes": "46382"
},
{
"name": "TeX",
"bytes": "535732"
}
],
"symlink_target": ""
} |
import sys
import unittest
class DocutilsTests(unittest.TestCase):
    """Smoke test for the docutils HTML publisher."""

    def test_publish_string(self):
        from docutils.core import publish_string
        html = publish_string('foo', writer_name='html')
        # publish_string returns bytes on Python 3; decode before comparing.
        if sys.version_info[0] >= 3:
            html = html.decode('utf-8')
        self.assertTrue(html.startswith('<?xml version="1.0" encoding="utf-8" ?>'))
| {
"content_hash": "965776e703a15db51f68b2eca68dd465",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.6243093922651933,
"repo_name": "Jarn/jarn.viewdoc",
"id": "a591e11e1eb568b5ca370e6b9a4349de70499b27",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_docutils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "36879"
}
],
"symlink_target": ""
} |
from django import forms
from users.models import AdminUser, MemberUser
class AdminUserForm(forms.Form):
    """Creation form for an AdminUser; enforces unique username and email."""
    username = forms.CharField(label='Username', max_length=10)
    first_name = forms.CharField(label='First Name', max_length=30)
    last_name = forms.CharField(label='Last Name', max_length=30)
    email = forms.EmailField(label='Email', widget=forms.EmailInput)
    password = forms.CharField(label='Password', widget=forms.PasswordInput, min_length=8, required=False)
    bio = forms.CharField(label='Bio', widget=forms.Textarea)

    def clean_username(self):
        # Reject usernames already present in the AdminUser table.
        candidate = self.cleaned_data.get('username')
        taken = AdminUser.objects.filter(username=candidate).exists()
        if taken:
            raise forms.ValidationError("Username exists in the system.")
        return candidate

    def clean_email(self):
        # Reject emails already present in the AdminUser table.
        candidate = self.cleaned_data.get('email')
        taken = AdminUser.objects.filter(email=candidate).exists()
        if taken:
            raise forms.ValidationError("Email exists in the system.")
        return candidate

    class Meta:
        model = AdminUser
class AdminUpdateForm(forms.Form):
    """Update form for an existing AdminUser.

    Email uniqueness is only enforced when the email field actually changed,
    so a user's own address does not collide with itself.
    """
    first_name = forms.CharField(label='First Name', max_length=30)
    last_name = forms.CharField(label='Last Name', max_length=30)
    email = forms.EmailField(label='Email', widget=forms.EmailInput)
    password = forms.CharField(label='Password', widget=forms.PasswordInput, min_length=8, required=False)
    bio = forms.CharField(label='Bio', widget=forms.Textarea)

    def clean_email(self):
        email = self.cleaned_data.get('email')
        # Only check uniqueness for a modified email.  (Removed leftover
        # debug print statements that wrote 'changed!'/'no!' to stdout.)
        if 'email' in self.changed_data:
            if AdminUser.objects.filter(email=email).exists():
                raise forms.ValidationError("Email exists in the system.")
        return email

    class Meta:
        model = AdminUser
class PasswordForm(forms.Form):
    """Single-field form used for password entry/confirmation."""
    password = forms.CharField(label='Password', widget=forms.PasswordInput, min_length=8, required=False)
class MemberUpdateForm(forms.Form):
    """Update form for an existing MemberUser.

    Email uniqueness is only enforced when the email field actually changed.
    NOTE(review): password min_length here is 6, whereas the admin forms use
    8 -- confirm whether this asymmetry is intentional.
    """
    first_name = forms.CharField(label='First Name', max_length=30)
    last_name = forms.CharField(label='Last Name', max_length=30)
    email = forms.EmailField(label='Email', widget=forms.EmailInput)
    password = forms.CharField(label='Password', widget=forms.PasswordInput, min_length=6, required=False)
    bio = forms.CharField(label='Bio', widget=forms.Textarea)

    def clean_email(self):
        email = self.cleaned_data.get('email')
        # Only check uniqueness for a modified email.  (Removed leftover
        # debug print statements that wrote 'changed!'/'no!' to stdout.)
        if 'email' in self.changed_data:
            if MemberUser.objects.filter(email=email).exists():
                raise forms.ValidationError("Email exists in the system.")
        return email

    class Meta:
        model = MemberUser
"content_hash": "a57d56866f97575ad8f8ffa35962dce2",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 106,
"avg_line_length": 40,
"alnum_prop": 0.6742857142857143,
"repo_name": "mmunchkinn/Hex-Omega",
"id": "1ec28682041349a29f547b4f76672938e7018f00",
"size": "2800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/user_form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "666145"
},
{
"name": "HTML",
"bytes": "181523"
},
{
"name": "JavaScript",
"bytes": "146980"
},
{
"name": "Python",
"bytes": "55263"
}
],
"symlink_target": ""
} |
import atexit
import requests
from tools import cli
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
from tools import tasks
# disable urllib3 warnings
# hasattr-guarded because older requests bundles a urllib3 without
# disable_warnings; without this, unverified-HTTPS warnings flood stderr.
if hasattr(requests.packages.urllib3, 'disable_warnings'):
    requests.packages.urllib3.disable_warnings()
def delete_virtual_disk(si, vm_obj, disk_number):
    """ Deletes virtual Disk based on disk number
    :param si: Service Instance
    :param vm_obj: Virtual Machine Object
    :param disk_number: Hard Disk Unit Number
    :return: True if success
    :raises RuntimeError: if no disk with the requested label exists
    """
    hdd_prefix_label = 'Hard disk '
    hdd_label = hdd_prefix_label + str(disk_number)
    virtual_hdd_device = None
    for dev in vm_obj.config.hardware.device:
        if isinstance(dev, vim.vm.device.VirtualDisk) \
                and dev.deviceInfo.label == hdd_label:
            virtual_hdd_device = dev
            break  # device labels are unique; stop at the first match
    if not virtual_hdd_device:
        # BUGFIX: the original formatted `virtual_hdd_device` (always None
        # on this path) into the message instead of the label searched for.
        raise RuntimeError('Virtual {} could not '
                           'be found.'.format(hdd_label))
    # Build a reconfigure spec that removes the device from the VM.
    virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
    virtual_hdd_spec.operation = \
        vim.vm.device.VirtualDeviceSpec.Operation.remove
    virtual_hdd_spec.device = virtual_hdd_device
    spec = vim.vm.ConfigSpec()
    spec.deviceChange = [virtual_hdd_spec]
    task = vm_obj.ReconfigVM_Task(spec=spec)
    tasks.wait_for_tasks(si, [task])  # block until vCenter finishes the task
    return True
def get_args():
    """Build the sample's argument parser and return the parsed namespace
    (prompting for a password if one was not supplied)."""
    parser = cli.build_arg_parser()
    parser.add_argument(
        '-n', '--vmname', required=True,
        help="Name of the VirtualMachine you want to change.")
    parser.add_argument(
        '-m', '--unitnumber', required=True, type=int,
        help='HDD number to delete.')
    parser.add_argument(
        '-y', '--yes', action='store_true',
        help='Confirm disk deletion.')
    parsed_args = parser.parse_args()
    return cli.prompt_for_password(parsed_args)
def get_obj(content, vim_type, name):
    """Return the first managed object of *vim_type* whose ``name`` matches,
    or None when nothing matches."""
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vim_type, True)
    for candidate in container.view:
        if candidate.name == name:
            return candidate
    return None
def prompt_y_n_question(question, default="no"):
    """ based on:
    http://code.activestate.com/recipes/577058/
    :param question: Question to ask
    :param default: "yes", "no" or None (no default)
    :return: True/False
    :raises ValueError: if `default` is not "yes", "no" or None
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("Invalid default answer: '{}'".format(default))
    # BUGFIX: the original called raw_input(), which raises NameError on
    # Python 3 even though the rest of the file uses py3-style print().
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input  # Python 3
    while True:
        print(question + prompt)
        choice = read_line().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            print("Please, respond with 'yes' or 'no' or 'y' or 'n'.")
def main():
    """Entry point: connect to vCenter, locate the VM and delete the disk."""
    args = get_args()
    # connect to vc
    si = SmartConnect(
        host=args.host,
        user=args.user,
        pwd=args.password,
        port=args.port)
    # disconnect vc on exit
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()
    print('Searching for VM {}'.format(args.vmname))
    vm_obj = get_obj(content, [vim.VirtualMachine], args.vmname)
    if vm_obj:
        if not args.yes:
            # BUGFIX: the original called cli.prompt_y_n_question (the helper
            # is defined in this module, not in tools.cli) and ignored the
            # answer, so the disk was deleted even when the user said "no".
            confirmed = prompt_y_n_question("Are you sure you want "
                                            "to delete HDD "
                                            "{}?".format(args.unitnumber),
                                            default='no')
            if not confirmed:
                print('Deletion cancelled.')
                return
        delete_virtual_disk(si, vm_obj, args.unitnumber)
        print('VM HDD "{}" successfully deleted.'.format(args.unitnumber))
    else:
        print('VM not found')
# start
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| {
"content_hash": "c8fd6034d8b8d3e3ee10d656bd8e1b73",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 31.403225806451612,
"alnum_prop": 0.5906522855675398,
"repo_name": "pfitzer/pyvmomi-community-samples",
"id": "a3010119d749d25082f6479e7258e1cfa7b3dab7",
"size": "4197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/delete_disk_from_vm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1612"
}
],
"symlink_target": ""
} |
import os.path as op
from os import environ
import time
import logging
import shutil
from Bio import SeqIO
from . import conf, errors, helper, structure_tools
logger = logging.getLogger(__name__)
class TCoffee(object):
    """Aligns sequences using t_coffee in expresso mode.

    Copies the input fasta pair into the t_coffee working directory,
    optionally cleans a PDB structural template, and builds/executes the
    t_coffee system command for the requested mode.
    """

    def __init__(self, alignment_fasta_file, mode, pdb_file=None):
        """Prepare working files for the alignment.

        Parameters
        ----------
        alignment_fasta_file : str
            Fasta file holding exactly two records (target, then template).
        mode : str
            One of '3dcoffee', 'expresso', 't_coffee' or 'quick'.
        pdb_file : str, optional
            PDB structural template; when given it is cleaned for t_coffee
            and a template-list file is written next to the fasta.
        """
        self.alignment_id = op.splitext(op.basename(alignment_fasta_file))[0]
        self.alignment_fasta_file = op.join(
            conf.CONFIGS['tcoffee_dir'], self.alignment_id + '.fasta')
        # Work on a copy inside the t_coffee directory.
        if alignment_fasta_file != self.alignment_fasta_file:
            shutil.copy(alignment_fasta_file, self.alignment_fasta_file)
        # Tuple unpacking asserts the file contains exactly two records:
        # target first, template second.
        self.target_seqrecord, self.template_seqrecord = (
            list(SeqIO.parse(self.alignment_fasta_file, 'fasta'))
        )
        self.mode = mode
        if pdb_file is not None:
            self.pdb_id, self.pdb_file = self._clean_pdb(pdb_file)
            # Write a template file for the sequences to be aligned
            self.alignment_template_file = (
                op.join(conf.CONFIGS['tcoffee_dir'],
                        '{}.template_list'.format(self.alignment_id))
            )
            with open(self.alignment_template_file, 'w') as fh:
                # BUGFIX: the original list literal was missing the comma
                # between the two entries, so implicit string concatenation
                # merged them into one element.  The bytes written were the
                # same, but two separate lines is what was intended.
                fh.writelines([
                    ">" + self.target_seqrecord.id + " _P_ " + self.pdb_id.upper() + "\n",
                    ">" + self.template_seqrecord.id + " _P_ " + self.pdb_id.upper() + "\n",
                ])

    def _clean_pdb(self, pdb_file):
        """Write a template PDB file in a format that is compatible with t_coffee.

        Returns
        -------
        (pdb_id, pdb_file_new) : (str, str)
            The PDB identifier and the path of the cleaned file.
        """
        message = (
            "Cleaning pdb {} to serve as a template for t_coffee...".format(pdb_file)
        )
        logger.debug(message)
        pdb_id = structure_tools.get_pdb_id(pdb_file)
        pdb_file_new = op.join(
            conf.CONFIGS['tcoffee_dir'], pdb_id + '.pdb')
        # NOTE(review): the '>' redirection assumes helper.run executes the
        # command through a shell -- confirm.
        system_command = (
            "t_coffee -other_pg extract_from_pdb {} > {}".format(pdb_file, pdb_file_new)
        )
        p = helper.run(system_command, cwd=conf.CONFIGS['tcoffee_dir'])
        if p.returncode != 0:
            # Log and continue; the caller will fail later if the cleaned
            # file is unusable.
            logger.error("Error cleaning pdb!")
            logger.error("System command: '{}'".format(system_command))
            logger.error("Result:\n{}".format(p.stdout))
            logger.error("Error message:\n{}".format(p.stderr))
        # Presumably gives the filesystem a moment to flush the new file.
        time.sleep(0.2)
        return pdb_id, pdb_file_new

    def _get_tcoffee_system_command(
            self, alignment_fasta_file, alignment_template_file, alignment_output_file, mode):
        """Build the t_coffee shell command and its environment.

        Parameters
        ----------
        alignment_fasta_file : str
            Name of the file that contains the sequences to be aligned in fasta format.
        alignment_template_file : str
            Name of the file that contains the structural templates to be used for
            structure-assisted alignments.
        alignment_output_file : str
            Name of the file where the alignment should be saved.
        mode : str
            T-coffee mode that should be run ('3dcoffee', 'expresso',
            't_coffee' or 'quick').

        Returns
        -------
        system_command : str
            System call that runs tcoffee.
        tcoffee_env : dict
            A dictionary of environment variables to run tcoffee in a
            thread-safe manner.

        Raises
        ------
        ValueError
            If `mode` is not one of the four supported modes.  (The original
            fell through and raised UnboundLocalError instead.)
        """
        # Environment variables
        # To be able to run parallel instances of t_coffee, the environment
        # variables have to be set to unique paths for each t_coffee call.
        # Also, make sure to change the directory to a unique one before
        # calling t_coffee.
        tcoffee_env = environ.copy()
        tcoffee_env['HOME_4_TCOFFEE'] = op.join(conf.CONFIGS['tcoffee_dir'])
        tcoffee_env['TMP_4_TCOFFEE'] = op.join(conf.CONFIGS['tcoffee_dir'], 'tmp')
        tcoffee_env['CACHE_4_TCOFFEE'] = op.join(conf.CONFIGS['tcoffee_dir'], 'cache')
        tcoffee_env['LOCKDIR_4_TCOFFEE'] = op.join(conf.CONFIGS['tcoffee_dir'], 'lck')
        tcoffee_env['ERRORFILE_4_TCOFFEE'] = (
            op.join(conf.CONFIGS['tcoffee_dir'], 't_coffee.ErrorReport')
        )
        tcoffee_env['BLASTDB'] = conf.CONFIGS['blast_db_dir']
        tcoffee_env['PDB_DIR'] = conf.CONFIGS['pdb_dir']
        tcoffee_env['NO_REMOTE_PDB_DIR'] = '1'
        # Log a command that can be used to reproduce the environment by hand.
        t_coffee_environment_variables = [
            'HOME_4_TCOFFEE', 'TMP_4_TCOFFEE', 'CACHE_4_TCOFFEE', 'LOCKDIR_4_TCOFFEE',
            'ERRORFILE_4_TCOFFEE', 'BLASTDB', 'PDB_DIR', 'NO_REMOTE_PDB_DIR']
        exports = [
            'export {}={}'.format(x, tcoffee_env.get(x, '$' + x))
            for x in t_coffee_environment_variables
        ]
        message = (
            "\nSystem command for setting environmental variables:\n" + ' && '.join(exports)
        )
        logger.debug(message)
        # ### System command
        # Use the following command to clean the pdb file (add headers, etc.)
        # 't_coffee -other_pg extract_from_pdb 32c2A.pdb > template.pdb '
        # Use the folllowing command to perform the most accurate alignment
        # 't_coffee -mode 3dcoffee -method sap_pair,mustang_pair,TMalign_pair
        # -blast_server=LOCAL -pdb_db=pdbaa -protein_db=nr -outorder=input
        # -output fasta_aln -outfile tcoffee_output.aln -seq seqfiles.fasta
        # -pdb_min_sim=20 -template_file seqfiles.template '
        multi_core_option = (
            '{}'.format(conf.CONFIGS['n_cores'])
            if conf.CONFIGS['n_cores'] and int(conf.CONFIGS['n_cores']) > 1 else 'no'
        )
        n_core_option = '{}'.format(conf.CONFIGS['n_cores']) if conf.CONFIGS['n_cores'] else '1'
        protein_db = op.join(conf.CONFIGS['blast_db_dir'], 'nr')
        pdb_db = op.join(conf.CONFIGS['blast_db_dir'], 'pdbaa')
        if mode == '3dcoffee':
            system_command = (
                "t_coffee " +
                " -seq " + alignment_fasta_file +
                " -method=sap_pair,mustang_pair,TMalign_pair " +
                " -blast_server=LOCAL " +
                " -protein_db=" + protein_db +
                " -pdb_db=" + pdb_db +
                " -outorder=input" +
                " -output=fasta_aln" +
                " -pdb_min_sim=30" +
                # " -quiet" +
                # " -no_warning" +
                " -outfile=" + alignment_output_file +
                " -multi_core=no" +
                " -n_core=" + n_core_option +
                " -template_file=" + alignment_template_file
            )
        elif mode == 'expresso':
            system_command = (
                't_coffee' +
                ' -mode expresso' +
                ' -method sap_pair' +
                ' -seq ' + alignment_fasta_file +
                ' -blast_server=LOCAL' +
                " -protein_db=" + protein_db +
                " -pdb_db=" + pdb_db +
                ' -outorder=input' +
                ' -output fasta_aln' +
                ' -quiet -no_warning' +
                ' -outfile=' + alignment_output_file +
                ' -cache ignore ' +
                ' -multi_core ' + multi_core_option +  # AS changed !!!
                ' -n_core ' + n_core_option  # AS changed !!!
            )
        elif mode == 't_coffee':
            # NOTE(review): this branch also passes '-mode expresso' -- looks
            # suspicious for a mode named 't_coffee'; confirm with the author.
            system_command = (
                't_coffee' +
                ' -mode expresso' +
                ' -method clustalw_pair,slow_pair' +
                ' -seq ' + alignment_fasta_file +
                ' -blast_server=LOCAL' +
                " -protein_db=" + protein_db +
                " -pdb_db=" + pdb_db +
                ' -outorder=input' +
                ' -output fasta_aln' +
                ' -quiet -no_warning' +
                ' -outfile=' + alignment_output_file +
                ' -multi_core ' + multi_core_option +  # AS changed !!!
                ' -n_core ' + n_core_option  # AS changed !!!
            )
        elif mode == 'quick':
            system_command = (
                't_coffee' +
                ' -mode quickaln' +
                ' -method clustalw_pair,slow_pair' +
                ' -seq ' + alignment_fasta_file +
                ' -blast_server=LOCAL' +
                " -protein_db=" + protein_db +
                " -pdb_db=" + pdb_db +
                ' -outorder=input' +
                ' -output fasta_aln' +
                ' -quiet -no_warning' +
                ' -outfile=' + alignment_output_file +
                ' -multi_core ' + multi_core_option +  # AS changed !!!
                ' -n_core ' + n_core_option  # AS changed !!!
            )
        else:
            raise ValueError("Unsupported t_coffee mode: {!r}".format(mode))
        return system_command, tcoffee_env

    def align(self, GAPOPEN=-0.0, GAPEXTEND=-0.0):
        """Call t_coffee (make sure BLAST is installed locally!).

        Parameters
        ----------
        GAPOPEN : int or str
            See t_coffee manual.  NOTE(review): currently unused; kept for
            backward compatibility.
        GAPEXTEND : int or str
            See t_coffee manual.  NOTE(review): currently unused.

        Returns
        -------
        alignment_output_file : str
            Name of file which contains the alignment in fasta format.

        Raises
        ------
        errors.TcoffeeError
            If both the requested mode and the 'quick' fallback fail.
        """
        # NOTE(review): self.alignment_template_file only exists when a
        # pdb_file was given to __init__ -- confirm all callers provide one.
        alignment_output_file = op.join(conf.CONFIGS['tcoffee_dir'], self.alignment_id + '.aln')
        system_command, tcoffee_env = self._get_tcoffee_system_command(
            self.alignment_fasta_file, self.alignment_template_file, alignment_output_file,
            self.mode)
        # Perform t_coffee alignment
        logger.debug("\nTCoffee system command:\n{}".format(system_command))
        p = helper.run(system_command, cwd=conf.CONFIGS['tcoffee_dir'], env=tcoffee_env)
        logger.debug("t_coffee results:\n{}".format(p.stdout))
        # Only the recap section of stderr is interesting for the log.
        error_message_summary_idx = p.stderr.find(
            '* MESSAGES RECAPITULATION')
        if error_message_summary_idx == -1:
            error_message_summary = ''
        else:
            error_message_summary = p.stderr[error_message_summary_idx:]
        logger.debug("t_coffee errors:\n{}".format(error_message_summary.strip()))
        # Check if tcoffee had an unexpected exit and if not, return the
        # name of the produced alignment file.
        if p.returncode == 0:
            logger.info("Successfully made the alignment")
            return alignment_output_file
        else:
            logger.error(
                'Structural alignment failed with the following error: {}'.format(p.stderr))
            logger.error('Running quickalign alignment instead...')
            # Fall back to the fast, structure-free 'quick' mode.
            system_command, tcoffee_env = self._get_tcoffee_system_command(
                self.alignment_fasta_file, self.alignment_template_file, alignment_output_file,
                'quick')
            p = helper.run(system_command, cwd=conf.CONFIGS['tcoffee_dir'], env=tcoffee_env)
            if p.returncode == 0:
                return alignment_output_file
            else:
                logger.error('Even quickaln didn\'t work. Cannot create an alignment. Giving up.')
                raise errors.TcoffeeError(
                    p.stdout, p.stderr, self.alignment_fasta_file, system_command)
| {
"content_hash": "570fcb83fdcac7d7a683d0ee46c92bd1",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 98,
"avg_line_length": 43.64230769230769,
"alnum_prop": 0.5417290913897946,
"repo_name": "ostrokach/elaspic",
"id": "c6081b1080e56b762ce7b3fbe1614ed31041ef16",
"size": "11347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elaspic/call_tcoffee.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "423563"
},
{
"name": "Shell",
"bytes": "6778"
}
],
"symlink_target": ""
} |
""" CISCO_RADIUS_EXT_MIB
This MIB module defines objects describing RADIUS (Remote
Access Dialin User Service), serving as an extension of the
following MIB modules\:
\-
\- RADIUS\-AUTH\-CLIENT\-MIB [RFC4668]
\- RADIUS\-AUTH\-SERVER\-MIB [RFC4669]
\- RADIUS\-ACC\-CLIENT\-MIB [RFC4670]
\- RADIUS\-ACC\-SERVER\-MIB [RFC4671]
\- RADIUS\-DYNAUTH\-CLIENT\-MIB [RFC4672]
\- RADIUS\-DYNAUTH\-SERVER\-MIB [RFC4673]
\-
[RFC4668] D. Nelson, RADIUS Authentication Client MIB for IPv6,
RFC\-4668, August 2006.
\-
[RFC4669] D. Nelson, RADIUS Authentication Server MIB for IPv6,
RFC\-4669, August 2006.
\-
[RFC4670] D. Nelson, RADIUS Accounting Client MIB for IPv6,
RFC\-4670, August 2006.
\-
[RFC4671] D. Nelson, RADIUS Accounting Server MIB for IPv6,
RFC\-4671, August 2006.
\-
[RFC4672] S. De Cnodder, N. Jonnala, M. Chiba, RADIUS Dynamic
Authorization Client MIB, RFC\-4672, September 2006.
\-
[RFC4673] S. De Cnodder, N. Jonnala, M. Chiba, RADIUS Dynamic
Authorization Server MIB, RFC\-4673, September 2006.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class CiscoRadiusExtMib(object):
    """CISCO-RADIUS-EXT-MIB: Cisco extensions to the standard RADIUS MIBs.

    Container object with three operational-data sub-containers:

    .. attribute:: creclientaccounting
        RADIUS accounting-client statistics.
    .. attribute:: creclientauthentication
        RADIUS authentication-client statistics.
    .. attribute:: creclientglobal
        Global RADIUS client statistics.

    The repetitive hand-written ``_has_data`` if-chains of the generated
    original are replaced by a data-driven check over ``_LEAF_NAMES``;
    external behaviour and the attribute set are unchanged.
    """

    _prefix = 'CISCO-RADIUS-EXT-MIB'
    _revision = '2010-05-25'

    def __init__(self):
        self.creclientaccounting = CiscoRadiusExtMib.Creclientaccounting()
        self.creclientaccounting.parent = self
        self.creclientauthentication = CiscoRadiusExtMib.Creclientauthentication()
        self.creclientauthentication.parent = self
        self.creclientglobal = CiscoRadiusExtMib.Creclientglobal()
        self.creclientglobal.parent = self

    class Creclientglobal(object):
        """creClientGlobal: global RADIUS client statistics.

        Leaf attributes (int or None until populated):

        - creclientlastusedsourceid (0..255): last source identifier used.
        - creclientlastusedsourceport (0..65535): last source port used.
        - creclientsourceportrangeend (0..65535): end of the source-port range.
        - creclientsourceportrangestart (0..65535): start of the range.
        - creclienttotalaccessrejects (0..4294967295): access-rejects received.
        - creclienttotalaverageresponsedelay (0..2147483647): overall delay.
        - creclienttotalmaxdoneqlength (0..4294967295): max done-queue length.
        - creclienttotalmaxinqlength (0..4294967295): max in-queue length.
        - creclienttotalmaxwaitqlength (0..4294967295): max wait-queue length.
        """

        _prefix = 'CISCO-RADIUS-EXT-MIB'
        _revision = '2010-05-25'

        # All scalar leaves of this container, in the original order.
        _LEAF_NAMES = (
            'creclientlastusedsourceid',
            'creclientlastusedsourceport',
            'creclientsourceportrangeend',
            'creclientsourceportrangestart',
            'creclienttotalaccessrejects',
            'creclienttotalaverageresponsedelay',
            'creclienttotalmaxdoneqlength',
            'creclienttotalmaxinqlength',
            'creclienttotalmaxwaitqlength',
        )

        def __init__(self):
            self.parent = None
            # Every leaf starts out unset (None), as in the generated code.
            for leaf in self._LEAF_NAMES:
                setattr(self, leaf, None)

        @property
        def _common_path(self):
            return '/CISCO-RADIUS-EXT-MIB:CISCO-RADIUS-EXT-MIB/CISCO-RADIUS-EXT-MIB:creClientGlobal'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when at least one leaf has been populated.
            return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _CISCO_RADIUS_EXT_MIB as meta
            return meta._meta_table['CiscoRadiusExtMib.Creclientglobal']['meta_info']

    class Creclientauthentication(object):
        """creClientAuthentication: RADIUS authentication-client statistics.

        Leaf attributes (int or None until populated):

        - creauthclientaverageresponsedelay (0..2147483647): average delay.
        - creauthclientbadauthenticators (0..4294967295): bad authenticators.
        - creauthclientbufferallocfailures (0..4294967295): buffer failures.
        - creauthclientdupids (0..4294967295): duplicate response identifiers.
        - creauthclientlastusedsourceid (0..255): last source identifier.
        - creauthclientmalformedresponses (0..4294967295): malformed responses.
        - creauthclientmaxbuffersize (0..4294967295): max buffer size, bytes.
        - creauthclientmaxresponsedelay (0..2147483647): max delay.
        - creauthclienttimeouts (0..4294967295): timeouts.
        - creauthclienttotalpacketswithoutresponses (0..4294967295).
        - creauthclienttotalpacketswithresponses (0..4294967295).
        - creauthclienttotalresponses (0..4294967295).
        - creauthclientunknownresponses (0..4294967295).
        """

        _prefix = 'CISCO-RADIUS-EXT-MIB'
        _revision = '2010-05-25'

        _LEAF_NAMES = (
            'creauthclientaverageresponsedelay',
            'creauthclientbadauthenticators',
            'creauthclientbufferallocfailures',
            'creauthclientdupids',
            'creauthclientlastusedsourceid',
            'creauthclientmalformedresponses',
            'creauthclientmaxbuffersize',
            'creauthclientmaxresponsedelay',
            'creauthclienttimeouts',
            'creauthclienttotalpacketswithoutresponses',
            'creauthclienttotalpacketswithresponses',
            'creauthclienttotalresponses',
            'creauthclientunknownresponses',
        )

        def __init__(self):
            self.parent = None
            for leaf in self._LEAF_NAMES:
                setattr(self, leaf, None)

        @property
        def _common_path(self):
            return '/CISCO-RADIUS-EXT-MIB:CISCO-RADIUS-EXT-MIB/CISCO-RADIUS-EXT-MIB:creClientAuthentication'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when at least one leaf has been populated.
            return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _CISCO_RADIUS_EXT_MIB as meta
            return meta._meta_table['CiscoRadiusExtMib.Creclientauthentication']['meta_info']

    class Creclientaccounting(object):
        """creClientAccounting: RADIUS accounting-client statistics.

        Leaf attributes (int or None until populated):

        - creacctclientaverageresponsedelay (0..2147483647): average delay.
        - creacctclientbadauthenticators (0..4294967295): bad authenticators.
        - creacctclientbufferallocfailures (0..4294967295): buffer failures.
        - creacctclientdupids (0..4294967295): duplicate response identifiers.
        - creacctclientlastusedsourceid (0..255): last source identifier.
        - creacctclientmalformedresponses (0..4294967295): malformed responses.
        - creacctclientmaxbuffersize (0..4294967295): max buffer size, bytes.
        - creacctclientmaxresponsedelay (0..2147483647): max delay.
        - creacctclienttimeouts (0..4294967295): timeouts.
        - creacctclienttotalpacketswithoutresponses (0..4294967295).
        - creacctclienttotalpacketswithresponses (0..4294967295).
        - creacctclienttotalresponses (0..4294967295).
        - creacctclientunknownresponses (0..4294967295).
        """

        _prefix = 'CISCO-RADIUS-EXT-MIB'
        _revision = '2010-05-25'

        _LEAF_NAMES = (
            'creacctclientaverageresponsedelay',
            'creacctclientbadauthenticators',
            'creacctclientbufferallocfailures',
            'creacctclientdupids',
            'creacctclientlastusedsourceid',
            'creacctclientmalformedresponses',
            'creacctclientmaxbuffersize',
            'creacctclientmaxresponsedelay',
            'creacctclienttimeouts',
            'creacctclienttotalpacketswithoutresponses',
            'creacctclienttotalpacketswithresponses',
            'creacctclienttotalresponses',
            'creacctclientunknownresponses',
        )

        def __init__(self):
            self.parent = None
            for leaf in self._LEAF_NAMES:
                setattr(self, leaf, None)

        @property
        def _common_path(self):
            return '/CISCO-RADIUS-EXT-MIB:CISCO-RADIUS-EXT-MIB/CISCO-RADIUS-EXT-MIB:creClientAccounting'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when at least one leaf has been populated.
            return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _CISCO_RADIUS_EXT_MIB as meta
            return meta._meta_table['CiscoRadiusExtMib.Creclientaccounting']['meta_info']

    @property
    def _common_path(self):
        return '/CISCO-RADIUS-EXT-MIB:CISCO-RADIUS-EXT-MIB'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when any of the three sub-containers holds data.
        for child in (self.creclientaccounting,
                      self.creclientauthentication,
                      self.creclientglobal):
            if child is not None and child._has_data():
                return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _CISCO_RADIUS_EXT_MIB as meta
        return meta._meta_table['CiscoRadiusExtMib']['meta_info']
| {
"content_hash": "038b798af162dce8883991ab07abba05",
"timestamp": "",
"source": "github",
"line_count": 638,
"max_line_length": 276,
"avg_line_length": 34.13009404388715,
"alnum_prop": 0.6111136624569461,
"repo_name": "111pontes/ydk-py",
"id": "8d8b7afa623aa6d241333f2b64a02fc81f87dbe2",
"size": "21775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_RADIUS_EXT_MIB.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
# Configure a minimal, self-contained Django settings object so the test
# suite can run without a real project.  Skipped when settings are already
# configured (e.g. when invoked from inside an existing project).
if not settings.configured:
    settings.configure(
        # In-memory/throwaway sqlite database for tests.
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
            }
        },
        MIDDLEWARE_CLASSES=(
            'django.middleware.common.CommonMiddleware',
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
            'django.contrib.messages.middleware.MessageMiddleware',
            'django.contrib.redirects.middleware.RedirectFallbackMiddleware'
        ),
        # Django contrib apps plus the opps apps the package under test needs.
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'django.contrib.sites',
            'django.contrib.admin',
            'django.contrib.redirects',
            'opps.core',
            'opps.channels',
            'opps.containers',
            'opps.archives',
            'opps.boxes',
            'opps.sources',
            'opps.articles',
            'opps.images',
            'opps.sitemaps',
            'opps.flatpages',
            'opps.facebookcomments',
        ),
        SITE_ID = 1,
        ROOT_URLCONF = "opps.urls",
        # Run tests through django_coverage to collect coverage data.
        TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner',
        # Simple (non-indexing) haystack backend so no search engine is needed.
        HAYSTACK_CONNECTIONS = {
            'default': {
                'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
            }
        },
    )
def runtests():
    """Run the facebookcomments test suite via Django, then exit with 0."""
    execute_from_command_line([sys.argv[0], 'test', 'facebookcomments'])
    sys.exit(0)
# Allow direct invocation: ``python runtests.py``.
if __name__ == '__main__':
    runtests()
| {
"content_hash": "42d425add2e556578d2b89145b521d72",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 76,
"avg_line_length": 28.56451612903226,
"alnum_prop": 0.5561829474872954,
"repo_name": "opps/opps-facebook-comments",
"id": "9f61c376c08d47cc767a6eab2af6d5585710b799",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29585"
}
],
"symlink_target": ""
} |
'''
bac tester.
This works by running a number of scenarios and catching the log output and
comparing it to a successful log.
'''
import json
import socket
import traceback
import time
import bac
class TestError(Exception):
    """Raised when a scenario's log output diverges from the success log."""
def find_server():
    """Listen for a UDP broadcast announcement and return (address, port)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', bac.BROADCAST_PORT))
    while True:
        raw = sock.recv(1024).decode(bac.ENCODING)
        try:
            announcement = json.loads(raw)
        except ValueError:
            # Not JSON; the dict check below rejects it and we keep waiting.
            announcement = raw
        if isinstance(announcement, dict) and announcement.get('bac') == bac.VERSION:
            return announcement.get('address'), announcement.get('port')
def send(socket, msg):
    """Serialize *msg* as newline-terminated JSON and send it on *socket*."""
    payload = json.dumps(msg) + "\n"
    socket.send(payload.encode(bac.ENCODING))
def recv(socket):
    """Receive up to 4 KiB from *socket* and decode it to text."""
    return socket.recv(4096).decode(bac.ENCODING)
class log:
    '''This logger compares incoming logs with those from a success log file.'''
    def __init__(self):
        # Load the expected output, one log line per entry (newlines kept).
        with open('success.txt', 'r') as f:
            self.lines = f.readlines()
    def __call__(self, c):
        # Pop the next expected line (strip its trailing newline) and compare
        # it with the formatted incoming entry.
        line = self.lines.pop(0)[:-1]
        if not line == '{}'.format(c):
            # On mismatch, print the call site (dropping the innermost
            # logging/traceback frames), then expected vs. actual, and abort.
            stack = traceback.extract_stack()
            for l in traceback.format_list(stack[:-4]):
                print(l[:-1])
            print('Expected: "{}"'.format(line))
            print('Got: "{}"'.format(c))
            raise TestError
class log2:
    '''This logger writes logs to a file to be used as a success log file.'''
    def __init__(self):
        # NOTE(review): mode 'w' already truncates, so truncate(0) is
        # redundant; the handle is also never closed explicitly and lives
        # for the duration of the run.
        self.f = open('success.txt', 'w')
        self.f.truncate(0)
    def __call__(self, c):
        # Mirror every log entry to stdout and to the success file.
        msg = "{}".format(c)
        print(msg)
        self.f.write(msg + "\n")
def test(write=False):
    """Run the scenario suite against a local bac server.

    When *write* is True, log output is recorded to success.txt (via log2)
    instead of being compared against it (via log).
    """
    if write:
        bac.log = log2()
    else:
        bac.log = log()
    # Initialise server
    server = bac.Server('Test Server', local=True)
    # NOTE(review): s1/s2 slot handles are unused beyond registration.
    s1 = server.add_slot('Slot 1')
    s2 = server.add_slot('Slot 2')
    addr = '127.0.0.1'
    port = server.socket.getsockname()[1]
    # Create sockets and connect to server.  Each action is followed by a
    # short sleep plus server.process() so the server handles the event
    # before the next step.
    socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket1.connect((addr, port))
    time.sleep(0.1)
    server.process()
    socket2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket2.connect((addr, port))
    time.sleep(0.1)
    server.process()
    socket3 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket3.connect((addr, port))
    time.sleep(0.1)
    server.process()
    # Wrong version number
    send(socket1, {'bac': bac.VERSION+1})
    time.sleep(0.1)
    server.process()
    #recv(socket1)
    # Get slots
    send(socket1, {'bac': bac.VERSION, 'command': 'get_slots'})
    time.sleep(0.1)
    server.process()
    #recv(socket1)
    # Request slot
    send(socket1, {'bac': bac.VERSION, 'command': 'request_slot', 'args': [0]})
    time.sleep(0.1)
    server.process()
    #recv(socket1)
    # Get slots
    send(socket2, {'bac': bac.VERSION, 'command': 'get_slots'})
    time.sleep(0.1)
    server.process()
    #recv(socket2)
    # Request occupied slot
    send(socket2, {'bac': bac.VERSION, 'command': 'request_slot', 'args': [0]})
    time.sleep(0.1)
    server.process()
    #recv(socket2)
    # Relinquish slot
    send(socket1, {'bac': bac.VERSION, 'command': 'relinquish_slot'})
    time.sleep(0.1)
    server.process()
    #recv(socket1)
    # Get slots
    send(socket3, {'bac': bac.VERSION, 'command': 'get_slots'})
    time.sleep(0.1)
    server.process()
    #recv(socket3)
    # Request slot
    send(socket1, {'bac': bac.VERSION, 'command': 'request_slot', 'args': [0]})
    time.sleep(0.1)
    server.process()
    #recv(socket1)
    # Request slot
    send(socket2, {'bac': bac.VERSION, 'command': 'request_slot', 'args': [1]})
    time.sleep(0.1)
    server.process()
    #recv(socket2)
    # Get slots
    send(socket1, {'bac': bac.VERSION, 'command': 'get_slots'})
    time.sleep(0.1)
    server.process()
    #recv(socket1)
    # Disconnect from server
    recv(socket1)
    socket1.close()
    time.sleep(0.1)
    server.process()
    recv(socket2)
    socket2.close()
    time.sleep(0.1)
    server.process()
    recv(socket3)
    socket3.close()
    time.sleep(0.1)
    server.process()
    print('All tests completed successfully!')
if __name__ == '__main__':
    # Direct execution regenerates the success log; TestError is swallowed
    # because comparison failures are expected while (re)recording.
    try:
        test(write=True)
    except TestError:
        pass
| {
"content_hash": "fb382114cbfccbceaad31d415b46623f",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 80,
"avg_line_length": 23.701570680628272,
"alnum_prop": 0.5840512480671527,
"repo_name": "atbentley/bac",
"id": "669861a3e651f13cd022ecb2c0110af00408eb0e",
"size": "4527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14004"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This file contains integration tests for the go/vt/binlog package.
# It sets up filtered replication between two shards and checks how data flows
# through binlog streamer.
import logging
import unittest
import environment
import tablet
import utils
from mysql_flavor import mysql_flavor
from vtdb import keyrange_constants
from vtdb import update_stream
# Source shard (0) tablets: one master, one replica, two rdonly.
src_master = tablet.Tablet()
src_replica = tablet.Tablet()
src_rdonly1 = tablet.Tablet()
src_rdonly2 = tablet.Tablet()
# Destination shard (-) tablets that receive the filtered binlog stream.
dst_master = tablet.Tablet()
dst_replica = tablet.Tablet()
def setUpModule():
  """Bring up source/destination shards and start filtered replication."""
  try:
    environment.topo_server().setup()
    setup_procs = [
        src_master.init_mysql(),
        src_replica.init_mysql(),
        src_rdonly1.init_mysql(),
        src_rdonly2.init_mysql(),
        dst_master.init_mysql(),
        dst_replica.init_mysql(),
        ]
    utils.Vtctld().start()
    utils.wait_procs(setup_procs)
    # Set up binlog stream from shard 0 to shard 1.
    # Modeled after initial_sharding.py.
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', 'test_keyspace',
                     'keyspace_id', keyrange_constants.KIT_UINT64])
    src_master.init_tablet('master', 'test_keyspace', '0')
    src_replica.init_tablet('replica', 'test_keyspace', '0')
    src_rdonly1.init_tablet('rdonly', 'test_keyspace', '0')
    src_rdonly2.init_tablet('rdonly', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0'])
    utils.validate_topology()
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    for t in [src_master, src_replica, src_rdonly1, src_rdonly2]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None)
    for t in [src_master, src_replica, src_rdonly1, src_rdonly2]:
      t.wait_for_vttablet_state('SERVING')
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     src_master.tablet_alias], auto_log=True)
    # Create schema
    logging.debug('Creating schema...')
    create_table = '''create table test_table(
        id bigint auto_increment,
        keyspace_id bigint(20) unsigned,
        msg varchar(64),
        primary key (id),
        index by_msg (msg)
        ) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table,
                     'test_keyspace'], auto_log=True)
    # Create destination shard.
    dst_master.init_tablet('master', 'test_keyspace', '-')
    dst_replica.init_tablet('replica', 'test_keyspace', '-')
    dst_master.start_vttablet(wait_for_state='NOT_SERVING')
    dst_replica.start_vttablet(wait_for_state='NOT_SERVING')
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/-',
                     dst_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # copy the schema
    utils.run_vtctl(['CopySchemaShard', src_replica.tablet_alias,
                     'test_keyspace/-'], auto_log=True)
    # run the clone worked (this is a degenerate case, source and destination
    # both have the full keyrange. Happens to work correctly).
    logging.debug('Running the clone worker to start binlog stream...')
    utils.run_vtworker(['--cell', 'test_nj',
                        'SplitClone',
                        '--strategy=-populate_blp_checkpoint',
                        '--source_reader_count', '10',
                        '--min_table_size_for_split', '1',
                        'test_keyspace/0'],
                       auto_log=True)
    dst_master.wait_for_binlog_player_count(1)
    # Wait for dst_replica to be ready.
    dst_replica.wait_for_binlog_server_state('Enabled')
  except:
    # Tear down whatever was started before re-raising so a failed setup
    # doesn't leak processes into later modules.
    tearDownModule()
    raise
def tearDownModule():
  """Kill all tablets and mysqld instances and remove temporary state."""
  if utils.options.skip_teardown:
    return
  tablet.kill_tablets([src_master, src_replica, src_rdonly1, src_rdonly2,
                       dst_master, dst_replica])
  teardown_procs = [
      src_master.teardown_mysql(),
      src_replica.teardown_mysql(),
      src_rdonly1.teardown_mysql(),
      src_rdonly2.teardown_mysql(),
      dst_master.teardown_mysql(),
      dst_replica.teardown_mysql(),
      ]
  # Best-effort: mysqld teardown failures are tolerated during cleanup.
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  src_master.remove_tree()
  src_replica.remove_tree()
  src_rdonly1.remove_tree()
  src_rdonly2.remove_tree()
  dst_master.remove_tree()
  dst_replica.remove_tree()
def _get_update_stream(tblt):
  """Open an update-stream connection to the given tablet (30s timeout)."""
  proto, addr = tblt.update_stream_python_endpoint()
  return update_stream.connect(proto, addr, 30)
class TestBinlog(unittest.TestCase):
  """Checks that data survives binlog streaming through charset and
  binlog_checksum changes on the destination replica."""
  def test_charset(self):
    start_position = mysql_flavor().master_position(dst_replica)
    logging.debug('test_charset: starting @ %s', start_position)
    # Insert something that will replicate incorrectly if the charset is not
    # propagated through binlog streamer to the destination.
    #
    # Vitess tablets default to using utf8, so we insert something crazy and
    # pretend it's latin1. If the binlog player doesn't also pretend it's
    # latin1, it will be inserted as utf8, which will change its value.
    src_master.mquery(
        'vt_test_keyspace',
        "INSERT INTO test_table (id, keyspace_id, msg) "
        "VALUES (41523, 1, 'Šṛ́rỏé') /* EMD keyspace_id:1 */",
        conn_params={'charset': 'latin1'}, write=True)
    # Wait for it to replicate.
    stream = _get_update_stream(dst_replica)
    for stream_event in stream.stream_update(start_position):
      if stream_event.category == update_stream.StreamEvent.POS:
        break
    stream.close()
    # Check the value.
    data = dst_master.mquery(
        'vt_test_keyspace',
        'SELECT id, keyspace_id, msg FROM test_table WHERE id=41523 LIMIT 1')
    self.assertEqual(len(data), 1, 'No data replicated.')
    self.assertEqual(len(data[0]), 3, 'Wrong number of columns.')
    self.assertEqual(data[0][2], 'Šṛ́rỏé',
                     'Data corrupted due to wrong charset.')
  def test_checksum_enabled(self):
    start_position = mysql_flavor().master_position(dst_replica)
    logging.debug('test_checksum_enabled: starting @ %s', start_position)
    # Enable binlog_checksum, which will also force a log rotation that should
    # cause binlog streamer to notice the new checksum setting.
    if not mysql_flavor().enable_binlog_checksum(dst_replica):
      logging.debug(
          'skipping checksum test on flavor without binlog_checksum setting')
      return
    # Insert something and make sure it comes through intact.
    sql = (
        "INSERT INTO test_table (id, keyspace_id, msg) "
        "VALUES (19283, 1, 'testing checksum enabled') /* EMD keyspace_id:1 */")
    src_master.mquery('vt_test_keyspace', sql, write=True)
    # Look for it using update stream to see if binlog streamer can talk to
    # dst_replica, which now has binlog_checksum enabled.
    stream = _get_update_stream(dst_replica)
    found = False
    for stream_event in stream.stream_update(start_position):
      if stream_event.category == update_stream.StreamEvent.POS:
        break
      if stream_event.sql == sql:
        found = True
        break
    stream.close()
    self.assertEqual(found, True, 'expected query not found in update stream')
  def test_checksum_disabled(self):
    # Disable binlog_checksum to make sure we can also talk to a server without
    # checksums enabled, in case they are enabled by default.
    start_position = mysql_flavor().master_position(dst_replica)
    logging.debug('test_checksum_disabled: starting @ %s', start_position)
    # For flavors that don't support checksums, this is a no-op.
    mysql_flavor().disable_binlog_checksum(dst_replica)
    # Insert something and make sure it comes through intact.
    sql = (
        "INSERT INTO test_table (id, keyspace_id, msg) "
        "VALUES (58812, 1, 'testing checksum disabled') "
        "/* EMD keyspace_id:1 */")
    src_master.mquery(
        'vt_test_keyspace', sql, write=True)
    # Look for it using update stream to see if binlog streamer can talk to
    # dst_replica, which now has binlog_checksum disabled.
    stream = _get_update_stream(dst_replica)
    found = False
    for stream_event in stream.stream_update(start_position):
      if stream_event.category == update_stream.StreamEvent.POS:
        break
      if stream_event.sql == sql:
        found = True
        break
    stream.close()
    self.assertEqual(found, True, 'expected query not found in update stream')
if __name__ == '__main__':
  utils.main()
| {
"content_hash": "6acafbeadc3349d82aabb719c01cd3f4",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 80,
"avg_line_length": 35.54216867469879,
"alnum_prop": 0.6543502824858757,
"repo_name": "kuipertan/vitess",
"id": "e64e81c4cac1b08d0d1acf8baf5d76e8fcafbcc5",
"size": "8864",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/binlog.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "40319"
},
{
"name": "CSS",
"bytes": "225977"
},
{
"name": "Go",
"bytes": "4686201"
},
{
"name": "HTML",
"bytes": "85331"
},
{
"name": "Java",
"bytes": "253382"
},
{
"name": "JavaScript",
"bytes": "71511"
},
{
"name": "Liquid",
"bytes": "16985"
},
{
"name": "Makefile",
"bytes": "5213"
},
{
"name": "PHP",
"bytes": "56151"
},
{
"name": "PLpgSQL",
"bytes": "10220"
},
{
"name": "Protocol Buffer",
"bytes": "63755"
},
{
"name": "Python",
"bytes": "993209"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "25588"
},
{
"name": "Yacc",
"bytes": "19014"
}
],
"symlink_target": ""
} |
"""This code example creates a new line item to serve to video content. This
feature is only available to DFP premium solution networks. To determine which
line items exist, run get_all_line_items.py. To determine which orders exist,
run get_all_orders.py. To create a video ad unit, run create_video_ad_unit.py.
To create criteria for categories, run
create_custom_targeting_keys_and_values.py"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
from datetime import date
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# NOTE(review): Python 2 example script; the INSERT_* placeholders below must
# be replaced with real IDs before running.
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201204')
# Set order that all created line item will belong to and the video ad unit id
# to target.
order_id = 'INSERT_ORDER_ID_HERE'
targeted_video_ad_unit_id = 'INSERT_TARGETED_VIDEO_AD_UNIT_ID_HERE'
# Set the custom targeting key ID and value ID representing the metadata on the
# content to target. This would typically be a key representing a 'genre' and
# a value representing something like 'comedy'.
content_custom_targeting_key_id = 'INSERT_CONTENT_CUSTOM_TARGETING_KEY_ID_HERE'
content_custom_targeting_value_id = \
    'INSERT_CONTENT_CUSTOM_TARGETING_VALUE_ID_HERE'
# create custom criteria for the content metadata targeting.
custom_criteria = {
    'xsi_type': 'CustomCriteria',
    'keyId': content_custom_targeting_key_id,
    'valueIds': [content_custom_targeting_value_id],
    'operator': 'IS'
}
# Create the custom criteria set.
top_set = {
    'xsi_type': 'CustomCriteriaSet',
    'logicalOperator': 'OR',
    'children': [custom_criteria]
}
# Create line item object.
line_item = {
    'name': 'Line item #%s' % Utils.GetUniqueName(),
    'orderId': order_id,
    'targeting': {
        'customTargeting': top_set,
        'inventoryTargeting': {
            'targetedAdUnits': [{'adUnitId': targeted_video_ad_unit_id,
                                 'includeDescendants': 'True'}]
        },
        'videoPositionTargeting': {
            'targetedVideoPositions': ['PREROLL']
        }
    },
    # Master video creative size plus two companion banner sizes.
    'creativePlaceholders': [
        {
            'size': {
                'width': '400',
                'height': '300'
            },
            'companions': [
                {
                    'size': {
                        'width': '300',
                        'height': '250'
                    },
                },
                {
                    'size': {
                        'width': '728',
                        'height': '90'
                    },
                }
            ]
        }
    ],
    'environmentType': 'VIDEO_PLAYER',
    'companionDeliveryOption': 'OPTIONAL',
    'startDateTimeType': 'IMMEDIATELY',
    'lineItemType': 'SPONSORSHIP',
    # Ends on Sep 30 of next year, midnight.
    'endDateTime': {
        'date': {
            'year': str(date.today().year + 1),
            'month': '9',
            'day': '30'
        },
        'hour': '0',
        'minute': '0',
        'second': '0'
    },
    'costType': 'CPM',
    'costPerUnit': {
        'currencyCode': 'USD',
        'microAmount': '2000000'
    },
    'creativeRotationType': 'OPTIMIZED',
    'discountType': 'PERCENTAGE',
    'unitsBought': '100',
    'unitType': 'IMPRESSIONS',
    'allowOverbook': 'True'
}
# Add line item.
line_item = line_item_service.CreateLineItem(line_item)[0]
# Display results.
print ('Video line item with id \'%s\', belonging to order id \'%s\', and named'
       ' \'%s\' was created.' % (line_item['id'], line_item['orderId'],
                                 line_item['name']))
| {
"content_hash": "6028efbc868ca2089b7f45611ab2e48c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 80,
"avg_line_length": 31.853658536585368,
"alnum_prop": 0.5809086268504339,
"repo_name": "donspaulding/adspygoogle",
"id": "4e8a9f1ab978ffb6207adb3ff1741cdb946dd609",
"size": "4536",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201204/create_video_line_item.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3734067"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
} |
from .endpoint import Endpoint
from .check_traffic_manager_relative_dns_name_availability_parameters import CheckTrafficManagerRelativeDnsNameAvailabilityParameters
from .dns_config import DnsConfig
from .monitor_config import MonitorConfig
from .profile import Profile
from .traffic_manager_name_availability import TrafficManagerNameAvailability
from .resource import Resource
from .profile_paged import ProfilePaged
# Explicit public API of the trafficmanager models package; mirrors the
# classes imported above.
__all__ = [
    'Endpoint',
    'CheckTrafficManagerRelativeDnsNameAvailabilityParameters',
    'DnsConfig',
    'MonitorConfig',
    'Profile',
    'TrafficManagerNameAvailability',
    'Resource',
    'ProfilePaged',
]
| {
"content_hash": "eb23eb4327a091eac6b66555cef4ea0e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 133,
"avg_line_length": 33.73684210526316,
"alnum_prop": 0.7987519500780031,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "657f837d95ad5009db671883db10f07ec2df90c1",
"size": "1115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-trafficmanager/azure/mgmt/trafficmanager/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
} |
from django.http import StreamingHttpResponse
from rest_framework.generics import ListAPIView, ListCreateAPIView, RetrieveAPIView
from rest_framework.settings import api_settings
from spillway import filters, forms, mixins, pagination, renderers, serializers
# Snapshot DRF's configured defaults so the view classes below extend, rather
# than replace, the project-wide filter backends and renderers.
_default_filters = tuple(api_settings.DEFAULT_FILTER_BACKENDS)
_default_renderers = tuple(api_settings.DEFAULT_RENDERER_CLASSES)
class BaseGeoView(mixins.ModelSerializerMixin):
    """Base view for models with geometry fields."""
    # Feature-based serialization and pagination for geographic querysets.
    model_serializer_class = serializers.FeatureSerializer
    pagination_class = pagination.FeaturePagination
    # Project defaults plus spatial-lookup and geoqueryset filtering.
    filter_backends = _default_filters + (
        filters.SpatialLookupFilter, filters.GeoQuerySetFilter)
    # Project defaults plus GeoJSON and KML/KMZ output formats.
    renderer_classes = _default_renderers + (
        renderers.GeoJSONRenderer, renderers.KMLRenderer, renderers.KMZRenderer)
# Concrete vector views: behavior comes entirely from BaseGeoView plus the
# generic DRF view each one mixes in.
class GeoDetailView(BaseGeoView, RetrieveAPIView):
    """Generic detail view providing vector geometry representations."""
class GeoListView(BaseGeoView, ListAPIView):
    """Generic view for listing a geoqueryset."""
class GeoListCreateAPIView(BaseGeoView, ListCreateAPIView):
    """Generic view for listing or creating geomodel instances."""
class BaseRasterView(mixins.ModelSerializerMixin, mixins.QueryFormMixin):
    """Base view for raster models."""
    model_serializer_class = serializers.RasterModelSerializer
    query_form_class = forms.RasterQueryForm
    filter_backends = _default_filters
    # Project default renderers plus zipped GDAL raster formats.
    renderer_classes = _default_renderers + (
        renderers.GeoTIFFZipRenderer,
        renderers.HFAZipRenderer,
    )
    def finalize_response(self, request, response, *args, **kwargs):
        """Swap in a StreamingHttpResponse when a GDAL renderer was chosen."""
        response = super(BaseRasterView, self).finalize_response(
            request, response, *args, **kwargs)
        # Use streaming responses for GDAL formats.
        if isinstance(response.accepted_renderer,
                      renderers.gdal.BaseGDALRenderer):
            # NOTE(review): copies Django's private _headers mapping onto the
            # new response so headers set earlier survive the swap -- relies
            # on a non-public attribute; confirm against the Django version.
            headers = response._headers
            response = StreamingHttpResponse(response.rendered_content)
            response._headers = headers
        return response
    def get_serializer_context(self):
        """Add the negotiated format and cleaned query params to the context."""
        context = super(BaseRasterView, self).get_serializer_context()
        renderer = self.request.accepted_renderer
        context.update(format=renderer.format, **self.clean_params())
        return context
class RasterDetailView(BaseRasterView, RetrieveAPIView):
    """View providing access to a Raster model instance."""
    # Single-instance responses additionally offer unzipped GeoTIFF/HFA.
    renderer_classes = _default_renderers + (
        renderers.GeoTIFFRenderer,
        renderers.HFARenderer,
    )
class RasterListView(BaseRasterView, ListAPIView):
    """View providing access to a Raster model QuerySet."""
    # Lists also accept spatial lookup filtering.
    filter_backends = _default_filters + (filters.SpatialLookupFilter,)
| {
"content_hash": "2a4bd640ba52cb5129155857fcdf55ef",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 83,
"avg_line_length": 39,
"alnum_prop": 0.7331166486096063,
"repo_name": "kuzmich/django-spillway",
"id": "6c6335377ab95cf545dab49d63274cfa6f6deace",
"size": "2769",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spillway/generics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "389"
},
{
"name": "Python",
"bytes": "109771"
}
],
"symlink_target": ""
} |
class ViewRootController():
    """Wires document-level signals on the model to slots on the view root."""
    def __init__(self, view_root, model_document):
        self._view_root = view_root
        self._model_document = model_document
        self.connectSignals()
    def _signalSlotPairs(self):
        """Return the (signal, slot) pairs, in the order they are wired."""
        doc = self._model_document
        root = self._view_root
        return (
            (doc.documentPartAddedSignal, root.partAddedSlot),
            (doc.documentClearSelectionsSignal, root.clearSelectionsSlot),
            (doc.documentSelectionFilterChangedSignal,
             root.selectionFilterChangedSlot),
            (doc.documentViewResetSignal, root.resetRootItemSlot),
        )
    def connectSignals(self):
        for signal, slot in self._signalSlotPairs():
            signal.connect(slot)
    def disconnectSignals(self):
        for signal, slot in self._signalSlotPairs():
            signal.disconnect(slot)
"content_hash": "15dd5b3bb3c07b52bcd037beecf16dad",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 91,
"avg_line_length": 47.285714285714285,
"alnum_prop": 0.7109768378650554,
"repo_name": "JMMolenaar/cadnano2.5",
"id": "050b0f2b683d5d859b1a1a2897f023f88f82be61",
"size": "993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cadnano/gui/controllers/viewrootcontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2645"
},
{
"name": "Python",
"bytes": "1501551"
},
{
"name": "QMake",
"bytes": "3719"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from sentry.message_filters import _localhost_filter
from sentry.relay.config import ProjectConfig
from sentry.testutils import TestCase
class LocalhostFilterTest(TestCase):
    """Exercises _localhost_filter against client IPs and request URLs."""
    def apply_filter(self, data):
        # Run the filter the way the pipeline would: project config + event.
        project_config = ProjectConfig(self.project)
        return _localhost_filter(project_config, data)
    def get_mock_data(self, client_ip=None, url=None):
        """Build a minimal event payload with the given IP and request URL."""
        return {"user": {"ip_address": client_ip}, "request": {"url": url}}
    def test_filters_localhost_ipv4(self):
        data = self.get_mock_data("127.0.0.1")
        assert self.apply_filter(data)
    def test_filters_localhost_ipv6(self):
        data = self.get_mock_data("::1")
        assert self.apply_filter(data)
    def test_does_not_filter_external_ip(self):
        data = self.get_mock_data("74.1.3.56")
        assert not self.apply_filter(data)
    def test_fails_gracefully_without_user(self):
        # Event with no user/request sections must not raise, just pass through.
        assert not self.apply_filter({})
    def test_filters_localhost_domain(self):
        data = self.get_mock_data(url="http://localhost/something.html")
        assert self.apply_filter(data)
        data = self.get_mock_data(url="http://localhost:9000/")
        assert self.apply_filter(data)
        data = self.get_mock_data(url="https://localhost")
        assert self.apply_filter(data)
        data = self.get_mock_data(url="https://127.0.0.1")
        assert self.apply_filter(data)
    def test_does_not_filter_non_localhost_domain(self):
        data = self.get_mock_data(url="https://getsentry.com/")
        assert not self.apply_filter(data)
        # 'localhost' in the query string must not trigger the filter.
        data = self.get_mock_data(url="http://example.com/index.html?domain=localhost")
        assert not self.apply_filter(data)
| {
"content_hash": "bef9412f3b050a7edb435957778955a4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 87,
"avg_line_length": 35.3469387755102,
"alnum_prop": 0.6610854503464203,
"repo_name": "mvaled/sentry",
"id": "256cd66312faa5c3cdf997cbf70072c031152f48",
"size": "1732",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/sentry/filters/test_localhost.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
from mistral import exceptions as exc
from mistral.tests.unit.rpc.kombu import base
from mistral.tests.unit.rpc.kombu import fake_kombu
from mistral import utils
import mock
from six import moves
# Import the kombu-based RPC modules with the real kombu library replaced by
# a fake, so these tests never need a running message broker.
with mock.patch.dict('sys.modules', kombu=fake_kombu):
    from mistral.rpc.kombu import base as kombu_base
    from mistral.rpc.kombu import kombu_listener
class TestException(exc.MistralException):
    """Mistral-flavored exception used to drive the error-response tests."""
class KombuListenerTestCase(base.KombuTestCase):
    """Unit tests for KombuRPCListener: per-correlation-id result queues,
    listener thread startup, and on_message ack/dispatch behavior."""
    def setUp(self):
        super(KombuListenerTestCase, self).setUp()
        # Listener built over mocked connections; no broker is contacted.
        self.listener = kombu_listener.KombuRPCListener(
            [mock.MagicMock()],
            mock.MagicMock()
        )
        self.ctx = type('context', (object,), {'to_dict': lambda self: {}})()
    def test_add_listener(self):
        correlation_id = utils.generate_unicode_uuid()
        self.listener.add_listener(correlation_id)
        self.assertEqual(
            type(self.listener._results.get(correlation_id)),
            moves.queue.Queue
        )
        self.assertEqual(0, self.listener._results[correlation_id].qsize())
    def test_remove_listener_correlation_id_in_results(self):
        correlation_id = utils.generate_unicode_uuid()
        self.listener.add_listener(correlation_id)
        self.assertEqual(
            type(self.listener._results.get(correlation_id)),
            moves.queue.Queue
        )
        self.listener.remove_listener(correlation_id)
        self.assertIsNone(
            self.listener._results.get(correlation_id)
        )
    def test_remove_listener_correlation_id_not_in_results(self):
        # Removing an unknown id must leave existing queues untouched.
        correlation_id = utils.generate_unicode_uuid()
        self.listener.add_listener(correlation_id)
        self.assertEqual(
            type(self.listener._results.get(correlation_id)),
            moves.queue.Queue
        )
        self.listener.remove_listener(utils.generate_unicode_uuid())
        self.assertEqual(
            type(self.listener._results.get(correlation_id)),
            moves.queue.Queue
        )
    @mock.patch('threading.Thread')
    def test_start_thread_not_set(self, thread_class_mock):
        thread_mock = mock.MagicMock()
        thread_class_mock.return_value = thread_mock
        self.listener.start()
        self.assertTrue(thread_mock.daemon)
        self.assertEqual(thread_mock.start.call_count, 1)
    @mock.patch('threading.Thread')
    def test_start_thread_set(self, thread_class_mock):
        # A second start() with a live thread must not spawn another one.
        thread_mock = mock.MagicMock()
        thread_class_mock.return_value = thread_mock
        self.listener._thread = mock.MagicMock()
        self.listener.start()
        self.assertEqual(thread_mock.start.call_count, 0)
    def test_get_result_results_in_queue(self):
        expected_result = 'abcd'
        correlation_id = utils.generate_unicode_uuid()
        self.listener.add_listener(correlation_id)
        self.listener._results.get(correlation_id).put(expected_result)
        result = self.listener.get_result(correlation_id, 5)
        self.assertEqual(result, expected_result)
    def test_get_result_not_in_queue(self):
        correlation_id = utils.generate_unicode_uuid()
        self.listener.add_listener(correlation_id)
        self.assertRaises(
            moves.queue.Empty,
            self.listener.get_result,
            correlation_id,
            1 # timeout
        )
    def test_get_result_lack_of_queue(self):
        correlation_id = utils.generate_unicode_uuid()
        self.assertRaises(
            KeyError,
            self.listener.get_result,
            correlation_id,
            1 # timeout
        )
    def test__on_response_message_ack_fail(self):
        message = mock.MagicMock()
        message.ack.side_effect = Exception('Test Exception')
        response = 'response'
        kombu_listener.LOG = mock.MagicMock()
        self.listener.on_message(response, message)
        self.assertEqual(kombu_listener.LOG.debug.call_count, 1)
        self.assertEqual(kombu_listener.LOG.exception.call_count, 1)
    def test__on_response_message_ack_ok_corr_id_not_match(self):
        message = mock.MagicMock()
        message.properties = mock.MagicMock()
        message.properties.__getitem__ = lambda *args, **kwargs: True
        response = 'response'
        kombu_listener.LOG = mock.MagicMock()
        self.listener.on_message(response, message)
        self.assertEqual(kombu_listener.LOG.debug.call_count, 3)
        self.assertEqual(kombu_listener.LOG.exception.call_count, 0)
    # NOTE(review): 'messsage' typo kept below -- renaming would churn test IDs.
    def test__on_response_message_ack_ok_messsage_type_error(self):
        correlation_id = utils.generate_unicode_uuid()
        message = mock.MagicMock()
        message.properties = dict()
        message.properties['type'] = 'error'
        message.properties['correlation_id'] = correlation_id
        response = TestException('response')
        kombu_listener.LOG = mock.MagicMock()
        self.listener.add_listener(correlation_id)
        self.listener.on_message(response, message)
        self.assertEqual(kombu_listener.LOG.debug.call_count, 2)
        self.assertEqual(kombu_listener.LOG.exception.call_count, 0)
        result = self.listener.get_result(correlation_id, 5)
        self.assertDictEqual(
            result,
            {
                kombu_base.TYPE: 'error',
                kombu_base.RESULT: response
            }
        )
    def test__on_response_message_ack_ok(self):
        correlation_id = utils.generate_unicode_uuid()
        message = mock.MagicMock()
        message.properties = dict()
        message.properties['type'] = None
        message.properties['correlation_id'] = correlation_id
        response = 'response'
        kombu_listener.LOG = mock.MagicMock()
        self.listener.add_listener(correlation_id)
        self.listener.on_message(response, message)
        self.assertEqual(kombu_listener.LOG.debug.call_count, 2)
        self.assertEqual(kombu_listener.LOG.exception.call_count, 0)
        result = self.listener.get_result(correlation_id, 5)
        self.assertDictEqual(
            result,
            {
                kombu_base.TYPE: None,
                kombu_base.RESULT: response
            }
        )
| {
"content_hash": "b70e2b84f652f80df51ac4a962c8bbb0",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 77,
"avg_line_length": 30.480392156862745,
"alnum_prop": 0.6326793181087166,
"repo_name": "StackStorm/mistral",
"id": "53f42df03c9e0e407f670cc2f9e94d06a98dcca9",
"size": "6855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/tests/unit/rpc/kombu/test_kombu_listener.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1494"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2249335"
},
{
"name": "Shell",
"bytes": "31326"
}
],
"symlink_target": ""
} |
"""Clean up resources from gcp projects. """
import argparse
import collections
import datetime
import json
import os
import subprocess
import sys
# A resource that need to be cleared.
# Fields: name/group select the 'gcloud compute' subcommand; condition is the
# scoping column ('zone' or 'region') when the resource type is scoped;
# managed filters instance groups by their isManaged flag; tolerate --
# presumably marks deletions that are allowed to fail (confirm in
# clear_resources).
Resource = collections.namedtuple('Resource', 'name group condition managed tolerate')
DEMOLISH_ORDER = [
    # [WARNING FROM KRZYZACY] : TOUCH THIS WITH CARE!
    # ORDER REALLY MATTERS HERE!
    Resource('instances', None, 'zone', None, False),
    Resource('addresses', None, 'region', None, False),
    Resource('disks', None, 'zone', None, False),
    Resource('firewall-rules', None, None, None, False),
    Resource('routes', None, None, None, False),
    Resource('forwarding-rules', None, 'region', None, False),
    Resource('target-http-proxies', None, None, None, False),
    Resource('target-https-proxies', None, None, None, False),
    Resource('url-maps', None, None, None, False),
    Resource('backend-services', None, 'region', None, False),
    Resource('target-pools', None, 'region', None, False),
    Resource('health-checks', None, None, None, False),
    Resource('http-health-checks', None, None, None, False),
    Resource('instance-groups', None, 'zone', 'Yes', False),
    Resource('instance-groups', None, 'zone', 'No', False),
    Resource('instance-templates', None, None, None, False),
    Resource('networks', 'subnets', 'region', None, True),
    Resource('networks', None, '', None, False),
]
def collect(project, age, resource, filt):
    """Collect a list of resources for each condition (zone or region).

    Args:
        project: The name of a gcp project.
        age: Time cutoff (naive UTC datetime) from the creation of a resource.
        resource: Definition of a type of gcloud resource (Resource namedtuple).
        filt: Filter clause for gcloud list command.
    Returns:
        A dict of condition value -> list of resource names older than `age`.
    Raises:
        ValueError if json result from gcloud is invalid.
    """
    col = collections.defaultdict(list)
    cmd = ['gcloud', 'compute', '-q', resource.name]
    if resource.group:
        cmd.append(resource.group)
    # Ask gcloud for only the fields we need; timestamps are rendered in UTC
    # so they can be compared against `age` below.
    cmd.extend([
        'list',
        '--format=json(name,creationTimestamp.date(tz=UTC),zone,region,isManaged)',
        '--filter=%s' % filt,
        '--project=%s' % project])
    print '%r' % cmd
    for item in json.loads(subprocess.check_output(cmd)):
        print '%r' % item
        if 'name' not in item or 'creationTimestamp' not in item:
            raise ValueError('%r' % item)
        # Group by the scoping key (zone/region) when one applies,
        # otherwise everything goes under the '' (global) bucket.
        if resource.condition and resource.condition in item:
            colname = item[resource.condition]
        else:
            colname = ''
        if resource.managed:
            if 'isManaged' not in item:
                raise ValueError(resource.name, resource.managed)
            else:
                # Skip items whose managed state does not match the filter.
                if resource.managed != item['isManaged']:
                    continue
        # Unify datetime to use utc timezone.
        created = datetime.datetime.strptime(item['creationTimestamp'], '%Y-%m-%dT%H:%M:%S')
        print ('Found %r(%r), %r in %r, created time = %r' %
               (resource.name, resource.group, item['name'], colname, item['creationTimestamp']))
        if created < age:
            print ('Added to janitor list: %r(%r), %r' %
                   (resource.name, resource.group, item['name']))
            col[colname].append(item['name'])
    return col
def clear_resources(project, cols, resource):
    """Clear a collection of resource, from collect func above.

    Args:
        project: The name of a gcp project.
        cols: A dict of condition value -> list of resource names to delete.
        resource: Definition of a type of gcloud resource.
    Returns:
        0 if no error
        1 if deletion command fails (unless resource.tolerate is set)
    """
    err = 0
    for col, items in cols.items():
        if ARGS.dryrun:
            print ('Resource type %r(%r) to be deleted: %r' %
                   (resource.name, resource.group, list(items)))
            continue
        manage_key = {'Yes':'managed', 'No':'unmanaged'}
        # construct the customized gcloud command
        base = ['gcloud', 'compute', '-q', resource.name]
        if resource.group:
            base.append(resource.group)
        if resource.managed:
            base.append(manage_key[resource.managed])
        base.append('delete')
        base.append('--project=%s' % project)
        # Scope the delete to the zone/region bucket; an empty bucket name
        # means the resource is global.
        if resource.condition:
            if col:
                base.append('--%s=%s' % (resource.condition, col))
            else:
                base.append('--global')
        print 'Call %r' % base
        try:
            subprocess.check_call(base + list(items))
        except subprocess.CalledProcessError as exc:
            # Tolerated resources (e.g. subnets) do not fail the run.
            if not resource.tolerate:
                err = 1
            print >>sys.stderr, 'Error try to delete resources: %r' % exc
    return err
def clean_gke_cluster(project, age, filt):
    """Clean up potentially leaking gke clusters.

    Checks the test, staging and prod container API endpoints, since a
    cluster can be created against any one of them.

    Args:
        project: The name of a gcp project.
        age: Time cutoff (naive UTC datetime) from the creation of a cluster.
        filt: Filter clause for the gcloud list command.
    Returns:
        0 if no error, 1 if a delete command fails.
    Raises:
        ValueError if a listed cluster is missing required fields.
    """
    # a cluster can be created in one of those three endpoints
    endpoints = [
        'https://test-container.sandbox.googleapis.com/',  # test
        'https://staging-container.sandbox.googleapis.com/',  # staging
        'https://container.googleapis.com/',  # prod
    ]
    err = 0
    for endpoint in endpoints:
        # Point gcloud at the endpoint under inspection.
        os.environ['CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER'] = endpoint
        print "checking endpoint %s" % endpoint
        cmd = [
            'gcloud', 'container', '-q', 'clusters', 'list',
            '--project=%s' % project,
            '--filter=%s' % filt,
            '--format=json(name,createTime,zone)'
        ]
        print 'running %s' % cmd
        output = ''
        try:
            output = subprocess.check_output(cmd)
        except subprocess.CalledProcessError as exc:
            # An unreachable endpoint is expected for some projects; move on.
            print >>sys.stderr, 'Cannot reach endpoint %s with %r, continue' % (endpoint, exc)
            continue
        for item in json.loads(output):
            print 'cluster info: %r' % item
            if 'name' not in item or 'createTime' not in item or 'zone' not in item:
                print >>sys.stderr, 'name, createTime and zone must present'
                raise ValueError('%r' % item)
            # The raw createTime string looks like 2017-08-30T18:33:14+00:00
            # Which python 2.7 does not support timezones.
            # Since age is already in UTC time we'll just strip the timezone part
            item['createTime'] = item['createTime'].split('+')[0]
            created = datetime.datetime.strptime(
                item['createTime'], '%Y-%m-%dT%H:%M:%S')
            if created < age:
                print ('Found stale gke cluster %r in %r, created time = %r' %
                       (item['name'], endpoint, item['createTime']))
                delete = [
                    'gcloud', 'container', '-q', 'clusters', 'delete',
                    item['name'],
                    '--project=%s' % project,
                    '--zone=%s' % item['zone'],
                ]
                try:
                    print 'running %s' % delete
                    subprocess.check_call(delete)
                except subprocess.CalledProcessError as exc:
                    err = 1
                    print >>sys.stderr, 'Error try to delete cluster %s: %r' % (item['name'], exc)
    return err
def main(project, days, hours, filt):
    """Clean up resources from a gcp project based on their creation time.

    Args:
        project: The name of a gcp project.
        days/hours: days/hours of maximum lifetime of a gcp resource.
        filt: Resource instance filters when query.

    Note: this function does not return; it calls sys.exit with
        0 if no error, 1 if any list or delete command failed.
    """
    print '[=== Start Janitor on project %r ===]' % project
    err = 0
    age = datetime.datetime.utcnow() - datetime.timedelta(days=days, hours=hours)
    for res in DEMOLISH_ORDER:
        print 'Try to search for %r with condition %r' % (res.name, res.condition)
        try:
            col = collect(project, age, res, filt)
            if col:
                err |= clear_resources(project, col, res)
        except (subprocess.CalledProcessError, ValueError):
            err |= 1  # keep cleaning the other resources
            print >>sys.stderr, 'Fail to list resource %r from project %r' % (res.name, project)
    # try to clean leaking gke cluster
    if 'gke' in project:
        try:
            err |= clean_gke_cluster(project, age, filt)
        except ValueError:
            err |= 1  # keep cleaning the other resources
            print >>sys.stderr, 'Fail to clean up cluster from project %r' % project
    print '[=== Finish Janitor on project %r with status %r ===]' % (project, err)
    sys.exit(err)
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run the janitor.
    PARSER = argparse.ArgumentParser(
        description='Clean up resources from an expired project')
    PARSER.add_argument('--project', help='Project to clean', required=True)
    PARSER.add_argument(
        '--days', type=int,
        help='Clean items more than --days old (added to --hours)')
    PARSER.add_argument(
        '--hours', type=float,
        help='Clean items more than --hours old (added to --days)')
    PARSER.add_argument(
        '--filter',
        default='NOT tags.items:do-not-delete AND NOT name ~ ^default',
        help='Filter down to these instances')
    PARSER.add_argument(
        '--dryrun',
        default=False,
        action='store_true',
        help='list but not delete resources')
    ARGS = PARSER.parse_args()
    # We want to allow --days=0 and --hours=0, so check against None instead.
    if ARGS.days is None and ARGS.hours is None:
        print >>sys.stderr, 'must specify --days and/or --hours'
        sys.exit(1)
    main(ARGS.project, ARGS.days or 0, ARGS.hours or 0, ARGS.filter)
| {
"content_hash": "ac6723ab5be938feb39c3a1d861046f1",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 98,
"avg_line_length": 37.213740458015266,
"alnum_prop": 0.5822564102564103,
"repo_name": "krousey/test-infra",
"id": "054d0b1bee5e110cc7ed8448b5b556d9ecf0a1c4",
"size": "10361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "boskos/janitor/janitor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14741"
},
{
"name": "Go",
"bytes": "2991878"
},
{
"name": "HTML",
"bytes": "60941"
},
{
"name": "JavaScript",
"bytes": "147542"
},
{
"name": "Makefile",
"bytes": "47366"
},
{
"name": "Python",
"bytes": "889612"
},
{
"name": "Roff",
"bytes": "11821"
},
{
"name": "Shell",
"bytes": "74312"
},
{
"name": "Smarty",
"bytes": "516"
}
],
"symlink_target": ""
} |
from .helper import *
| {
"content_hash": "ae44ab7dc4d5728c1fc31dab269a0816",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 21,
"avg_line_length": 11.5,
"alnum_prop": 0.6956521739130435,
"repo_name": "SarahWooller/Using_NetSurfP",
"id": "bdfdb117ccbb50980ad170207b74cf0291bf7e54",
"size": "24",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lib/Using_NetSurfP/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "91601"
},
{
"name": "Python",
"bytes": "10971"
}
],
"symlink_target": ""
} |
# Case types used by the custom GCC Sangath integration.
SESSION_CASE_TYPE = 'session'
PEER_RATING_CASE_TYPE = 'peer_rating'
# Case property names read/written on the cases above.
SESSION_RATING_CASE_PROP = 'session_rating'
MEAN_TREATMENT_SPECIFIC_SCORE_CASE_PROP = 'mean_treatment_specific_score'
MEAN_GENERAL_SKILLS_SCORE_CASE_PROP = 'mean_general_skills_score'
DATE_OF_PEER_REVIEW_CASE_PROP = 'date_of_peer_review'
# Aggregation flag: cases with this property set to the "no" value below
# are skipped by the aggregation step.
NEEDS_AGGREGATION_CASE_PROP = 'needs_aggregation'
NEEDS_AGGREGATION_NO_VALUE = 'no'
| {
"content_hash": "3526c24af9992bc586d9166b7a409b7f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 73,
"avg_line_length": 39.2,
"alnum_prop": 0.7678571428571429,
"repo_name": "dimagi/commcare-hq",
"id": "91393f583e1877f19d3b58b25a52506943677c32",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/gcc_sangath/const.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
# 2D list: each row is [country, capital, population (implied units)].
countries = [['China','Beijing',1350],
             ['India','Delhi',1210],
             ['Romania','Bucharest',21],
             ['United States','Washington',307]]
# Write code to print out the capital of India
# by accessing the array.
# Row 1 is India; column 1 of a row is the capital.
print countries[1][1]
| {
"content_hash": "d251f550c87901d3acda1ea135703c43",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 48,
"avg_line_length": 29.22222222222222,
"alnum_prop": 0.5817490494296578,
"repo_name": "xala3pa/Computer-Science-cs101",
"id": "4d2a0fb23c8ec07f59ba5ca4bff75aedda3f2a4c",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lesson3/countries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84588"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from oslo_config import cfg
from oslo_log import log as logging
from senlin.common import constraints
from senlin.common import consts
from senlin.common import exception as exc
from senlin.common.i18n import _
from senlin.common import scaleutils
from senlin.common import schema
from senlin.engine import health_manager
from senlin.policies import base
LOG = logging.getLogger(__name__)
class HealthPolicy(base.Policy):
    """Policy for health management of a cluster."""
    VERSION = '1.2'
    # Lifecycle of each spec version of this policy.
    VERSIONS = {
        '1.0': [
            {'status': consts.EXPERIMENTAL, 'since': '2017.02'},
            {'status': consts.SUPPORTED, 'since': '2018.06'},
        ],
        '1.1': [
            {'status': consts.SUPPORTED, 'since': '2018.09'}
        ],
        '1.2': [
            {'status': consts.SUPPORTED, 'since': '2020.09'}
        ],
    }
    PRIORITY = 600
    # Action hooks this policy participates in (see pre_op/post_op).
    TARGET = [
        ('BEFORE', consts.CLUSTER_RECOVER),
        ('BEFORE', consts.CLUSTER_DEL_NODES),
        ('BEFORE', consts.CLUSTER_SCALE_IN),
        ('BEFORE', consts.CLUSTER_RESIZE),
        ('BEFORE', consts.CLUSTER_UPDATE),
        # NOTE(review): ('BEFORE', consts.CLUSTER_RECOVER) is duplicated --
        # it already appears as the first entry of this list. Confirm whether
        # duplicate targets are harmless before removing.
        ('BEFORE', consts.CLUSTER_RECOVER),
        ('BEFORE', consts.CLUSTER_REPLACE_NODES),
        ('BEFORE', consts.NODE_DELETE),
        ('AFTER', consts.CLUSTER_DEL_NODES),
        ('AFTER', consts.CLUSTER_SCALE_IN),
        ('AFTER', consts.CLUSTER_RESIZE),
        ('AFTER', consts.CLUSTER_UPDATE),
        ('AFTER', consts.CLUSTER_RECOVER),
        ('AFTER', consts.CLUSTER_REPLACE_NODES),
        ('AFTER', consts.NODE_DELETE),
    ]
    # Should be ANY if profile provides health check support?
    PROFILE_TYPE = [
        'os.nova.server',
        'os.heat.stack',
    ]
    # Top-level spec sections.
    KEYS = (DETECTION, RECOVERY) = ('detection', 'recovery')
    # Keys inside the 'detection' section.
    _DETECTION_KEYS = (
        DETECTION_MODES, DETECTION_TYPE, DETECTION_OPTIONS, DETECTION_INTERVAL,
        NODE_UPDATE_TIMEOUT, RECOVERY_CONDITIONAL
    ) = (
        'detection_modes', 'type', 'options', 'interval',
        'node_update_timeout', 'recovery_conditional'
    )
    # Keys inside a detection mode's 'options' map.
    _DETECTION_OPTIONS = (
        POLL_URL, POLL_URL_SSL_VERIFY,
        POLL_URL_CONN_ERROR_AS_UNHEALTHY, POLL_URL_HEALTHY_RESPONSE,
        POLL_URL_RETRY_LIMIT, POLL_URL_RETRY_INTERVAL,
    ) = (
        'poll_url', 'poll_url_ssl_verify',
        'poll_url_conn_error_as_unhealthy', 'poll_url_healthy_response',
        'poll_url_retry_limit', 'poll_url_retry_interval'
    )
    # Keys inside the 'recovery' section.
    _RECOVERY_KEYS = (
        RECOVERY_ACTIONS, RECOVERY_FENCING, RECOVERY_DELETE_TIMEOUT,
        RECOVERY_FORCE_RECREATE,
    ) = (
        'actions', 'fencing', 'node_delete_timeout', 'node_force_recreate',
    )
    # Supported fencing targets (only compute is implemented).
    FENCING_OPTION_VALUES = (
        COMPUTE,
        # STORAGE, NETWORK,
    ) = (
        'COMPUTE',
        # 'STORAGE', 'NETWORK'
    )
    # Keys of each recovery action entry.
    ACTION_KEYS = (
        ACTION_NAME, ACTION_PARAMS,
    ) = (
        'name', 'params',
    )
properties_schema = {
DETECTION: schema.Map(
_('Policy aspect for node failure detection.'),
schema={
DETECTION_INTERVAL: schema.Integer(
_("Number of seconds between pollings. Only "
"required when type is 'NODE_STATUS_POLLING' or "
"'NODE_STATUS_POLL_URL' or 'HYPERVISOR_STATUS_POLLING."),
default=60,
),
NODE_UPDATE_TIMEOUT: schema.Integer(
_("Number of seconds since last node update to "
"wait before checking node health."),
default=300,
),
RECOVERY_CONDITIONAL: schema.String(
_("The conditional that determines when recovery should be"
" performed in case multiple detection modes are "
"specified. 'ALL_FAILED' means that all "
"detection modes have to return failed health checks "
"before a node is recovered. 'ANY_FAILED'"
" means that a failed health check with a single "
"detection mode triggers a node recovery."),
constraints=[
constraints.AllowedValues(
consts.RECOVERY_CONDITIONAL),
],
default=consts.ANY_FAILED,
required=False,
),
DETECTION_MODES: schema.List(
_('List of node failure detection modes.'),
schema=schema.Map(
_('Node failure detection mode to try'),
schema={
DETECTION_TYPE: schema.String(
_('Type of node failure detection.'),
constraints=[
constraints.AllowedValues(
consts.DETECTION_TYPES),
],
required=True,
),
DETECTION_OPTIONS: schema.Map(
schema={
POLL_URL: schema.String(
_("URL to poll for node status. See "
"documentation for valid expansion "
"parameters. Only required "
"when type is "
"'NODE_STATUS_POLL_URL'."),
default='',
),
POLL_URL_SSL_VERIFY: schema.Boolean(
_("Whether to verify SSL when calling "
"URL to poll for node status. Only "
"required when type is "
"'NODE_STATUS_POLL_URL'."),
default=True,
),
POLL_URL_CONN_ERROR_AS_UNHEALTHY:
schema.Boolean(
_("Whether to treat URL connection "
"errors as an indication of an "
"unhealthy node. Only required "
"when type is "
"'NODE_STATUS_POLL_URL'."),
default=True,
),
POLL_URL_HEALTHY_RESPONSE: schema.String(
_("String pattern in the poll URL "
"response body that indicates a "
"healthy node. Required when type "
"is 'NODE_STATUS_POLL_URL'."),
default='',
),
POLL_URL_RETRY_LIMIT: schema.Integer(
_("Number of times to retry URL "
"polling when its return body is "
"missing POLL_URL_HEALTHY_RESPONSE "
"string before a node is considered "
"down. Required when type is "
"'NODE_STATUS_POLL_URL'."),
default=3,
),
POLL_URL_RETRY_INTERVAL: schema.Integer(
_("Number of seconds between URL "
"polling retries before a node is "
"considered down. Required when "
"type is 'NODE_STATUS_POLL_URL'."),
default=3,
),
},
default={}
),
}
)
)
},
required=True,
),
RECOVERY: schema.Map(
_('Policy aspect for node failure recovery.'),
schema={
RECOVERY_ACTIONS: schema.List(
_('List of actions to try for node recovery.'),
schema=schema.Map(
_('Action to try for node recovery.'),
schema={
ACTION_NAME: schema.String(
_("Name of action to execute."),
constraints=[
constraints.AllowedValues(
consts.RECOVERY_ACTIONS),
],
required=True
),
ACTION_PARAMS: schema.Map(
_("Parameters for the action")
),
}
)
),
RECOVERY_FENCING: schema.List(
_('List of services to be fenced.'),
schema=schema.String(
_('Service to be fenced.'),
constraints=[
constraints.AllowedValues(FENCING_OPTION_VALUES),
],
required=True,
),
),
RECOVERY_DELETE_TIMEOUT: schema.Integer(
_("Number of seconds to wait for node deletion to "
"finish and start node creation for recreate "
"recovery option. Required when type is "
"'NODE_STATUS_POLL_URL and recovery action "
"is RECREATE'."),
default=20,
),
RECOVERY_FORCE_RECREATE: schema.Boolean(
_("Whether to create node even if node deletion "
"failed. Required when type is "
"'NODE_STATUS_POLL_URL' and action recovery "
"action is RECREATE."),
default=False,
),
},
required=True,
),
}
    def __init__(self, name, spec, **kwargs):
        """Initialize the policy from its validated spec.

        Extracts detection and recovery settings into attributes so the
        rest of the policy never touches the raw spec map again.
        """
        super(HealthPolicy, self).__init__(name, spec, **kwargs)
        self.interval = self.properties[self.DETECTION].get(
            self.DETECTION_INTERVAL, 60)
        self.node_update_timeout = self.properties[self.DETECTION].get(
            self.NODE_UPDATE_TIMEOUT, 300)
        self.recovery_conditional = self.properties[self.DETECTION].get(
            self.RECOVERY_CONDITIONAL, consts.ANY_FAILED)
        # One namedtuple per detection mode: its type plus all poll-URL
        # options flattened into positional fields.
        DetectionMode = namedtuple(
            'DetectionMode',
            [self.DETECTION_TYPE] + list(self._DETECTION_OPTIONS))
        self.detection_modes = []
        raw_modes = self.properties[self.DETECTION][self.DETECTION_MODES]
        for mode in raw_modes:
            options = mode[self.DETECTION_OPTIONS]
            self.detection_modes.append(
                DetectionMode(
                    mode[self.DETECTION_TYPE],
                    options.get(self.POLL_URL, ''),
                    options.get(self.POLL_URL_SSL_VERIFY, True),
                    options.get(self.POLL_URL_CONN_ERROR_AS_UNHEALTHY, True),
                    options.get(self.POLL_URL_HEALTHY_RESPONSE, ''),
                    options.get(self.POLL_URL_RETRY_LIMIT, ''),
                    options.get(self.POLL_URL_RETRY_INTERVAL, '')
                )
            )
        recover_settings = self.properties[self.RECOVERY]
        self.recover_actions = recover_settings[self.RECOVERY_ACTIONS]
        self.fencing_types = recover_settings[self.RECOVERY_FENCING]
        self.node_delete_timeout = recover_settings.get(
            self.RECOVERY_DELETE_TIMEOUT, None)
        self.node_force_recreate = recover_settings.get(
            self.RECOVERY_FORCE_RECREATE, False)
    def validate(self, context, validate_props=False):
        """Validate the policy spec beyond schema-level checks.

        :param context: The request context.
        :param validate_props: Whether to also validate property values.
        :raises exc.ESchema: if more than one recovery action is given.
        :raises exc.InvalidSpec: if the interval is below the configured
            minimum, or the detection mode combination is invalid.
        """
        super(HealthPolicy, self).validate(context,
                                           validate_props=validate_props)
        # Only a single recovery action is currently supported.
        if len(self.recover_actions) > 1:
            message = _("Only one '%s' is supported for now."
                        ) % self.RECOVERY_ACTIONS
            raise exc.ESchema(message=message)
        if self.interval < cfg.CONF.health_check_interval_min:
            message = _("Specified interval of %(interval)d seconds has to be "
                        "larger than health_check_interval_min of "
                        "%(min_interval)d seconds set in configuration."
                        ) % {"interval": self.interval,
                             "min_interval":
                                 cfg.CONF.health_check_interval_min}
            raise exc.InvalidSpec(message=message)
        # check valid detection types: either all polling types, or exactly
        # one LIFECYCLE_EVENTS mode -- mixing the two is not allowed.
        polling_types = [consts.NODE_STATUS_POLLING,
                         consts.NODE_STATUS_POLL_URL,
                         consts.HYPERVISOR_STATUS_POLLING]
        has_valid_polling_types = all(
            d.type in polling_types
            for d in self.detection_modes
        )
        has_valid_lifecycle_type = (
            len(self.detection_modes) == 1 and
            self.detection_modes[0].type == consts.LIFECYCLE_EVENTS
        )
        if not has_valid_polling_types and not has_valid_lifecycle_type:
            message = ("Invalid detection modes in health policy: %s" %
                       ', '.join([d.type for d in self.detection_modes]))
            raise exc.InvalidSpec(message=message)
        # namedtuples are hashable, so a set detects exact duplicates.
        if len(self.detection_modes) != len(set(self.detection_modes)):
            message = ("Duplicate detection modes are not allowed in "
                       "health policy: %s" %
                       ', '.join([d.type for d in self.detection_modes]))
            raise exc.InvalidSpec(message=message)
        # TODO(Qiming): Add detection of duplicated action names when
        # support to list of actions is implemented.
    def attach(self, cluster, enabled=True):
        """Hook for policy attach.

        Register the cluster for health management.

        :param cluster: The cluster to which the policy is being attached to.
        :param enabled: The attached cluster policy is enabled or disabled.
        :return: A tuple comprising execution result and policy data.
        """
        p_type = cluster.rt['profile'].type_name
        action_names = [a['name'] for a in self.recover_actions]
        # REBUILD/REBOOT only apply to Nova servers; reject other profiles.
        if p_type != 'os.nova.server':
            if consts.RECOVER_REBUILD in action_names:
                err_msg = _("Recovery action REBUILD is only applicable to "
                            "os.nova.server clusters.")
                return False, err_msg
            if consts.RECOVER_REBOOT in action_names:
                err_msg = _("Recovery action REBOOT is only applicable to "
                            "os.nova.server clusters.")
                return False, err_msg
        kwargs = {
            'interval': self.interval,
            'node_update_timeout': self.node_update_timeout,
            'params': {
                'recover_action': self.recover_actions,
                'node_delete_timeout': self.node_delete_timeout,
                'node_force_recreate': self.node_force_recreate,
                'recovery_conditional': self.recovery_conditional,
            },
            'enabled': enabled
        }
        # Convert DetectionMode namedtuples to plain dicts for registration.
        converted_detection_modes = [
            d._asdict() for d in self.detection_modes
        ]
        detection_mode = {'detection_modes': converted_detection_modes}
        kwargs['params'].update(detection_mode)
        ret = health_manager.register(cluster.id, engine_id=None, **kwargs)
        if not ret:
            LOG.warning('Registering health manager for cluster %s '
                        'timed out.', cluster.id)
            err_msg = _("Registering health manager for cluster timed out.")
            return False, err_msg
        data = {
            'interval': self.interval,
            'node_update_timeout': self.node_update_timeout,
            'recovery_conditional': self.recovery_conditional,
            'node_delete_timeout': self.node_delete_timeout,
            'node_force_recreate': self.node_force_recreate,
        }
        data.update(detection_mode)
        return True, self._build_policy_data(data)
def detach(self, cluster):
"""Hook for policy detach.
Unregister the cluster for health management.
:param cluster: The target cluster.
:returns: A tuple comprising the execution result and reason.
"""
ret = health_manager.unregister(cluster.id)
if not ret:
LOG.warning('Unregistering health manager for cluster %s '
'timed out.', cluster.id)
err_msg = _("Unregistering health manager for cluster timed out.")
return False, err_msg
return True, ''
    def pre_op(self, cluster_id, action, **args):
        """Hook before action execution.

        Disable health policy for actions that modify cluster nodes (e.g.
        scale in, delete nodes, cluster update, cluster recover and cluster
        replace nodes).
        For all other actions, set the health policy data in the action data.

        :param cluster_id: The ID of the target cluster.
        :param action: The action to be examined.
        :param kwargs args: Other keyword arguments to be checked.
        :returns: Boolean indicating whether the checking passed.
        """
        if action.action in (consts.CLUSTER_SCALE_IN,
                             consts.CLUSTER_DEL_NODES,
                             consts.NODE_DELETE,
                             consts.CLUSTER_UPDATE,
                             consts.CLUSTER_RECOVER,
                             consts.CLUSTER_REPLACE_NODES):
            health_manager.disable(cluster_id)
            return True
        if action.action == consts.CLUSTER_RESIZE:
            # Disable health checks only when the resize will remove nodes.
            deletion = action.data.get('deletion', None)
            if deletion:
                health_manager.disable(cluster_id)
                return True
            # Deletion not yet decided; parse the resize parameters to see
            # whether the resize shrinks the cluster.
            cluster = action.entity
            current = len(cluster.nodes)
            res, reason = scaleutils.parse_resize_params(action, cluster,
                                                         current)
            if res == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = reason
                return False
            if action.data.get('deletion', None):
                health_manager.disable(cluster_id)
                return True
        # Non-shrinking action: pass recovery settings along with the action.
        pd = {
            'recover_action': self.recover_actions,
            'fencing': self.fencing_types,
        }
        action.data.update({'health': pd})
        action.store(action.context)
        return True
    def post_op(self, cluster_id, action, **args):
        """Hook after action execution.

        One of the tasks for this routine is to re-enable health policy if the
        action is a request that will shrink the cluster thus the policy has
        been temporarily disabled (see pre_op).

        :param cluster_id: The ID of the target cluster.
        :param action: The action to be examined.
        :param kwargs args: Other keyword arguments to be checked.
        :returns: Boolean indicating whether the checking passed.
        """
        if action.action in (consts.CLUSTER_SCALE_IN,
                             consts.CLUSTER_DEL_NODES,
                             consts.NODE_DELETE,
                             consts.CLUSTER_UPDATE,
                             consts.CLUSTER_RECOVER,
                             consts.CLUSTER_REPLACE_NODES):
            health_manager.enable(cluster_id)
            return True
        if action.action == consts.CLUSTER_RESIZE:
            # Mirror of pre_op: re-enable only if the resize removed nodes.
            deletion = action.data.get('deletion', None)
            if deletion:
                health_manager.enable(cluster_id)
                return True
            cluster = action.entity
            current = len(cluster.nodes)
            res, reason = scaleutils.parse_resize_params(action, cluster,
                                                         current)
            if res == base.CHECK_ERROR:
                action.data['status'] = base.CHECK_ERROR
                action.data['reason'] = reason
                return False
            if action.data.get('deletion', None):
                health_manager.enable(cluster_id)
                return True
        return True
| {
"content_hash": "686dccfc1f12b96f65162c568da81a78",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 79,
"avg_line_length": 41.964356435643566,
"alnum_prop": 0.48367308418271043,
"repo_name": "stackforge/senlin",
"id": "bb1fa6bc5987a5217754468614007aa0c09328f8",
"size": "21741",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/policies/health_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2145946"
},
{
"name": "Shell",
"bytes": "18730"
}
],
"symlink_target": ""
} |
from test_framework.names import NameTestFramework
from test_framework.util import *
class NameExpirationTest (NameTestFramework):
  def set_test_params (self):
    # Four nodes with name-operation debug logging enabled.
    self.setup_name_test ([["-debug=names"]] * 4)
  def checkUTXO (self, ind, name, shouldBeThere):
    """
    Query for a name's coin in the UTXO set and check that it is either
    there or not.

    ind: index of the node to query
    name: the name whose output to look up
    shouldBeThere: expected presence of the coin in the UTXO set
    """
    data = self.nodes[ind].name_show (name)
    txo = self.nodes[ind].gettxout (data['txid'], data['vout'])
    if shouldBeThere:
      assert txo is not None
      assert_equal (txo['scriptPubKey']['nameOp']['name'], name)
    else:
      assert txo is None
  def run_test (self):
    # Start the registration of two names which will be used.  name-long
    # will expire and be reregistered on the short chain, which will be
    # undone with the reorg.  name-short will be updated before expiration
    # on the short chain, but this will be rerolled and the name expire
    # instead on the long chain.  Check that the mempool and the UTXO set
    # behave as they should.
    newLong = self.nodes[0].name_new ("name-long")
    newLong2 = self.nodes[3].name_new ("name-long")
    newShort = self.nodes[3].name_new ("name-short")
    self.generate (1, 12)
    # Register the names.  name-long should expire one block before
    # name-short, so that the situation described above works out.
    updLong = self.firstupdateName (0, "name-long", newLong, "value")
    self.generate (1, 2)
    updShort = self.firstupdateName (3, "name-short", newShort, "value")
    self.generate (1, 27)
    self.checkName (1, "name-long", "value", 2, False)
    self.checkName (1, "name-short", "value", 4, False)
    # Check that the UTXO entries are there.
    self.checkUTXO (1, "name-long", True)
    self.checkUTXO (1, "name-short", True)
    # Split the network.
    self.split_network ()
    # Let name-long expire on the short chain.
    self.generate (2, 2)
    self.checkName (2, "name-long", "value", 0, True)
    self.checkName (2, "name-short", "value", 2, False)
    self.checkUTXO (2, "name-long", False)
    self.checkUTXO (2, "name-short", True)
    # Snatch up name-long and update name-short just-in-time.  Note that
    # "just-in-time" is "expires_in == 2", since when creating the block,
    # it will be "expires_in == 1" already!
    updLong2 = self.firstupdateName (3, "name-long", newLong2, "value 2")
    renewShort = self.nodes[3].name_update ("name-short", "renewed")
    self.generate (2, 1)
    self.checkName (2, "name-long", "value 2", 30, False)
    self.checkName (2, "name-short", "renewed", 30, False)
    self.checkNameHistory (2, "name-long", ["value", "value 2"])
    self.checkNameHistory (2, "name-short", ["value", "renewed"])
    # Create a longer chain on the other part of the network.  Let name-short
    # expire there but renew name-long instead.
    self.nodes[0].name_update ("name-long", "renewed")
    self.generate (1, 5)
    self.checkName (1, "name-long", "renewed", 26, False)
    self.checkName (1, "name-short", "value", -1, True)
    self.checkNameHistory (1, "name-long", ["value", "renewed"])
    self.checkNameHistory (1, "name-short", ["value"])
    self.checkUTXO (1, "name-long", True)
    self.checkUTXO (1, "name-short", False)
    # Join the network and let the long chain prevail.  This should
    # completely revoke all changes on the short chain, including
    # the mempool (since all tx there are conflicts with name expirations).
    assert self.nodes[1].getblockcount () > self.nodes[2].getblockcount ()
    self.join_network ()
    # Test the expected situation of the long chain.
    self.checkName (2, "name-long", "renewed", 26, False)
    self.checkName (2, "name-short", "value", -1, True)
    self.checkNameHistory (2, "name-long", ["value", "renewed"])
    self.checkNameHistory (2, "name-short", ["value"])
    self.checkUTXO (2, "name-long", True)
    self.checkUTXO (2, "name-short", False)
    # Check that the conflicting tx's are removed from the mempool.
    assert_equal (self.nodes[0].getrawmempool (), [])
    assert_equal (self.nodes[3].getrawmempool (), [])
    data = self.nodes[3].gettransaction (updLong2)
    assert data['confirmations'] <= 0
    data = self.nodes[3].gettransaction (renewShort)
    assert data['confirmations'] <= 0
    # Redo the same stuff but now without actually mining the conflicted tx
    # on the short chain.  Make sure that the mempool cleaning works as expected
    # also in this case.
    #
    # name-unexpired will unexpire in the chain reorg, which means that we
    # will try to re-register it on the short chain.
    #
    # name-expired will expire in the chain reorg, which means that we try
    # to update it on the short chain (but that will be too late for the
    # long one after the reorg).
    newUnexpired = self.nodes[0].name_new ("name-unexpired")
    newExpired = self.nodes[3].name_new ("name-expired")
    newSnatch = self.nodes[3].name_new ("name-unexpired")
    self.generate (1, 12)
    self.firstupdateName (0, "name-unexpired", newUnexpired, "value")
    self.generate (1, 2)
    self.firstupdateName (3, "name-expired", newExpired, "value")
    self.generate (1, 27)
    self.checkName (1, "name-unexpired", "value", 2, False)
    self.checkName (1, "name-expired", "value", 4, False)
    self.split_network ()
    self.generate (2, 2)
    self.checkName (2, "name-unexpired", "value", 0, True)
    self.checkName (2, "name-expired", "value", 2, False)
    # NOTE(review): the variable names below look swapped -- updExpired
    # holds the re-registration of name-UNexpired and updUnexpired holds
    # the update of name-EXpired.  Behavior is unaffected; confirm and
    # consider renaming.
    updExpired = self.firstupdateName (3, "name-unexpired", newSnatch,
                                       "value 2")
    updUnexpired = self.nodes[3].name_update ("name-expired", "renewed")
    mempoolShort = self.nodes[3].getrawmempool ()
    assert updExpired in mempoolShort
    assert updUnexpired in mempoolShort
    self.nodes[0].name_update ("name-unexpired", "renewed")
    self.generate (1, 5)
    self.checkName (1, "name-unexpired", "renewed", 26, False)
    self.checkName (1, "name-expired", "value", -1, True)
    assert self.nodes[1].getblockcount () > self.nodes[2].getblockcount ()
    self.join_network ()
    # After the reorg both mempools must be cleaned of the conflicted tx.
    assert_equal (self.nodes[0].getrawmempool (), [])
    assert_equal (self.nodes[3].getrawmempool (), [])
if __name__ == '__main__':
  # Run the functional test when invoked directly.
  NameExpirationTest ().main ()
| {
"content_hash": "e5613f59001c717f8106d054fac627ca",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 80,
"avg_line_length": 42.16778523489933,
"alnum_prop": 0.6551010663695687,
"repo_name": "brandonrobertz/namecoin-core",
"id": "2746eef41f1bcd41be73058413923f4db69b8807",
"size": "6613",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/name_expiration.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "688659"
},
{
"name": "C++",
"bytes": "5733984"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "194059"
},
{
"name": "Makefile",
"bytes": "114172"
},
{
"name": "Objective-C",
"bytes": "5737"
},
{
"name": "Objective-C++",
"bytes": "6763"
},
{
"name": "Python",
"bytes": "1363751"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "67429"
}
],
"symlink_target": ""
} |
import mock
from sherlock.batch.ingest_multiple_dates import ParallelEtStepper, ETLStepper
from sherlock.batch.ingest_multiple_dates import ETLStep, _executor
from sherlock.batch.ingest_multiple_dates import KeyboardInterruptError
from sherlock.batch.ingest_multiple_dates import parse_command_line
from sherlock.batch.ingest_multiple_dates import ingest_multiple_dates_main
import staticconf.testing
from tests.data.mock_config import MOCK_CONFIG
from multiprocessing import ProcessError
import pytest
# Baseline argv shared by every test invocation of the ingestion CLI;
# per-case flags are appended to this list before parsing.
dummy_args = [
    'ingest_multiple_dates.py',
    '--io_yaml',
    'pipeline_io.yaml',
    '--config',
    'config.yaml',
    '--private',
    'user_session.yaml',
    '--config-override',
    'co.yaml',
    'db.yaml'
]
# Fixed CPU count passed to _setup_pool_size so pool-size expectations
# are deterministic regardless of the machine running the tests.
DUMMY_CPU_COUNT = 8
@pytest.fixture(params=[
    ['-r', '-s', '2014-05-01', '-e', '2014-05-01'],
    ['-r', '-s', '2014-05-01', '-e', '2014-05-12', '--retry-errors'],
    ['-r', '-s', '2014-05-12', '-e', '2014-05-01', '-p', '5'],
    ['-s', '2014-05-01', '-e', '2014-07-12', '-p', '50'],
    ['-s', '2014-05-01', '-e', '2014-05-02', '-p', '5'],
    ['-s', '2014-05-01', '-e', '2014-05-02', '-p', '9'],
    ['-s', '2014-05-01', '-e', '2014-05-12', '-p', '9'],
    ['-s', '2014-05-01', '-e', '2014-05-12', '-p', '-7'],
    ['-s', '2014-05-01', '-e', '2014-05-12', '-p', '9',
     '--exceed-max-processes'],
    ['-r', '-s', '2014-05-01', '-e', '2014-05-01',
     '--load-polling-interval', '1'],
])
def et_steppers(request):
    """Build both stepper implementations from one parsed CLI namespace."""
    namespace = parse_command_line(dummy_args + request.param)
    return [cls(namespace) for cls in (ParallelEtStepper, ETLStepper)]
@pytest.mark.parametrize("input_args, expected_value", [
    (['-r', '-s', '2014-05-01', '-e', '2014-05-12', '--retry-errors'], 1),
    (['-r', '-s', '2014-05-12', '-e', '2014-05-01', '-p', '5'], 5),
    (['-s', '2014-05-01', '-e', '2014-07-12', '-p', '50'], DUMMY_CPU_COUNT),
    (['-s', '2014-05-01', '-e', '2014-05-02', '-p', '5'], 2),
    (['-s', '2014-05-01', '-e', '2014-05-02', '-p', '9'], 2),
    (['-s', '2014-05-01', '-e', '2014-05-12', '-p', '9'], DUMMY_CPU_COUNT),
    (['-s', '2014-05-01', '-e', '2014-05-12', '-p', '-7'], 1),
    (['-s', '2014-05-01', '-e', '2014-05-12', '-p', '9',
      '--exceed-max-processes'], 9)
])
def test_pool_size(input_args, expected_value):
    """Pool size is clamped to date span / CPU count unless overridden."""
    namespace = parse_command_line(dummy_args + input_args)
    stepper = ParallelEtStepper(namespace)
    stepper._setup_pool_size(namespace, DUMMY_CPU_COUNT)
    assert stepper.pool_size == expected_value
def test_construct_steps(et_steppers):
    """Each generator yields one step per day, of the matching step type."""
    for stepper in et_steppers:
        stepper._construct_etl_generator()
        load_steps = list(stepper.load_generator)
        et_steps = list(stepper.et_generator)
        expected_count = (stepper.end - stepper.start).days + 1
        assert len(load_steps) == expected_count
        assert len(et_steps) == expected_count
        assert all(step.step_type == 'et' for step in et_steps)
        assert all(step.step_type == 'load' for step in load_steps)
class EtlTest(ETLStep):
    # Minimal concrete ETLStep whose execute() always succeeds.
    def __init__(self):
        # None stands in for the connection argument, 'YYYY/MM/DD' for the
        # date string, and 'type' for the step type -- presumably matching
        # ETLStep(connection, date, step_type); verify against the base class.
        super(EtlTest, self).__init__(None, 'YYYY/MM/DD', 'type')
    def execute(self):
        # Intentional no-op: models a step that completes without error.
        pass
class EtlErrorTest(EtlTest):
    # ETLStep double whose execute() raises the configured exception type.
    def __init__(self, error_type):
        super(EtlErrorTest, self).__init__()
        # Exception class (not an instance) to raise from execute().
        self.error_type = error_type
    def execute(self):
        raise self.error_type
def validate_execution_results(results, status='success'):
    """Assert every result dict carries the expected status and step type.

    For successful results, additionally assert that no error info was
    recorded.
    """
    for result in results:
        assert result['status'] == status
        assert result['type'] == 'type'
        if status == 'success':
            assert result['error_info'] == {}
def test__executor():
    # A successful step yields a result dict with status 'success'.
    validate_execution_results([_executor(EtlTest())])
    # A raising step is converted into a status 'error' result rather than
    # propagating the exception.
    validate_execution_results([_executor(EtlErrorTest(Exception))], 'error')
    # KeyboardInterrupt is re-raised as KeyboardInterruptError so it can
    # cross multiprocessing pool boundaries.
    with pytest.raises(KeyboardInterruptError):
        _executor(EtlErrorTest(KeyboardInterrupt))
def test_step_execution(et_steppers):
    """Steppers execute constructed steps and report per-step results."""
    def fake_construct(days_from_start, step_type):
        return EtlTest()
    for stepper in et_steppers:
        stepper._construct_step = fake_construct
        validate_execution_results(stepper.execute_et_steps())
        validate_execution_results(stepper.execute_load_steps())
@pytest.mark.parametrize("error_type, exception_type", [
    (Exception, ProcessError),
    (KeyboardInterrupt, BaseException),
])
def test_error_handling(et_steppers, error_type, exception_type):
    """A failing step surfaces as the expected pool-level exception."""
    def failing_construct(days_from_start, step_type):
        return EtlErrorTest(error_type)
    for stepper in et_steppers:
        stepper._construct_step = failing_construct
        with pytest.raises(exception_type):
            stepper.execute_et_steps()
        with pytest.raises(exception_type):
            stepper.execute_load_steps()
@pytest.mark.parametrize("optional_args", [
    ['-r', '-s', '2014-05-01', '-e', '2014/05/02'],
    ['-r', '-s', '2014-05-01', '-e', '2014-14-02'],
])
def test_exit_on_bad_input_date(optional_args):
    """Malformed date strings make the CLI parser exit."""
    full_args = dummy_args + optional_args
    with pytest.raises(SystemExit):
        parse_command_line(full_args)
def _test_ingest_multiple_dates_main(params):
    """Run the main entry point with ETLStep replaced by a no-op double."""
    step_patch = mock.patch(
        'sherlock.batch.ingest_multiple_dates.ETLStep',
        return_value=EtlTest(),
        autospec=True,
    )
    with step_patch:
        validate_execution_results(ingest_multiple_dates_main(params))
def _test_ingest_multiple_dates_main_exceptions(params):
excToStatus = {
KeyboardInterruptError: (
'cancelled' if params.load_only or params.serial_stepper else 'error'
),
KeyboardInterrupt: 'cancelled',
ProcessError: 'error',
SystemExit: ('unknown' if params.load_only or params.serial_stepper else 'error')
}
for exc_type in excToStatus.keys():
with mock.patch(
'sherlock.batch.ingest_multiple_dates.ETLStep',
return_value=EtlErrorTest(exc_type),
autospec=True,
):
expected_status = excToStatus[exc_type]
validate_execution_results(
ingest_multiple_dates_main(params), expected_status
)
@pytest.mark.parametrize("optional_args", [
    ['-r', '-s', '2014-05-01', '-e', '2014-05-02', '--load-only'],
    ['-r', '-s', '2014-05-01', '-e', '2014-05-02', '--et-only'],
    ['-r', '-s', '2014-05-01', '-e', '2014-05-02', '--serial-stepper'],
])
def test_ingest_multiple_dates_main(optional_args):
    """End-to-end: main succeeds and maps exceptions to result statuses."""
    with staticconf.testing.MockConfiguration(MOCK_CONFIG):
        params = parse_command_line(dummy_args + optional_args)
        yaml_patch = mock.patch(
            'staticconf.YamlConfiguration', return_value={}, autospec=True
        )
        with yaml_patch:
            _test_ingest_multiple_dates_main(params)
            _test_ingest_multiple_dates_main_exceptions(params)
| {
"content_hash": "64ec2246e9d3230fe0f8ec8593d5decd",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 89,
"avg_line_length": 35.54210526315789,
"alnum_prop": 0.6041759218125278,
"repo_name": "Yelp/mycroft",
"id": "e1f1aa18fed143db79ecbe4e0f14a8452ed4771b",
"size": "6778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mycroft/tests/batch/test_ingest_multiple_dates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "389"
},
{
"name": "Dockerfile",
"bytes": "1402"
},
{
"name": "HTML",
"bytes": "32973"
},
{
"name": "JavaScript",
"bytes": "41476"
},
{
"name": "Makefile",
"bytes": "2094"
},
{
"name": "Python",
"bytes": "590882"
},
{
"name": "Shell",
"bytes": "1011"
}
],
"symlink_target": ""
} |
"""
pyvisa-py.tcpip
~~~~~~~~~~~~~~~
TCPIP Session implementation using Python Standard library.
:copyright: 2014 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import random
import socket
import select
import time
from pyvisa import constants, attributes, errors
from .sessions import Session, UnknownAttribute
from .protocols import vxi11, rpc
from . import common
StatusCode = constants.StatusCode
SUCCESS = StatusCode.success
@Session.register(constants.InterfaceType.tcpip, 'INSTR')
class TCPIPInstrSession(Session):
    """A TCPIP INSTR session doing the low level communication over
    VXI-11 using only the standard library.
    """

    lock_timeout = 1000
    timeout = 1000
    client_id = None
    link = None
    max_recv_size = 1024

    @staticmethod
    def list_resources():
        # TODO: is there a way to get this?
        return []

    def after_parsing(self):
        """Create the VXI-11 link once the resource string is parsed."""
        # TODO: board_number not handled
        self.interface = vxi11.CoreClient(self.parsed.host_address)

        self.lock_timeout = 10000
        self.timeout = 10000
        # Random 31-bit id identifying this client to the VXI-11 server.
        self.client_id = random.getrandbits(31)

        error, link, abort_port, max_recv_size = self.interface.create_link(
            self.client_id, 0, self.lock_timeout, self.parsed.lan_device_name)

        if error:
            raise Exception("error creating link: %d" % error)

        self.link = link
        # Cap the server-advertised maximum at 1GB to guard against
        # nonsensical values.
        self.max_recv_size = min(max_recv_size, 2 ** 30)

        for name in ("SEND_END_EN", "TERMCHAR", "TERMCHAR_EN"):
            attribute = getattr(constants, 'VI_ATTR_' + name)
            self.attrs[attribute] = attributes.AttributesByID[attribute].default

    def close(self):
        """Destroy the VXI-11 link and close the underlying connection."""
        try:
            self.interface.destroy_link(self.link)
        except (errors.VisaIOError, socket.error, rpc.RPCError) as e:
            print("Error closing VISA link: {}".format(e))

        self.interface.close()
        self.link = None
        self.interface = None

    def read(self, count):
        """Reads data from device or interface synchronously.

        Corresponds to viRead function of the VISA library.

        :param count: Number of bytes to be read.
        :return: data read, return value of the library call.
        :rtype: bytes, VISAStatus
        """
        if count < self.max_recv_size:
            chunk_length = count
        else:
            chunk_length = self.max_recv_size

        if self.get_attribute(constants.VI_ATTR_TERMCHAR_EN)[0]:
            term_char, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR)
            # Use the first byte of the (stringified) termination character.
            term_char = str(term_char).encode('utf-8')[0]
            flags = vxi11.OP_FLAG_TERMCHAR_SET
        else:
            term_char = flags = 0

        read_data = bytearray()
        reason = 0
        # Stop once the server reports END or the termination character.
        end_reason = vxi11.RX_END | vxi11.RX_CHR
        read_fun = self.interface.device_read
        status = SUCCESS

        while reason & end_reason == 0:
            error, reason, data = read_fun(self.link, chunk_length,
                                           self.timeout, self.lock_timeout,
                                           flags, term_char)

            if error == vxi11.ErrorCodes.io_timeout:
                return bytes(read_data), StatusCode.error_timeout
            elif error:
                return bytes(read_data), StatusCode.error_io

            read_data.extend(data)
            count -= len(data)

            if count <= 0:
                status = StatusCode.success_max_count_read
                break

            # Never request more bytes than the caller still wants.
            chunk_length = min(count, chunk_length)

        return bytes(read_data), status

    def write(self, data):
        """Writes data to device or interface synchronously.

        Corresponds to viWrite function of the VISA library.

        :param data: data to be written.
        :type data: str
        :return: Number of bytes actually transferred, return value of the
            library call.
        :rtype: int, VISAStatus
        """
        send_end, _ = self.get_attribute(constants.VI_ATTR_SEND_END_EN)
        chunk_size = 1024

        try:
            if send_end:
                flags = vxi11.OP_FLAG_TERMCHAR_SET
            else:
                flags = 0

            num = len(data)
            offset = 0

            while num > 0:
                if num <= chunk_size:
                    flags |= vxi11.OP_FLAG_END

                # FIX: slice by chunk_size -- the same quantity the END-flag
                # check uses.  The original sliced by max_recv_size, so a
                # final remainder in (chunk_size, max_recv_size] bytes was
                # sent without the END indicator.
                block = data[offset:offset + chunk_size]

                error, size = self.interface.device_write(
                    self.link, self.timeout, self.lock_timeout, flags, block)

                if error == vxi11.ErrorCodes.io_timeout:
                    return offset, StatusCode.error_timeout

                elif error or size < len(block):
                    return offset, StatusCode.error_io

                offset += size
                num -= size

            return offset, SUCCESS

        except vxi11.Vxi11Error:
            return 0, StatusCode.error_timeout

    def _get_attribute(self, attribute):
        """Get the value for a given VISA attribute for this session.

        Use to implement custom logic for attributes.

        :param attribute: Resource attribute for which the state query is made
        :return: The state of the queried attribute for a specified resource,
            return value of the library call.
        :rtype: (unicode | str | list | int, VISAStatus)
        """
        if attribute == constants.VI_ATTR_TCPIP_ADDR:
            # FIX: the address lives on the parsed resource string; the
            # original referenced self.host_address, which is never defined.
            return self.parsed.host_address, SUCCESS

        elif attribute == constants.VI_ATTR_TCPIP_DEVICE_NAME:
            raise NotImplementedError

        elif attribute == constants.VI_ATTR_TCPIP_HOSTNAME:
            raise NotImplementedError

        elif attribute == constants.VI_ATTR_TCPIP_KEEPALIVE:
            raise NotImplementedError

        elif attribute == constants.VI_ATTR_TCPIP_NODELAY:
            raise NotImplementedError

        elif attribute == constants.VI_ATTR_TCPIP_PORT:
            raise NotImplementedError

        elif attribute == constants.VI_ATTR_SUPPRESS_END_EN:
            raise NotImplementedError

        raise UnknownAttribute(attribute)

    def _set_attribute(self, attribute, attribute_state):
        """Sets the state of an attribute.

        Corresponds to viSetAttribute function of the VISA library.

        :param attribute: Attribute for which the state is to be modified.
        :param attribute_state: The state of the attribute to be set.
        :return: return value of the library call.
        :rtype: VISAStatus
        """
        raise UnknownAttribute(attribute)

    def assert_trigger(self, protocol):
        """Asserts software or hardware trigger.

        Corresponds to viAssertTrigger function of the VISA library.

        :param protocol: Trigger protocol to use during assertion.
        :return: return value of the library call.
        :rtype: VISAStatus
        """
        # FIX: self.io_timeout was never defined on this class; the I/O
        # timeout attribute is self.timeout.
        error = self.interface.device_trigger(self.link, 0, self.lock_timeout,
                                              self.timeout)

        if error:
            # TODO: Which status to return
            raise Exception("error triggering: %d" % error)

        return SUCCESS

    def clear(self):
        """Clears a device.

        Corresponds to viClear function of the VISA library.

        :return: return value of the library call.
        :rtype: VISAStatus
        """
        # FIX: self.io_timeout -> self.timeout (attribute never existed).
        error = self.interface.device_clear(self.link, 0, self.lock_timeout,
                                            self.timeout)

        if error:
            # TODO: Which status to return
            raise Exception("error clearing: %d" % error)

        return SUCCESS

    def read_stb(self):
        """Reads a status byte of the service request.

        Corresponds to viReadSTB function of the VISA library.

        :return: Service request status byte, return value of the library
            call.
        :rtype: int, VISAStatus
        """
        # FIX: self.io_timeout -> self.timeout (attribute never existed).
        error, stb = self.interface.device_read_stb(self.link, 0,
                                                    self.lock_timeout,
                                                    self.timeout)

        if error:
            # TODO: Which status to return
            raise Exception("error reading status: %d" % error)

        return stb, SUCCESS

    def lock(self, lock_type, timeout, requested_key=None):
        """Establishes an access mode to the specified resources.

        Corresponds to viLock function of the VISA library.

        :param lock_type: Specifies the type of lock requested.
        :param timeout: Absolute time period (in milliseconds) that a resource
            waits to get unlocked before returning an error.
        :param requested_key: Not used; set to VI_NULL for exclusive locks.
        :return: access_key that can be passed to other sessions to share the
            lock, return value of the library call.
        :rtype: str, VISAStatus
        """
        # TODO: lock type not implemented
        flags = 0

        error = self.interface.device_lock(self.link, flags, self.lock_timeout)

        if error:
            # TODO: Which status to return
            raise Exception("error locking: %d" % error)

        # FIX: the original returned None implicitly although the documented
        # contract is (access_key, status).
        return '', SUCCESS

    def unlock(self):
        """Relinquishes a lock for the specified resource.

        Corresponds to viUnlock function of the VISA library.

        :return: return value of the library call.
        :rtype: VISAStatus
        """
        error = self.interface.device_unlock(self.link)

        if error:
            # TODO: Which message to return
            raise Exception("error unlocking: %d" % error)

        # FIX: return a status as documented (original returned None).
        return SUCCESS
@Session.register(constants.InterfaceType.tcpip, 'SOCKET')
class TCPIPSocketSession(Session):
    """A TCPIP SOCKET session using only the standard library for the low
    level communication.
    """

    lock_timeout = 1000
    timeout = 1000
    max_recv_size = 4096
    # Bytes received after a termination character on a previous read; they
    # are served first on the next read.
    _pending_buffer = b''

    @staticmethod
    def list_resources():
        # TODO: is there a way to get this?
        return []

    def after_parsing(self):
        """Open a non-blocking socket to the parsed host/port."""
        # TODO: board_number not handled
        self.interface = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.interface.setblocking(0)
        try:
            # NOTE(review): connect_ex on a non-blocking socket returns
            # immediately (typically EINPROGRESS); readiness is later
            # ensured via select() in read()/write().
            self.interface.connect_ex((self.parsed.host_address,
                                       int(self.parsed.port)))
        except Exception as e:
            raise Exception("could not create socket: %s" % e)

        self.attrs[constants.VI_ATTR_TCPIP_ADDR] = self.parsed.host_address
        self.attrs[constants.VI_ATTR_TCPIP_PORT] = self.parsed.port
        self.attrs[constants.VI_ATTR_INTF_NUM] = self.parsed.board
        for name in ("TERMCHAR", "TERMCHAR_EN"):
            attribute = getattr(constants, 'VI_ATTR_' + name)
            self.attrs[attribute] = attributes.AttributesByID[attribute].default

    def close(self):
        self.interface.close()
        self.interface = None

    def read(self, count):
        """Reads data from device or interface synchronously.

        Corresponds to viRead function of the VISA library.

        :param count: Number of bytes to be read.
        :return: data read, return value of the library call.
        :rtype: bytes, VISAStatus
        """
        chunk_length = min(count, self.max_recv_size)

        end_char, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR)
        enabled, _ = self.get_attribute(constants.VI_ATTR_TERMCHAR_EN)
        timeout, _ = self.get_attribute(constants.VI_ATTR_TMO_VALUE)
        # VISA timeouts are in milliseconds; work in float seconds.
        timeout = timeout / 1000.0

        end_byte = common.int_to_byte(end_char) if end_char else b''

        read_fun = self.interface.recv

        now = start = time.time()
        out = self._pending_buffer
        if enabled and end_byte in out:
            # FIX: the original returned `out + parts[0] + end_byte`, which
            # duplicated the first part (out already contains it), and used
            # b''.join(parts[1:]) for the remainder, which dropped any
            # further termination characters.  partition() fixes both.
            before, sep, after = out.partition(end_byte)
            self._pending_buffer = after
            return (before + sep,
                    constants.StatusCode.success_termination_character_read)

        while now - start <= timeout:
            # FIX: bound select() by the remaining timeout; the original
            # blocked indefinitely if the device never sent data.
            remaining = timeout - (now - start)
            rlist, _, _ = select.select([self.interface], [], [], remaining)
            if not rlist:
                now = time.time()
                continue

            # FIX: never request more than the caller still wants, so that
            # len(out) cannot overshoot count (the original compared with ==
            # and could spin until timeout after an overshoot).
            last = read_fun(min(chunk_length, count - len(out)))

            if not last:
                time.sleep(.01)
                now = time.time()
                continue

            if enabled and end_byte in last:
                before, sep, after = last.partition(end_byte)
                self._pending_buffer = after
                return (out + before + sep,
                        constants.StatusCode.success_termination_character_read)

            out += last
            if len(out) >= count:
                return out, constants.StatusCode.success_max_count_read

            now = time.time()

        return out, constants.StatusCode.error_timeout

    def write(self, data):
        """Writes data to device or interface synchronously.

        Corresponds to viWrite function of the VISA library.

        :param data: data to be written.
        :type data: str
        :return: Number of bytes actually transferred, return value of the
            library call.
        :rtype: int, VISAStatus
        """
        chunk_size = 4096

        num = sz = len(data)
        offset = 0
        while num > 0:
            block = data[offset:min(offset + chunk_size, sz)]

            try:
                # Wait until the socket is ready to accept data.
                select.select([], [self.interface], [])
                size = self.interface.send(block)
            except socket.timeout:
                return offset, StatusCode.error_io

            if size < len(block):
                return offset, StatusCode.error_io

            offset += size
            num -= size

        return offset, SUCCESS

    def _get_attribute(self, attribute):
        """Get the value for a given VISA attribute for this session.

        Use to implement custom logic for attributes.

        :param attribute: Resource attribute for which the state query is made
        :return: The state of the queried attribute for a specified resource,
            return value of the library call.
        :rtype: (unicode | str | list | int, VISAStatus)
        """
        if attribute == constants.VI_ATTR_TCPIP_HOSTNAME:
            raise NotImplementedError

        elif attribute == constants.VI_ATTR_TCPIP_KEEPALIVE:
            raise NotImplementedError

        elif attribute == constants.VI_ATTR_TCPIP_NODELAY:
            raise NotImplementedError

        raise UnknownAttribute(attribute)

    def _set_attribute(self, attribute, attribute_state):
        """Sets the state of an attribute.

        Corresponds to viSetAttribute function of the VISA library.

        :param attribute: Attribute for which the state is to be modified.
        :param attribute_state: The state of the attribute to be set.
        :return: return value of the library call.
        :rtype: VISAStatus
        """
        raise UnknownAttribute(attribute)
| {
"content_hash": "52c4caf2101db615ca3f77549b7d6746",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 122,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.5988516246900691,
"repo_name": "andeh575/pyvisa-py",
"id": "2f6528df8bb124d82fb5fb99436ac3012c1cd399",
"size": "15350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisa-py/tcpip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133992"
}
],
"symlink_target": ""
} |
import queue
import collections
from .cmdsys import command, is_command
from .event import Event, Target
class BridgeChannel:
    """Tracks which bridges and users participate in one named channel."""

    def __init__(self, manager):
        self._manager = manager
        self.bridges = set()
        self.users = {}

    def _bridge_join(self, bridge_id):
        """Register a bridge with this channel; duplicates are rejected."""
        if bridge_id in self.bridges:
            raise ValueError("bridge already joined")
        self.bridges.add(bridge_id)

    def _bridge_leave(self, bridge_id):
        """Emit user_leave for every user owned by the bridge, then drop it."""
        departing = [uid for uid, info in self.users.items()
                     if info['bridge_id'] == bridge_id]
        for uid in departing:
            leave = Event(self, self._manager, 'user_leave', id(self), uid)
            self._manager.events.put(leave)
        self.bridges.remove(bridge_id)

    def _user_join(self, user_id, name, bridge_id):
        """Add a user record; duplicates are rejected."""
        if user_id in self.users:
            raise ValueError("user already joined")
        self.users[user_id] = {"name": name, "bridge_id": bridge_id}

    def _user_update(self, user_id, name):
        """Rename an existing user."""
        self.users[user_id]["name"] = name

    def _user_leave(self, user_id):
        """Remove a user record."""
        del self.users[user_id]
class BridgeManager:
    """Routes events between attached bridges and tracks shared channels.

    The manager registers itself under the reserved bridge name 'manager'
    and participates in event dispatch like any other bridge.
    """

    def __init__(self, config):
        self.config = config
        self.events = queue.Queue()
        self._bridges = {"manager": self}
        self._channels = {}
        self._eavesdropper = None

    def attach(self, name, bridge):
        """Attach a bridge under *name* and let it register with us."""
        assert name not in self._bridges, \
            "bridge '%s' is already attached!" % name
        self._bridges[name] = bridge
        bridge.register(self)

    def detach(self, name):
        """Ask a bridge to deregister; removal happens via the detach event."""
        assert name in self._bridges, "bridge '%s' is not attached!" % name
        self._bridges[name].deregister()

    def _tr_detach(self, event):
        """Remove the detaching bridge from all channels and the registry."""
        name = self._bridge_name(event.source_id)
        turned_empty = []
        for channel_name, channel in self._channels.items():
            if event.source_id in channel.bridges:
                channel._bridge_leave(event.source_id)
                if not len(channel.bridges):
                    turned_empty.append(channel_name)

        for channel_name in turned_empty:
            del self._channels[channel_name]

        del self._bridges[name]
        # Keep running only while a bridge other than the manager remains.
        self._running = len(self._bridges) > 1
        return True

    def _bridge_name(self, bridge_id):
        """Map a bridge object id back to its registered name."""
        for name, bridge in self._bridges.items():
            if id(bridge) == bridge_id:
                return name
        else:
            raise KeyError("no bridge with id %s is attached" % bridge_id)

    def _channel_name(self, channel_id):
        """Map a channel object id back to its name."""
        for name, channel in self._channels.items():
            if id(channel) == channel_id:
                return name
        else:
            raise KeyError("no channel with id %s is attached" % channel_id)

    def _ev_channel_join(self, event, name):
        """A bridge joins a channel, creating it on first use."""
        try:
            channel = self._channels[name]
        except KeyError:
            channel = self._channels[name] = BridgeChannel(self)

        bridge_id, users = event.source_id, channel.users.copy()
        self._send_event(bridge_id, 'channel_add', id(channel), name, users)
        channel._bridge_join(bridge_id)

    def _ev_channel_leave(self, event, name):
        """A bridge leaves a channel; empty channels are deleted."""
        self._channels[name]._bridge_leave(event.source_id)
        channel = self._channels[name]
        self._send_event(event.source_id, 'channel_remove', id(channel))
        if not len(self._channels[name].bridges):
            del self._channels[name]

    def _ev_user_join(self, event, channel_id, user_id, name):
        channel_name = self._channel_name(channel_id)
        self._send_event(channel_id, 'user_add', user_id, name)
        self._channels[channel_name]._user_join(user_id, name, event.source_id)

    def _ev_user_change(self, event, channel_id, user_id, name):
        channel_name = self._channel_name(channel_id)
        self._send_event(channel_id, 'user_update', user_id, name)
        self._channels[channel_name]._user_update(user_id, name)

    def _ev_user_leave(self, event, channel_id, user_id):
        channel_name = self._channel_name(channel_id)
        self._channels[channel_name]._user_leave(user_id)
        self._send_event(channel_id, 'user_remove', user_id)

    def _tr_command(self, event, words, authority):
        """Re-target a command event at the bridge named by its first word."""
        if len(words) == 0:
            self._send_event(event.source_id, 'message',
                             "error: empty command")
            return False
        elif words[0] not in self._bridges:
            self._send_event(event.source_id, 'message',
                             "error: '{}' no such bridge".format(words[0]))
            return False

        event.target_id = id(self._bridges[words[0]])
        event.args = [words[1:], authority]
        return True

    def _ev_command(self, event, command, authority):
        """Dispatch a command addressed to the manager itself."""
        if len(command) == 0:
            self._send_event(event.source_id, 'message',
                             "error: empty command")
            return

        handler = getattr(self, '_{}'.format(command[0]), None)
        if is_command(handler):
            try:
                response = handler(*command[1:])
            except Exception as e:
                self._send_event(event.source_id, 'message',
                                 "error: {}".format(e))
            else:
                if response is not None:
                    self._send_event(event.source_id, 'message', response)
        else:
            # FIX: typo in the user-facing message ("unkown" -> "unknown").
            self._send_event(event.source_id, 'message', "error: '{}' "
                             "unknown command".format(command[0]))

    def _ev_exception(self, event, exception):
        raise exception

    def _send_event(self, target, name, *args, **kwargs):
        """Queue an event originating from the manager."""
        self.events.put(Event(self, target, name, *args, **kwargs))

    def _dispatch(self, event):
        """Invoke this object's handler for the event, if one exists."""
        handler = getattr(self, '_ev_{}'.format(event.name), None)
        if handler is not None:
            handler(event, *event.args, **event.kwargs)

    def _translate(self, event):
        """Pre-process an event; return False to drop it."""
        handler = getattr(self, '_tr_{}'.format(event.name), None)
        if handler is not None:
            return handler(event, *event.args, **event.kwargs)
        else:
            return True

    def once(self):
        """Process a single event from the queue, fanning it out to the
        bridges selected by its target id."""
        event = self.events.get()
        if self._translate(event):
            if self._eavesdropper is not None:
                self._eavesdropper(event)
            if event.target_id == Target.Everything:
                bridges = self._bridges.values()
            elif event.target_id == Target.Manager:
                bridges = (self,)
            elif event.target_id == Target.AllBridges:
                bridges = (b for b in self._bridges.values() if b is not self)
            elif event.target_id == Target.AllChannels:
                channels = self._channels.values()
                bridge_ids = {i for c in channels for i in c.bridges}
                bridges = (b for b in self._bridges.values()
                           if id(b) in bridge_ids)
            elif event.target_id == Target.AllUsers:
                users = (u for c in self._channels.values() for u in c.users)
                bridge_ids = {u['bridge_id'] for u in users}
                bridges = (b for b in self._bridges.values()
                           if id(b) in bridge_ids)
            else:
                # Specific target: a bridge, a channel, or a single user.
                for bridge in self._bridges.values():
                    if id(bridge) == event.target_id:
                        bridges = (bridge,)
                        break
                else:
                    for channel in self._channels.values():
                        if id(channel) == event.target_id:
                            bridges = (b for b in self._bridges.values()
                                       if id(b) in channel.bridges)
                            break

                        elif event.target_id in channel.users:
                            user = channel.users[event.target_id]
                            bridges = (b for b in self._bridges.values()
                                       if id(b) == user['bridge_id'])
                            break
                    else:
                        raise ValueError("invalid target")

            for bridge in bridges:
                bridge._dispatch(event)

    def run(self):
        """Process events until no external bridge remains attached."""
        self._running = True
        try:
            while self._running:
                self.once()
        finally:
            self.terminate()

    def terminate(self):
        """Detach and terminate every attached bridge."""
        while True:
            try:
                name, bridge = self._bridges.popitem()
            except KeyError:
                break

            if bridge is not self:
                bridge.terminate()

    @command
    def _shutdown(self):
        """Broadcast a shutdown event to all bridges."""
        self._send_event(Target.AllBridges, 'shutdown')
| {
"content_hash": "ce92f7537429ebbb525e6c0e923ed861",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 79,
"avg_line_length": 35.06772908366534,
"alnum_prop": 0.5331742785730516,
"repo_name": "Hornwitser/YetiBridge",
"id": "aa8130544b974323e152dce7e225bf8b765f0c90",
"size": "8802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yetibridge/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59619"
}
],
"symlink_target": ""
} |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_poolgroup
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of PoolGroup Avi RESTful Object
description:
- This module is used to configure PoolGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for poolgroup.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
created_by:
description:
- Name of the user who created the object.
deployment_policy_ref:
description:
- When setup autoscale manager will automatically promote new pools into production when deployment goals are met.
- It is a reference to an object of type poolgroupdeploymentpolicy.
description:
description:
- Description of pool group.
fail_action:
description:
- Enable an action - close connection, http redirect, or local http response - when a pool group failure happens.
- By default, a connection will be closed, in case the pool group experiences a failure.
members:
description:
- List of pool group members object of type poolgroupmember.
min_servers:
description:
- The minimum number of servers to distribute traffic to.
- Allowed values are 1-65535.
- Special values are 0 - 'disable'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
name:
description:
- The name of the pool group.
required: true
priority_labels_ref:
description:
- Uuid of the priority labels.
- If not provided, pool group member priority label will be interpreted as a number with a larger number considered higher priority.
- It is a reference to an object of type prioritylabels.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the pool group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PoolGroup object
avi_poolgroup:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_poolgroup
"""
RETURN = '''
obj:
description: PoolGroup (api/poolgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: declare the PoolGroup argument spec and run
    the generic Avi API handler."""
    specs = {
        'state': dict(default='present',
                      choices=['absent', 'present']),
        'cloud_config_cksum': dict(type='str',),
        'cloud_ref': dict(type='str',),
        'created_by': dict(type='str',),
        'deployment_policy_ref': dict(type='str',),
        'description': dict(type='str',),
        'fail_action': dict(type='dict',),
        'members': dict(type='list',),
        'min_servers': dict(type='int',),
        'name': dict(type='str', required=True),
        'priority_labels_ref': dict(type='str',),
        'tenant_ref': dict(type='str',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'poolgroup',
                           set([]))


if __name__ == '__main__':
    main()
| {
"content_hash": "5eec2b896da949a42628b6aadc491cd6",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 144,
"avg_line_length": 33.84375,
"alnum_prop": 0.6170360110803325,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "8d7cbf5b22bda26ed516b9668904e7336e1ab0e2",
"size": "5191",
"binary": false,
"copies": "27",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_poolgroup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.