__all__ = ['NetfuzzTool']
import socket
import urllib
import base64
from xnet.tools import Tool
def _tcp4_connect(addr):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(addr)
return sock
class Template(object):
_ESCAPE_SEQUENCES = (
('\\r', '\r'),
('\\n', '\n'),
)
def __init__(self, path, default_template):
if path:
f = open(path, 'rb')
self._template = f.read()
f.close()
else:
self._template = default_template
self._slices = self._template.split('{}')
self._nr_placeholders = len(self._slices) - 1
def _unescape(self, result):
result = result.decode('string-escape')
for (es, ch) in self._ESCAPE_SEQUENCES:
result = result.replace(es, ch)
return result
def sniper(self, payload, unescape=False):
s = self._slices
for i in xrange(1, len(s)):
head = ''.join(s[:i])
foot = ''.join(s[i:])
result = head + payload + foot
if unescape:
result = self._unescape(result)
yield result
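# Example (a hedged sketch, not part of the tool itself): with the template
# 'GET /{}?q={} HTTP/1.0', sniper() yields one request per '{}' placeholder,
# inserting the payload at each insertion point in turn:
#
#   t = Template(None, 'GET /{}?q={} HTTP/1.0')
#   list(t.sniper('AAAA'))
#   # -> ['GET /AAAA?q= HTTP/1.0', 'GET /?q=AAAA HTTP/1.0']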
class NetfuzzTool(Tool):
'''
NetfuzzTool
'''
__toolname__ = 'netfuzz'
__itemname__ = 'host'
__description__ = 'Send various payloads to target'
cmdline_options = [
('-a', '--address', 'Target address, specified as host:port',
dict(dest='address', metavar='addr')),
('-t', '--template', 'Template file containing {} placeholders where payloads are inserted',
dict(dest='template', metavar='path')),
('', '--tag-lines', 'tag output lines with insertion point index and payload',
dict(dest='tag_lines', action='store_true')),
('-u', '--urlenc', 'urlencode payloads',
dict(dest='urlenc', action='store_true')),
('-b', '--base64', 'base64-encode payloads',
dict(dest='base64', action='store_true')),
('-e', '--unescape', 'unescape in template and payload: \\r, \\n, \\xXX',
dict(dest='unescape', action='store_true')),
]
@classmethod
def print_source(cls):
cls._print_source(__file__)
def _new_connection(self):
return _tcp4_connect(self._addr)
@classmethod
def __setup__(cls, options):
cls._template = Template(options.template, '{}')
address = options.address
assert ':' in address
(host, port) = address.split(':')
assert port.isdigit()
cls._addr = (host, int(port))
conn = _tcp4_connect(cls._addr) # verify connectivity
conn.close()
#
# NOTE! passing dict from __massage__() to __parse__()
# in order to preserve payload info for instances.
#
@classmethod
def __massage__(cls, iterator, options):
for (pindex, payload) in enumerate(iterator):
            if payload.endswith('\n'):
                payload = payload[:-1]
if options.apply:
#
# We handle --apply "manually" as __massage__() passes
# dict to __parse__(). This makes the main class ignore
# --apply silently.
# Makes most sense to do this before base64 and urlenc.
#
payload = cls._apply(options.apply, payload)
if options.base64:
payload = base64.b64encode(payload)
if options.urlenc:
payload = urllib.quote(payload)
it = cls._template.sniper(payload, options.unescape)
for (inspoint, request) in enumerate(it):
yield {
'request': request,
'payload': payload,
'pindex': pindex,
'inspoint': inspoint,
}
def __parse__(self, item, iterator):
return item
def __action__(self, parse_result):
request = parse_result['request']
result = parse_result.copy()
        conn = self._new_connection()
conn.send(request)
response = conn.recv(65535)
if response and self.options.tag_lines:
tag = '{0},{1}:'.format(result['inspoint'], result['payload'])
response = '\n' + tag + response.replace('\n', '\n' + tag)
result['addr'] = '{0}:{1}'.format(*self._addr)
result['host'] = self._addr[0]
result['port'] = self._addr[1]
result['response'] = response
return result
def __filter__(self, action_result):
return action_result
def __format__(self, line, parse_result, action_result):
return action_result['response']
@classmethod
def __format_help__(cls):
output = '''
Format variables for %s:
addr - target address
host - target host
port - target port
response - server response
request - client request
payload - payload
pindex - payload index
inspoint - insertion point, starting from 0
time - time consumed by action, in ms
pid - PID of executing process
grid - Greenlet ID of executing process
Default format:
'{response}'
''' % cls.__toolname__
return output
def __timeout__(self):
if hasattr(self, '_addr'):
return '{0}:{1} wait-timeout\n'.format(*self._addr)
return None
def __filename__(self):
return 'pindex_{0}_inspoint_{1}'.format(
self._action_result['pindex'],
self._action_result['inspoint'],
)
def main():
import xnet.tools.run
xnet.tools.run.run(NetfuzzTool)
if __name__ == "__main__":
main()
#doctest.testmod()
#unittest.main()
|
{
"content_hash": "512f1c8c4b9ffe24ebdb6b68b68ff8c0",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 100,
"avg_line_length": 29.973821989528794,
"alnum_prop": 0.5390393013100436,
"repo_name": "kristerhedfors/xnet",
"id": "fcd4fd2d98feea1ec5b080b2d8e9e9bbe18531e0",
"size": "5812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xnet/tools/netfuzz.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "262153"
},
{
"name": "Shell",
"bytes": "1347"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import numpy as np
import math
from pyqtgraph.debug import Profiler
import acq4.util.functions as utilFn
from acq4.util.HelpfulException import HelpfulException
from six.moves import range
def convertPtsToSparseImage(data, params, spacing=5e-6):
"""Function for converting a list of stimulation spots and their associated values into a fine-scale smoothed image.
data - a numpy record array which includes fields for 'xPos', 'yPos' and the parameters specified in params.
    params - a list of field names in *data* whose values should be projected into the image
spacing - the size of each pixel in the returned grid (default is 5um)
Return a 2D record array with fields for each param in params - if 2 or more data points fall in the same grid location
their values are averaged.
"""
    ## sanity checks
    if params is None:
        raise Exception("Don't know which parameters to process. Options are: %s" % str(data.dtype.names))
    if len(params) == 0:
        return
if 'xPos' not in data.dtype.names or 'yPos' not in data.dtype.names:
raise Exception("Data needs to have fields for 'xPos' and 'yPos'. Current fields are: %s" %str(data.dtype.names))
### Project data from current spacing onto finer grid, averaging data from duplicate spots
xmin = data['xPos'].min()
ymin = data['yPos'].min()
xdim = int((data['xPos'].max()-xmin)/spacing)+5
ydim = int((data['yPos'].max()-ymin)/spacing)+5
dtype = []
for p in params:
dtype.append((p, float))
dtype.append(('stimNumber', int))
#print xmin, data['xPos'].max(), spacing
#print len(data[data['xPos'] > 0.002]) + len(data[data['xPos'] < -0.002])
#print np.argwhere(data['xPos'] > 0.002)
#print xdim, ydim
arr = np.zeros((xdim, ydim), dtype=dtype)
for s in data:
x, y = (int((s['xPos']-xmin)/spacing), int((s['yPos']-ymin)/spacing))
for p in params:
arr[x,y][p] += s[p]
arr[x,y]['stimNumber'] += 1
arr['stimNumber'][arr['stimNumber']==0] = 1
for f in arr.dtype.names:
arr[f] = arr[f]/arr['stimNumber']
#arr = arr/arr['stimNumber']
arr = np.ascontiguousarray(arr)
return arr
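# Example (a minimal sketch with hypothetical data): three spots, two of which
# fall into the same 5 um grid cell and are therefore averaged:
#
#   pts = np.array([(0.0, 0.0, 1.0), (1e-6, 0.0, 3.0), (20e-6, 0.0, 5.0)],
#                  dtype=[('xPos', float), ('yPos', float), ('amp', float)])
#   img = convertPtsToSparseImage(pts, ['amp'])
#   img['amp'][0, 0]   # -> 2.0 (average of the two co-located spots)
#   img['amp'][4, 0]   # -> 5.0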
def bendelsSpatialCorrelationAlgorithm(data, radius, spontRate, timeWindow, printProcess=False, eventsKey='numOfPostEvents'):
## check that data has 'xPos', 'yPos' and 'numOfPostEvents'
#SpatialCorrelator.checkArrayInput(data)
#prof = Profiler("bendelsSpatialCorrelationAlgorithm", disabled=True)
fields = data.dtype.names
if 'xPos' not in fields or 'yPos' not in fields or eventsKey not in fields:
raise HelpfulException("Array input needs to have the following fields: 'xPos', 'yPos', the field specified in *eventsKey*. Current fields are: %s" %str(fields))
#prof.mark("checked fields")
## add 'prob' field to data array
if 'prob' not in data.dtype.names:
arr = utilFn.concatenateColumns([data, np.zeros(len(data), dtype=[('prob', float)])])
#arr[:] = data
data = arr
else:
data['prob']=0
#prof.mark("set 'prob' field")
table = np.zeros((200, 200)) ## add a lookup table so that we don't have to calculate the same probabilities over and over...saves a bit of time
## spatial correlation algorithm from :
## Bendels, MHK; Beed, P; Schmitz, D; Johenning, FW; and Leibold C. Detection of input sites in
## scanning photostimulation data based on spatial correlations. 2010. Journal of Neuroscience Methods.
## calculate probability of seeing a spontaneous event in time window
p = 1-np.exp(-spontRate*timeWindow)
if printProcess:
print("====== Spontaneous Probability: %f =======" % p)
#prof.mark('calculated spontaneous probability')
## for each spot, calculate the probability of having the events in nearby spots occur randomly
for x in data:
spots = data[(np.sqrt((data['xPos']-x['xPos'])**2+(data['yPos']-x['yPos'])**2)) < radius]
nSpots = len(spots)
nEventSpots = len(spots[spots[eventsKey] > 0])
prob = 0
if table[nEventSpots, nSpots] != 0: ## try looking up value in table (it was stored there if we calculated it before), otherwise calculate it now
prob = table[nEventSpots, nSpots]
#prof.mark('look-up')
else:
for j in range(nEventSpots, nSpots+1):
a = ((p**j)*((1-p)**(nSpots-j))*math.factorial(nSpots))/(math.factorial(j)*math.factorial(nSpots-j))
if printProcess:
print(" Prob for %i events: %f Total: %f" %(j, a, prob+a))
prob += a
table[nEventSpots, nSpots] = prob
#prof.mark('calculate')
if printProcess: ## for debugging
print(" %i out of %i spots had events. Probability: %f" %(nEventSpots, nSpots, prob))
x['prob'] = prob
#prof.mark("calculated probabilities")
#prof.finish()
return data
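# Note: the inner loop above computes the upper tail of a binomial
# distribution, P(X >= nEventSpots) for X ~ Binomial(nSpots, p). A minimal
# equivalent sketch (scipy is an assumption -- this module does not import it):
#
#   from scipy.stats import binom
#   prob = binom.sf(nEventSpots - 1, nSpots, p)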
def spatialCorrelationAlgorithm_ZScore(data, radius, printProcess=False, eventsKey='ZScore', spontKey='SpontZScore', threshold=1.645):
## check that data has 'xPos', 'yPos' and 'numOfPostEvents'
#SpatialCorrelator.checkArrayInput(data)
#prof = Profiler("bendelsSpatialCorrelationAlgorithm", disabled=True)
fields = data.dtype.names
if 'xPos' not in fields or 'yPos' not in fields or eventsKey not in fields or spontKey not in fields:
raise HelpfulException("Array input needs to have the following fields: 'xPos', 'yPos', the fields specified in *eventsKey* and *spontKey*. Current fields are: %s" %str(fields))
#prof.mark("checked fields")
## add 'prob' field to data array
if 'prob' not in data.dtype.names:
arr = utilFn.concatenateColumns([data, np.zeros(len(data), dtype=[('prob', float)])])
#arr[:] = data
data = arr
else:
data['prob']=0
#prof.mark("set 'prob' field")
table = np.zeros((200, 200)) ## add a lookup table so that we don't have to calculate the same probabilities over and over...saves a bit of time
## spatial correlation algorithm from :
## Bendels, MHK; Beed, P; Schmitz, D; Johenning, FW; and Leibold C. Detection of input sites in
## scanning photostimulation data based on spatial correlations. 2010. Journal of Neuroscience Methods.
## calculate probability of seeing a spontaneous event in time window -- for ZScore method, calculate probability that ZScore is spontaneously high
p = len(data[data[spontKey] < -threshold])/float(len(data))
#p = 1-np.exp(-spontRate*timeWindow)
#if printProcess:
# print "====== Spontaneous Probability: %f =======" % p
#prof.mark('calculated spontaneous probability')
## for each spot, calculate the probability of having the events in nearby spots occur randomly
for x in data:
spots = data[(np.sqrt((data['xPos']-x['xPos'])**2+(data['yPos']-x['yPos'])**2)) < radius]
nSpots = len(spots)
nEventSpots = len(spots[spots[eventsKey] < -threshold])
prob = 0
if table[nEventSpots, nSpots] != 0: ## try looking up value in table (it was stored there if we calculated it before), otherwise calculate it now
prob = table[nEventSpots, nSpots]
#prof.mark('look-up')
else:
for j in range(nEventSpots, nSpots+1):
a = ((p**j)*((1-p)**(nSpots-j))*math.factorial(nSpots))/(math.factorial(j)*math.factorial(nSpots-j))
if printProcess:
print(" Prob for %i events: %f Total: %f" %(j, a, prob+a))
prob += a
table[nEventSpots, nSpots] = prob
#prof.mark('calculate')
if printProcess: ## for debugging
print(" %i out of %i spots had events. Probability: %f" %(nEventSpots, nSpots, prob))
x['prob'] = prob
#prof.mark("calculated probabilities")
#prof.finish()
return data
|
{
"content_hash": "c4550d7439775fa69e65b93904b865b7",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 188,
"avg_line_length": 47.21052631578947,
"alnum_prop": 0.6221974482844048,
"repo_name": "pbmanis/acq4",
"id": "1b761d33de989ce5fa8c05c57f7a3a47e1a1b2ea",
"size": "8073",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "acq4/analysis/tools/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "518"
},
{
"name": "C",
"bytes": "1301111"
},
{
"name": "C++",
"bytes": "340035"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "3466867"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
from ems.auth import AccessChecker as AbstractChecker
class Permission(object):
def __init__(self, code='', title='', access=1):
super(Permission, self).__init__()
self.code = code
self.title = title
self.access = access
def __repr__(self):
return "<{0} code:{1} access:{2}>".format(self.__class__.__name__,self.code, self.access)
class PermissionHolder(object):
__metaclass__ = ABCMeta
@abstractmethod
def getPermissionAccess(self, code):
pass
@abstractmethod
def setPermissionAccess(self, code, access):
pass
@abstractmethod
def permissionCodes(self):
pass
class NestedPermissionHolder(object):
__metaclass__ = ABCMeta
@abstractmethod
def getSubHolders(self):
pass
class AccessChecker(AbstractChecker):
def hasAccess(self, user, resource, context='access'):
pass
|
{
"content_hash": "f832d639db5e42eb0e3ea54678afe8c4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 97,
"avg_line_length": 22.093023255813954,
"alnum_prop": 0.6431578947368422,
"repo_name": "mtils/ems",
"id": "6736295f59fd7e9713a5ff53909503edbbaf2ffb",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ems/auth/permission.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3631"
},
{
"name": "Python",
"bytes": "1924893"
},
{
"name": "QML",
"bytes": "16755"
}
],
"symlink_target": ""
}
|
import os
from java.awt import Font as JFont
from java.awt import BasicStroke, RenderingHints, GraphicsEnvironment
from java.awt.font import TextAttribute
from java.text import AttributedString
from java.lang import IllegalArgumentException
from java.awt.image import BufferedImage
from java.io import File
from pyj2d.surface import Surface
from pyj2d.color import Color
from pyj2d import env
__docformat__ = 'restructuredtext'
_initialized = False
_surf = None
_g2d = None
def init():
"""
**pyj2d.font.init**
Initialize font module.
"""
    global _surf, _g2d, _initialized
_surf = Surface((1,1), BufferedImage.TYPE_INT_RGB)
_g2d = _surf.createGraphics()
_initialized = True
init()
def quit():
"""
**pyj2d.font.quit**
    Uninitialize font module.
"""
global _surf, _g2d, _initialized
_g2d.dispose()
_g2d = None
_surf = None
_initialized = False
def get_init():
"""
**pyj2d.font.get_init**
    Check if font module is initialized.
"""
return _initialized
def get_default_font():
"""
**pyj2d.font.get_default_font**
Return default font.
"""
fonts = get_fonts()
for fontfamily in Font._font_family:
for font in fontfamily:
if font in fonts:
return font
return fonts[0]
def get_fonts():
"""
**pyj2d.font.get_fonts**
Return fonts available in JVM.
"""
return [''.join([c for c in f if c.isalnum()]).lower() for f in
GraphicsEnvironment.getLocalGraphicsEnvironment().getAvailableFontFamilyNames()]
def match_font(name, *args, **kwargs):
"""
**pyj2d.font.match_font**
Argument name is a font name, or comma-delimited string of font names.
Return font found on system, otherwise return None if none found.
"""
font = [''.join([c for c in f if c.isalnum()]).lower()
for f in name.split(',')]
fonts = get_fonts()
for fn in font:
if fn in fonts:
return fn
return None
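# Example (a hedged sketch): pick the first available family from a
# comma-delimited list, falling back to the default font:
#
#   name = match_font('Arial, Liberation Sans') or get_default_font()
#   font = Font(name, 24)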
class Font(JFont):
"""
**pyj2d.font.Font**
* Font.render
* Font.size
* Font.set_underline
* Font.get_underline
* Font.set_bold
* Font.get_bold
* Font.set_italic
* Font.get_italic
* Font.get_height
* Font.get_linesize
* Font.get_ascent
* Font.get_descent
"""
_font = None
_font_default = None
_font_family = [['arial', 'helvetica', 'liberationsans', 'nimbussansl',
'freesans', 'tahoma', 'sansserif'],
['verdana', 'bitstreamverasans', 'dejavusans', 'sansserif'],
['impact', 'sansserif'],
['comicsansms', 'cursive', 'sansserif'],
['couriernew', 'courier', 'lucidaconsole',
'dejavusansmono', 'monospace'],
['timesnewroman', 'times', 'liberationserif',
'nimbusromanno9l', 'serif'],
['garamond', 'bookantiqua', 'palatino',
'liberationserif', 'nimbusromanno9l', 'serif'],
['georgia', 'bitstreamveraserif', 'lucidaserif',
'liberationserif', 'dejavuserif', 'serif']]
def __init__(self, name, size):
"""
Return Font subclassed of java.awt.Font.
        Arguments include the name of a system font and the size of the font. The name argument can be a comma-delimited string of names to specify fallbacks, with a default font used if none are found, or the path of a font file (e.g. 'resource/font.ttf'), in which case an exception is raised if the file is not found.
"""
if not Font._font:
Font._font = get_fonts()
Font._font_default = get_default_font()
self.fontname, isFile = self._getFontName(name)
self.fontsize = size
if not hasattr(self, 'fontstyle'):
self.fontstyle = JFont.PLAIN
if not isFile:
JFont.__init__(self,
self.fontname,
self.fontstyle,
self.fontsize)
else:
font = self._getFont(self.fontname,
self.fontstyle,
self.fontsize)
JFont.__init__(self, font)
self.font = self
_g2d.setFont(self.font)
self.fontMetrics = _g2d.getFontMetrics()
self.underline = False
self._nonimplemented_methods()
def __str__(self):
return "%s(%r)" % (self.__class__, self.__dict__)
def __repr__(self):
return "%s(%r)" % (self.__class__, self.__dict__)
def _getFontName(self, name):
isFile = False
if not name:
return Font._font_default, isFile
if name.split('.')[-1].lower() == 'ttf':
isFile = True
return name, isFile
name = [''.join([c for c in f if c.isalnum()]).lower()
for f in name.split(',')]
for fn in name:
if fn in Font._font:
return fn, isFile
for fn in name:
for ff in Font._font_family:
if fn in ff:
for font in ff:
if font in Font._font:
return font, isFile
return Font._font_default, isFile
def _getFont(self, name, style, size):
name = os.path.normpath(name)
dirname, self.fontname = os.path.split(name)
fontpath = os.path.join(dirname,self.fontname)
if not env.japplet:
font = self.createFont(JFont.TRUETYPE_FONT, File(fontpath))
else:
font = self.createFont(JFont.TRUETYPE_FONT,
env.japplet.getClass().getResourceAsStream(fontpath))
if not font:
raise IOError
return font.deriveFont(style, float(size))
def render(self, text, antialias, color, background=None):
"""
Render text onto surface.
Arguments:
text to render (string)
antialias of text (bool)
color of text (R,G,B)
background color (R,G,B)
"""
w,h = self.size(text)
surf = Surface((w,h), BufferedImage.TYPE_INT_ARGB)
g2d = surf.createGraphics()
if background:
g2d.setColor(Color(background))
g2d.fillRect(0,0,w,h)
g2d.setFont(self.font)
if antialias:
g2d.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING,
RenderingHints.VALUE_TEXT_ANTIALIAS_ON)
g2d.setColor(Color(color))
y = ((h//2)
- ((self.fontMetrics.getAscent()+self.fontMetrics.getDescent())//2)
+ self.fontMetrics.getAscent())
if not self.underline:
g2d.drawString(text,0,y)
else:
try:
text = AttributedString(text)
text.addAttribute(TextAttribute.FONT, self.font)
text.addAttribute(TextAttribute.UNDERLINE,
TextAttribute.UNDERLINE_ON)
g2d.drawString(text.getIterator(),0,y)
except IllegalArgumentException:
pass
g2d.dispose()
return surf
def size(self, text):
"""
Return size (width,height) of rendered text.
"""
width = self.fontMetrics.stringWidth(text)
if width < 1:
width = 1
height = self.fontMetrics.getHeight()
return (width, height)
def set_underline(self, setting=True):
"""
Set font underline style.
Optional setting, default to True.
"""
self.underline = setting
def get_underline(self):
"""
Check if font is underlined.
"""
return self.underline
def set_bold(self, setting=True):
"""
Set font bold style.
Optional setting, default to True.
"""
if setting:
if self.font.isItalic():
self.font = self.deriveFont(JFont.BOLD | JFont.ITALIC)
else:
self.font = self.deriveFont(JFont.BOLD)
else:
if self.font.isItalic():
self.font = self.deriveFont(JFont.ITALIC)
else:
self.font = self.deriveFont(JFont.PLAIN)
_g2d.setFont(self.font)
self.fontMetrics = _g2d.getFontMetrics()
def get_bold(self):
"""
Check if font is bold.
"""
return self.font.isBold()
def set_italic(self, setting=True):
"""
Set font italic style.
Optional setting, default to True.
"""
if setting:
if self.font.isBold():
self.font = self.deriveFont(JFont.BOLD | JFont.ITALIC)
else:
self.font = self.deriveFont(JFont.ITALIC)
else:
if self.font.isBold():
self.font = self.deriveFont(JFont.BOLD)
else:
self.font = self.deriveFont(JFont.PLAIN)
_g2d.setFont(self.font)
self.fontMetrics = _g2d.getFontMetrics()
def get_italic(self):
"""
        Check if font is italicized.
"""
return self.font.isItalic()
def get_height(self):
"""
Return height of font.
"""
return self.fontMetrics.getHeight()
def get_linesize(self):
"""
Return linesize of font.
"""
return self.fontMetrics.getHeight()
def get_ascent(self):
"""
Return ascent of font.
"""
return self.fontMetrics.getAscent()
def get_descent(self):
"""
Return descent of font.
"""
return self.fontMetrics.getDescent()
def _nonimplemented_methods(self):
self.metrics = lambda *arg: []
class SysFont(Font):
"""
**pyj2d.font.SysFont**
* Font subclass
"""
def __init__(self, name, size, bold=False, italic=False):
"""
Return SysFont subclassed of Font.
        Arguments include the name of a system font and the size of the font, with optional bold and italic styles. The name argument can be a comma-delimited string of names to specify fallbacks, with a default font used if none are found.
"""
self.fontstyle = JFont.PLAIN
if bold:
self.fontstyle |= JFont.BOLD
if italic:
self.fontstyle |= JFont.ITALIC
Font.__init__(self,name,size)
|
{
"content_hash": "c6dadd0ad8efbded32d394ea4d9137e9",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 268,
"avg_line_length": 29.64589235127479,
"alnum_prop": 0.546201624462494,
"repo_name": "jggatc/pyj2d",
"id": "658e4063a1882363dc27620981e7ebc5608640c7",
"size": "10596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyj2d/font.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "12269"
},
{
"name": "Python",
"bytes": "259594"
}
],
"symlink_target": ""
}
|
default_app_config = 'danceschool.private_lessons.apps.PrivateLessonsConfig'
|
{
"content_hash": "4c1a2ef4ee5395077401c2943a30e6dc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 76,
"avg_line_length": 77,
"alnum_prop": 0.8441558441558441,
"repo_name": "django-danceschool/django-danceschool",
"id": "416bad4cc6aade5d722d27b5a1dd2c27b7344ef9",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "danceschool/private_lessons/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55309"
},
{
"name": "HTML",
"bytes": "334988"
},
{
"name": "JavaScript",
"bytes": "2008559"
},
{
"name": "Less",
"bytes": "21246"
},
{
"name": "Python",
"bytes": "1856445"
},
{
"name": "SCSS",
"bytes": "9564"
}
],
"symlink_target": ""
}
|
"""
Form Widget classes specific to the Django admin site.
"""
import copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
(name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
return mark_safe(u''.join(output))
class AdminDateWidget(forms.TextInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
def __init__(self, attrs={}):
super(AdminDateWidget, self).__init__(attrs={'class': 'vDateField', 'size': '10'})
class AdminTimeWidget(forms.TextInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
def __init__(self, attrs={}):
super(AdminTimeWidget, self).__init__(attrs={'class': 'vTimeField', 'size': '8'})
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
(_('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]))
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return mark_safe(u'<ul%s>\n%s\n</ul>' % (
flatatt(self.attrs),
u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
)
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.FileInput):
"""
A FileField Widget that shows its current value if it has one.
"""
def __init__(self, attrs={}):
super(AdminFileWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, "url"):
output.append('%s <a target="_blank" href="%s">%s</a> <br />%s ' % \
(_('Currently:'), value.url, value, _('Change:')))
output.append(super(AdminFileWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, attrs=None):
self.rel = rel
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
params = self.url_parameters()
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
else:
url = ''
if not attrs.has_key('class'):
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
(related_url, url, name))
output.append('<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
if value:
output.append(self.label_for_value(value))
return mark_safe(u''.join(output))
def base_url_parameters(self):
params = {}
if self.rel.limit_choices_to:
items = []
for k, v in self.rel.limit_choices_to.items():
if isinstance(v, list):
v = [str(x) for x in v]
else:
v = str(v)
items.append((k, ','.join(v)))
params.update(dict(items))
return params
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
return ' <strong>%s</strong>' % truncate_words(obj, 14)
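# Example (a hedged sketch): a ModelAdmin selects this widget by listing the
# foreign key in raw_id_fields; 'BookAdmin' and 'author' are hypothetical names:
#
#   class BookAdmin(admin.ModelAdmin):
#       raw_id_fields = ('author',)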
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def __init__(self, rel, attrs=None):
super(ManyToManyRawIdWidget, self).__init__(rel, attrs)
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join([str(v) for v in value])
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value and ',' in value:
return data[name].split(',')
if value:
return [value]
return None
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
for pk1, pk2 in zip(initial, data):
if force_unicode(pk1) != force_unicode(pk2):
return True
return False
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
def __init__(self, widget, rel, admin_site):
self.is_hidden = widget.is_hidden
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
def _media(self):
return self.widget.media
media = property(_media)
def render(self, name, value, *args, **kwargs):
rel_to = self.rel.to
related_url = '../../../%s/%s/' % (rel_to._meta.app_label, rel_to._meta.object_name.lower())
self.widget.choices = self.choices
output = [self.widget.render(name, value, *args, **kwargs)]
if rel_to in self.admin_site._registry: # If the related object has an admin interface:
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'<a href="%sadd/" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(related_url, name))
output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
return mark_safe(u''.join(output))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
        self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def _has_changed(self, initial, data):
return self.widget._has_changed(initial, data)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
class AdminIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
|
{
"content_hash": "ea0212e603d9d2d40010cf92dd191dde",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 157,
"avg_line_length": 40.03928571428571,
"alnum_prop": 0.6126126126126126,
"repo_name": "dcramer/django-compositepks",
"id": "e2cd2113f800a6f62e0160ba2afe4a75b215eab0",
"size": "11211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/admin/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "76967"
},
{
"name": "Python",
"bytes": "4145905"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
# default_application, default_controller, default_function
# are used when the respective element is missing from the
# (possibly rewritten) incoming URL
#
default_application = 'init' # ordinarily set in base routes.py
default_controller = 'default' # ordinarily set in app-specific routes.py
default_function = 'index' # ordinarily set in app-specific routes.py
# routes_app is a tuple of tuples. The first item in each is a regexp that will
# be used to match the incoming request URL. The second item in the tuple is
# an applicationname. This mechanism allows you to specify the use of an
# app-specific routes.py. This entry is meaningful only in the base routes.py.
#
# Example: support welcome, admin, app and myapp, with myapp the default:
routes_app = ((r'/(?P<app>welcome|admin|app)\b.*', r'\g<app>'),
(r'(.*)', r'myapp'),
(r'/?(.*)', r'myapp'))
# routes_in is a tuple of tuples. The first item in each is a regexp that will
# be used to match the incoming request URL. The second item in the tuple is
# what it will be replaced with. This mechanism allows you to redirect incoming
# routes to different web2py locations
#
# Example: If you wish for your entire website to use init's static directory:
#
# routes_in=( (r'/static/(?P<file>[\w./-]+)', r'/init/static/\g<file>') )
#
BASE = ''  # optional prefix for incoming URLs
routes_in = (
# do not reroute admin unless you want to disable it
(BASE + '/admin', '/admin/default/index'),
(BASE + '/admin/$anything', '/admin/$anything'),
# do not reroute appadmin unless you want to disable it
(BASE + '/$app/appadmin', '/$app/appadmin/index'),
(BASE + '/$app/appadmin/$anything', '/$app/appadmin/$anything'),
# do not reroute static files
(BASE + '/$app/static/$anything', '/$app/static/$anything'),
    # reroute favicon and robots; use the examples app for lack of a better choice
('/favicon.ico', '/examples/static/favicon.ico'),
('/robots.txt', '/examples/static/robots.txt'),
# do other stuff
    (r'.*http://otherdomain.com.* (?P<any>.*)', r'/app/ctr\g<any>'),
# remove the BASE prefix
(BASE + '/$anything', '/$anything'),
)
# routes_out, like routes_in translates URL paths created with the web2py URL()
# function in the same manner that route_in translates inbound URL paths.
#
routes_out = (
# do not reroute admin unless you want to disable it
('/admin/$anything', BASE + '/admin/$anything'),
# do not reroute appadmin unless you want to disable it
('/$app/appadmin/$anything', BASE + '/$app/appadmin/$anything'),
# do not reroute static files
('/$app/static/$anything', BASE + '/$app/static/$anything'),
# do other stuff
(r'.*http://otherdomain.com.* /app/ctr(?P<any>.*)', r'\g<any>'),
(r'/app(?P<any>.*)', r'\g<any>'),
# restore the BASE prefix
('/$anything', BASE + '/$anything'),
)
# Specify log level for rewrite's debug logging
# Possible values: debug, info, warning, error, critical (loglevels),
# off, print (print uses print statement rather than logging)
# GAE users may want to use 'off' to suppress routine logging.
#
logging = 'debug'
# Error-handling redirects all HTTP errors (status codes >= 400) to a specified
# path. If you wish to use error-handling redirects, uncomment the tuple
# below. You can customize responses by adding a tuple entry with the first
# value in 'appName/HTTPstatusCode' format. ( Only HTTP codes >= 400 are
# routed. ) and the value as a path to redirect the user to. You may also use
# '*' as a wildcard.
#
# The error handling page is also passed the error code and ticket as
# variables. Traceback information will be stored in the ticket.
#
# routes_onerror = [
# (r'init/400', r'/init/default/login')
# ,(r'init/*', r'/init/static/fail.html')
# ,(r'*/404', r'/init/static/cantfind.html')
# ,(r'*/*', r'/init/error/index')
# ]
# specify action in charge of error handling
#
# error_handler = dict(application='error',
# controller='default',
# function='index')
# In the event that the error-handling page itself returns an error, web2py will
# fall back to its old static responses. You can customize them here.
# ErrorMessageTicket takes a string format dictionary containing (only) the
# "ticket" key.
# error_message = '<html><body><h1>%s</h1></body></html>'
# error_message_ticket = '<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body></html>'
# specify a list of apps that bypass args-checking and use request.raw_args
#
#routes_apps_raw=['myapp']
#routes_apps_raw=['myapp', 'myotherapp']
def __routes_doctest():
'''
Dummy function for doctesting routes.py.
Use filter_url() to test incoming or outgoing routes;
filter_err() for error redirection.
filter_url() accepts overrides for method and remote host:
filter_url(url, method='get', remote='0.0.0.0', out=False)
filter_err() accepts overrides for application and ticket:
filter_err(status, application='app', ticket='tkt')
>>> import os
>>> import gluon.main
>>> from gluon.rewrite import regex_select, load, filter_url, regex_filter_out, filter_err, compile_regex
>>> regex_select()
>>> load(routes=os.path.basename(__file__))
>>> os.path.relpath(filter_url('http://domain.com/favicon.ico'))
'applications/examples/static/favicon.ico'
>>> os.path.relpath(filter_url('http://domain.com/robots.txt'))
'applications/examples/static/robots.txt'
>>> filter_url('http://domain.com')
'/init/default/index'
>>> filter_url('http://domain.com/')
'/init/default/index'
>>> filter_url('http://domain.com/init/default/fcn')
'/init/default/fcn'
>>> filter_url('http://domain.com/init/default/fcn/')
'/init/default/fcn'
>>> filter_url('http://domain.com/app/ctr/fcn')
'/app/ctr/fcn'
>>> filter_url('http://domain.com/app/ctr/fcn/arg1')
"/app/ctr/fcn ['arg1']"
>>> filter_url('http://domain.com/app/ctr/fcn/arg1/')
"/app/ctr/fcn ['arg1']"
>>> filter_url('http://domain.com/app/ctr/fcn/arg1//')
"/app/ctr/fcn ['arg1', '']"
>>> filter_url('http://domain.com/app/ctr/fcn//arg1')
"/app/ctr/fcn ['', 'arg1']"
>>> filter_url('HTTP://DOMAIN.COM/app/ctr/fcn')
'/app/ctr/fcn'
>>> filter_url('http://domain.com/app/ctr/fcn?query')
'/app/ctr/fcn ?query'
>>> filter_url('http://otherdomain.com/fcn')
'/app/ctr/fcn'
>>> regex_filter_out('/app/ctr/fcn')
'/ctr/fcn'
>>> filter_url('https://otherdomain.com/app/ctr/fcn', out=True)
'/ctr/fcn'
>>> filter_url('https://otherdomain.com/app/ctr/fcn/arg1//', out=True)
'/ctr/fcn/arg1//'
>>> filter_url('http://otherdomain.com/app/ctr/fcn', out=True)
'/fcn'
>>> filter_url('http://otherdomain.com/app/ctr/fcn?query', out=True)
'/fcn?query'
>>> filter_url('http://otherdomain.com/app/ctr/fcn#anchor', out=True)
'/fcn#anchor'
>>> filter_err(200)
200
>>> filter_err(399)
399
>>> filter_err(400)
400
>>> filter_url('http://domain.com/welcome', app=True)
'welcome'
>>> filter_url('http://domain.com/', app=True)
'myapp'
>>> filter_url('http://domain.com', app=True)
'myapp'
>>> compile_regex('.*http://otherdomain.com.* (?P<any>.*)', '/app/ctr\g<any>')[0].pattern
'^.*http://otherdomain.com.* (?P<any>.*)$'
>>> compile_regex('.*http://otherdomain.com.* (?P<any>.*)', '/app/ctr\g<any>')[1]
'/app/ctr\\\\g<any>'
>>> compile_regex('/$c/$f', '/init/$c/$f')[0].pattern
'^.*?:https?://[^:/]+:[a-z]+ /(?P<c>\\\\w+)/(?P<f>\\\\w+)$'
>>> compile_regex('/$c/$f', '/init/$c/$f')[1]
'/init/\\\\g<c>/\\\\g<f>'
'''
pass
if __name__ == '__main__':
import doctest
doctest.testmod()
|
{
"content_hash": "f8e784b908b2dc3b2dccc20844def966",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 164,
"avg_line_length": 40.69346733668342,
"alnum_prop": 0.6159545566806619,
"repo_name": "ccpgames/eve-metrics",
"id": "b6a5531530337d39182116fbdb1e18d152c4306c",
"size": "8098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web2py/routes.example.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "196893"
},
{
"name": "HTML",
"bytes": "596704"
},
{
"name": "JavaScript",
"bytes": "2638362"
},
{
"name": "Makefile",
"bytes": "6502"
},
{
"name": "PHP",
"bytes": "15326"
},
{
"name": "Python",
"bytes": "6657223"
},
{
"name": "Shell",
"bytes": "92054"
},
{
"name": "Tcl",
"bytes": "76950"
}
],
"symlink_target": ""
}
|
import os
import requests # pip install requests
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "******************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Source PDF file
SourceFile = ".\\sample.pdf"
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
def main(args = None):
uploadedFileUrl = uploadFile(SourceFile)
    if uploadedFileUrl is not None:
convertPdfToImage(uploadedFileUrl)
def convertPdfToImage(uploadedFileUrl):
"""Converts PDF To Image using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["password"] = Password
parameters["pages"] = Pages
parameters["url"] = uploadedFileUrl
# Prepare URL for 'PDF To JPG' API request
url = "{}/pdf/convert/to/jpg".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Download generated JPG files
part = 1
for resultFileUrl in json["urls"]:
# Download Result File
r = requests.get(resultFileUrl, stream=True)
localFileUrl = f"Page{part}.jpg"
if r.status_code == 200:
with open(localFileUrl, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{localFileUrl}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
part = part + 1
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def uploadFile(fileName):
"""Uploads file to the cloud"""
# 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
# Prepare URL for 'Get Presigned URL' API request
url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
BASE_URL, os.path.basename(fileName))
# Execute request and get response as JSON
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# URL to use for file upload
uploadUrl = json["presignedUrl"]
# URL for future reference
uploadedFileUrl = json["url"]
# 2. UPLOAD FILE TO CLOUD.
with open(fileName, 'rb') as file:
requests.put(uploadUrl, data=file, headers={ "x-api-key": API_KEY, "content-type": "application/octet-stream" })
return uploadedFileUrl
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
main()
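# Usage (a sketch; assumes API_KEY above holds a valid PDF.co key and that
# sample.pdf exists in the working directory):
#
#   python ConvertPdfToImageFromUploadedFile.py
#   # -> Page1.jpg, Page2.jpg, ... written next to the script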
|
{
"content_hash": "0ba97f54bd0fb2170840bf5380e08419",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 128,
"avg_line_length": 32.25961538461539,
"alnum_prop": 0.585096870342772,
"repo_name": "bytescout/ByteScout-SDK-SourceCode",
"id": "2911c5c3c9b1a0a6499bcf1910ab6aecadc672ea",
"size": "3355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PDF.co Web API/PDF To JPG/Python/Convert PDF To Image From Uploaded File/ConvertPdfToImageFromUploadedFile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "364116"
},
{
"name": "Apex",
"bytes": "243500"
},
{
"name": "Batchfile",
"bytes": "151832"
},
{
"name": "C",
"bytes": "224568"
},
{
"name": "C#",
"bytes": "12909855"
},
{
"name": "C++",
"bytes": "440474"
},
{
"name": "CSS",
"bytes": "56817"
},
{
"name": "Classic ASP",
"bytes": "46655"
},
{
"name": "Dockerfile",
"bytes": "776"
},
{
"name": "Gherkin",
"bytes": "3386"
},
{
"name": "HTML",
"bytes": "17276296"
},
{
"name": "Java",
"bytes": "1483408"
},
{
"name": "JavaScript",
"bytes": "3033610"
},
{
"name": "PHP",
"bytes": "838746"
},
{
"name": "Pascal",
"bytes": "398090"
},
{
"name": "PowerShell",
"bytes": "715204"
},
{
"name": "Python",
"bytes": "703542"
},
{
"name": "QMake",
"bytes": "880"
},
{
"name": "TSQL",
"bytes": "3080"
},
{
"name": "VBA",
"bytes": "383773"
},
{
"name": "VBScript",
"bytes": "1504410"
},
{
"name": "Visual Basic .NET",
"bytes": "9489450"
}
],
"symlink_target": ""
}
|
import os
from bf1.exceptions import EnvVarNotSet
def get_token():
try:
return os.environ['API_KEY']
except KeyError:
raise EnvVarNotSet('API_KEY')
API_KEY = get_token()
METHOD = 'get'
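# Usage note (following from the code above): get_token() runs at import time,
# so the API_KEY environment variable must be set before this module is
# imported, e.g. `export API_KEY=...`; otherwise EnvVarNotSet('API_KEY')
# is raised.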
|
{
"content_hash": "d312040c5052dafa6c3b802d7a514b12",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 39,
"avg_line_length": 15.285714285714286,
"alnum_prop": 0.6448598130841121,
"repo_name": "Girbons/battlefield1-sdk",
"id": "d1bda1a134a5071ea016f661f87ff6178cd46c9c",
"size": "214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51016"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('sub1')
test.write('SConstruct', """\
env=Environment()
Export('env')
env.SConscript('SConscript', variant_dir='Build')
""")
test.write('SConscript', """\
Import('env')
env.Command('foo.out', 'SConscript', Copy('$TARGET', '$SOURCE'))
""")
test.run(arguments = "--debug=duplicate -Q",
stdout='.*relinking variant.*', match=TestSCons.match_re_dotall)
test.must_exist('Build/foo.out')
test.write('SConscript', """\
# different this time!
Import('env')
env.Command('foo.out', 'SConscript', Copy('$TARGET', '$SOURCE'))
""")
test.run(arguments = "--debug=duplicate -Q",
stdout='.*relinking variant.*removing existing target.*',
match=TestSCons.match_re_dotall)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "a6d7e53d21d28614aee742f8df80972e",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 73,
"avg_line_length": 28.875706214689266,
"alnum_prop": 0.7065153590295441,
"repo_name": "azatoth/scons",
"id": "e46a53534f1f1ce2c283460e8687ef7d92e7683e",
"size": "6213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/option/debug-duplicate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "6716123"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
}
|
"""Module containing Hadoop installation and cleanup functions.
For documentation of commands to run at startup and shutdown, see:
http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Hadoop_Startup
"""
import functools
import logging
import os
import posixpath
import re
import time
from perfkitbenchmarker import data
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import vm_util
HADOOP_VERSION = '2.5.2'
HADOOP_URL = ('http://www.us.apache.org/dist/hadoop/common/hadoop-{0}/'
'hadoop-{0}.tar.gz').format(HADOOP_VERSION)
DATA_FILES = ['hadoop/core-site.xml.j2', 'hadoop/yarn-site.xml.j2',
'hadoop/hdfs-site.xml', 'hadoop/mapred-site.xml',
'hadoop/hadoop-env.sh.j2', 'hadoop/slaves.j2']
START_HADOOP_SCRIPT = 'hadoop/start-hadoop.sh.j2'
HADOOP_DIR = posixpath.join(vm_util.VM_TMP_DIR, 'hadoop')
HADOOP_BIN = posixpath.join(HADOOP_DIR, 'bin')
HADOOP_SBIN = posixpath.join(HADOOP_DIR, 'sbin')
HADOOP_CONF_DIR = posixpath.join(HADOOP_DIR, 'etc', 'hadoop')
HADOOP_PRIVATE_KEY = posixpath.join(HADOOP_CONF_DIR, 'hadoop_keyfile')
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
for resource in DATA_FILES + [START_HADOOP_SCRIPT]:
data.ResourcePath(resource)
def _Install(vm):
vm.Install('openjdk7')
vm.Install('curl')
vm.RemoteCommand(('mkdir {0} && curl -L {1} | '
'tar -C {0} --strip-components=1 -xzf -').format(
HADOOP_DIR, HADOOP_URL))
def YumInstall(vm):
"""Installs Hadoop on the VM."""
vm.InstallPackages('snappy snappy-devel')
_Install(vm)
def AptInstall(vm):
"""Installs Hadoop on the VM."""
vm.InstallPackages('libsnappy1 libsnappy-dev')
_Install(vm)
# TODO: revisit memory fraction.
def _RenderConfig(vm, master_ip, worker_ips, memory_fraction=0.9):
yarn_memory_mb = int((vm.total_memory_kb / 1024) * memory_fraction)
context = {
'master_ip': master_ip,
'worker_ips': worker_ips,
'scratch_dir': posixpath.join(vm.GetScratchDir(), 'hadoop'),
'vcpus': vm.num_cpus,
'hadoop_private_key': HADOOP_PRIVATE_KEY,
'yarn_memory_mb': yarn_memory_mb
}
for file_name in DATA_FILES:
file_path = data.ResourcePath(file_name)
remote_path = posixpath.join(HADOOP_CONF_DIR,
os.path.basename(file_name))
if file_name.endswith('.j2'):
vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
else:
vm.RemoteCopy(file_path, remote_path)
def _GetHDFSOnlineNodeCount(master):
cmd = '{0} dfsadmin -report'.format(posixpath.join(HADOOP_BIN, 'hdfs'))
stdout = master.RemoteCommand(cmd)[0]
avail_str = regex_util.ExtractGroup(r'Live datanodes\s+\((\d+)\):', stdout)
return int(avail_str)
def _GetYARNOnlineNodeCount(master):
cmd = '{0} node -list -all'.format(posixpath.join(HADOOP_BIN, 'yarn'))
stdout = master.RemoteCommand(cmd)[0]
return len(re.findall(r'RUNNING', stdout))
def ConfigureAndStart(master, workers, start_yarn=True):
"""Configure hadoop on a cluster.
Args:
master: VM. Master VM - will be the HDFS NameNode, YARN ResourceManager.
workers: List of VMs. Each VM will run an HDFS DataNode, YARN node.
start_yarn: bool. Start YARN and JobHistory server? Set to False if HDFS is
the only service required. Default: True.
"""
vms = [master] + workers
fn = functools.partial(_RenderConfig, master_ip=master.internal_ip,
worker_ips=[worker.internal_ip for worker in workers])
vm_util.RunThreaded(fn, vms)
master.RemoteCommand(
"rm -f {0} && ssh-keygen -q -t rsa -N '' -f {0}".format(
HADOOP_PRIVATE_KEY))
public_key = master.RemoteCommand('cat {0}.pub'.format(HADOOP_PRIVATE_KEY))[0]
def AddKey(vm):
vm.RemoteCommand('echo "{0}" >> ~/.ssh/authorized_keys'.format(public_key))
vm_util.RunThreaded(AddKey, vms)
context = {'hadoop_dir': HADOOP_DIR,
'vm_ips': [vm.internal_ip for vm in vms],
'start_yarn': start_yarn}
# HDFS setup and formatting, YARN startup
script_path = posixpath.join(HADOOP_DIR, 'start-hadoop.sh')
master.RenderTemplate(data.ResourcePath(START_HADOOP_SCRIPT),
script_path, context=context)
master.RemoteCommand('bash {0}'.format(script_path), should_log=True)
logging.info('Sleeping 10s for Hadoop nodes to join.')
time.sleep(10)
logging.info('Checking HDFS status.')
hdfs_online_count = _GetHDFSOnlineNodeCount(master)
if hdfs_online_count != len(workers):
raise ValueError('Not all nodes running HDFS: {0} < {1}'.format(
hdfs_online_count, len(workers)))
else:
logging.info('HDFS running on all %d workers', len(workers))
if start_yarn:
logging.info('Checking YARN status.')
yarn_online_count = _GetYARNOnlineNodeCount(master)
if yarn_online_count != len(workers):
raise ValueError('Not all nodes running YARN: {0} < {1}'.format(
yarn_online_count, len(workers)))
else:
logging.info('YARN running on all %d workers', len(workers))
def StopYARN(master):
"""Stop YARN on all nodes."""
master.RemoteCommand(posixpath.join(HADOOP_SBIN, 'stop-yarn.sh'))
def StopHDFS(master):
"""Stop HDFS on all nodes."""
master.RemoteCommand(posixpath.join(HADOOP_SBIN, 'stop-dfs.sh'))
def StopHistoryServer(master):
"""Stop the MapReduce JobHistory daemon."""
master.RemoteCommand('{0} stop historyserver'.format(
posixpath.join(HADOOP_SBIN, 'mr-jobhistory-daemon.sh')))
def StopAll(master):
"""Stop HDFS and YARN.
Args:
master: VM. HDFS NameNode/YARN ResourceManager.
"""
StopHistoryServer(master)
StopYARN(master)
StopHDFS(master)
def CleanDatanode(vm):
"""Delete Hadoop data from 'vm'."""
vm.RemoteCommand('rm -rf {0}'.format(
posixpath.join(vm.GetScratchDir(), 'hadoop')))
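# --- Editor's usage sketch (not part of the original module) ---
# A benchmark would typically drive this package roughly as follows; the
# ``master``/``workers`` VM objects come from the benchmark spec and are
# assumptions here, not something this module provides:
#
#     for vm in [master] + workers:
#         vm.Install('hadoop')            # dispatches to Yum/AptInstall above
#     ConfigureAndStart(master, workers)  # format HDFS, start YARN, verify
#     ...                                 # run the workload
#     StopAll(master)
#     vm_util.RunThreaded(CleanDatanode, workers)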
|
{
"content_hash": "9035e62b10f468340b152adfcb6e4299",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 104,
"avg_line_length": 32.69398907103825,
"alnum_prop": 0.6770850743774026,
"repo_name": "mateusz-blaszkowski/PerfKitBenchmarker",
"id": "b3cc906fd9d15916921495290cfd497ff28c40bc",
"size": "6593",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/linux_packages/hadoop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1282006"
},
{
"name": "Shell",
"bytes": "23160"
}
],
"symlink_target": ""
}
|
import sys, json, requests
url, filepath = sys.argv[1:3]
if len(sys.argv) > 3:
inp = sys.argv[3]
else:
inp = 'nexus'
content = open(filepath, 'rU').read()
data = {'inputFormat': inp,
'idPrefix': '',
'dataDeposit': 'http://example.org',
'content': content,
}
r = requests.post(url, data=data)
try:
blob = r.json()
print json.dumps(blob['data'],indent=1, sort_keys=True)
print 'uploadid =', blob['uploadid']
except Exception:
print r.text
if 200 != r.status_code:
sys.exit(r.status_code)
try:
trees = blob['data']['nexml']['trees']
sys.exit('Unexpected trees found!')
except Exception:
pass
|
{
"content_hash": "7242bf327467d493414599cdcb2536e2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 59,
"avg_line_length": 25.03846153846154,
"alnum_prop": 0.6006144393241167,
"repo_name": "OpenTreeOfLife/opentree",
"id": "cac24d21197e2c2b6ceecd158549c4714bc2985d",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curator/test/testNoTreesString2nexml.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "91586"
},
{
"name": "Go",
"bytes": "6808"
},
{
"name": "HTML",
"bytes": "728798"
},
{
"name": "JavaScript",
"bytes": "2145214"
},
{
"name": "Less",
"bytes": "159886"
},
{
"name": "Makefile",
"bytes": "613"
},
{
"name": "PHP",
"bytes": "52477"
},
{
"name": "Python",
"bytes": "750874"
},
{
"name": "Shell",
"bytes": "4890"
}
],
"symlink_target": ""
}
|
from django.conf import settings
def account(request):
return {
"CONTACT_EMAIL": getattr(settings, "CONTACT_EMAIL", "support@example.com")
}
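# --- Editor's usage sketch (not part of the original module) ---
# To expose CONTACT_EMAIL to templates, this processor would be listed in the
# project settings (dotted path inferred from this file's location):
#
#     TEMPLATE_CONTEXT_PROCESSORS += (
#         'allauth.account.context_processors.account',
#     )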
|
{
"content_hash": "efe0adbba2eeedd067f169a43067151f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 82,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6772151898734177,
"repo_name": "uroslates/django-allauth",
"id": "95867b2ecfd1574a2ca5d41bdd247971f6df19e4",
"size": "158",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "allauth/account/context_processors.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
__copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
import pytest
import scine_utilities as scine
def test_ElementTypes():
# Test the string magic function
assert str(scine.ElementType.K) == "K"
assert str(scine.ElementType.Cl) == "Cl"
# Test the arithmetic properties of the enum
assert scine.ElementType.Ar == scine.ElementType.Ar
assert scine.ElementType.Ar != scine.ElementType.Ti
|
{
"content_hash": "6dbfcb3fe851724393b46cf43c915217",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 33.5,
"alnum_prop": 0.7332089552238806,
"repo_name": "qcscine/utilities",
"id": "fcdc0a6244287979dce1a9e4fecb30f0a51599ed",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Utils/Python/Tests/test_ElementTypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4108"
},
{
"name": "C++",
"bytes": "5868099"
},
{
"name": "CMake",
"bytes": "11075"
},
{
"name": "Python",
"bytes": "35735"
}
],
"symlink_target": ""
}
|
import json
import logging
from django.shortcuts import render, redirect
from django.views.decorators.http import require_http_methods
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from ims_lti_py.tool_config import ToolConfig
import django.http as http
from models import CanvasApiAuthorization, EdxCourse
from canvas_sdk.exceptions import CanvasAPIError
import canvas_api
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from django.conf import settings
import os
TOOL_NAME = "edx2canvas"
log = logging.getLogger("edx2canvas.log")
@require_http_methods(['GET'])
def tool_config(request):
"""
    This produces a Canvas-specific XML config that can be used to
add this tool to the Canvas LMS
"""
if request.is_secure():
host = 'https://' + request.get_host()
else:
host = 'http://' + request.get_host()
url = host + reverse('edx2canvas:lti_launch')
lti_tool_config = ToolConfig(
title='Add edX Content',
launch_url=url,
secure_launch_url=url,
)
account_nav_params = {
'enabled': 'true',
'text': 'Add edX Content',
'visibility': 'admins',
}
lti_tool_config.set_ext_param('canvas.instructure.com', 'privacy_level', 'public')
lti_tool_config.set_ext_param('canvas.instructure.com', 'course_navigation', account_nav_params)
lti_tool_config.description = 'Import content from edX to Canvas'
return http.HttpResponse(
lti_tool_config.to_xml(), content_type='text/xml', status=200
)
@login_required()
@require_http_methods(['POST'])
def lti_launch(request):
if request.user.is_authenticated():
return redirect('edx2canvas:main')
else:
return render(request, 'edx2canvas/error.html', {'message': 'Error: user is not authenticated!'})
@login_required()
@require_http_methods(['GET'])
def main(request):
"""
Launch the main page of the authoring app. Create a context that includes
all available edX courses and the module structure of the Canvas course from
which the tool was launched.
"""
try:
canvas_course_id = request.session['LTI_LAUNCH']['custom_canvas_course_id']
canvas_user_id = request.session['LTI_LAUNCH']['user_id']
except KeyError:
return http.HttpResponseBadRequest()
edx_courses = EdxCourse.objects.all()
try:
canvas_auth = CanvasApiAuthorization.objects.get(lti_user_id=canvas_user_id)
except CanvasApiAuthorization.DoesNotExist:
return canvas_api.start_oauth(request, canvas_user_id)
try:
canvas_modules = canvas_api.get_module_list(canvas_auth, canvas_course_id)
except CanvasAPIError as e:
if e.status_code == 401:
return canvas_api.start_oauth(request, canvas_user_id)
raise
return render(request, 'edx2canvas/index.html', {
'edx_courses': edx_courses,
'canvas_modules': json.dumps({'id': canvas_course_id, 'modules': canvas_modules})
})
@login_required()
@require_http_methods(['GET'])
def get_canvas_modules(request):
"""
Fetch the list of modules available in the Canvas course that launched the
tool.
Returns a JSON object with:
- id: the Canvas course ID.
- modules: a list of Canvas module objects.
"""
try:
canvas_course_id = request.GET['course_id']
canvas_user_id = request.session['LTI_LAUNCH']['user_id']
except KeyError:
return http.HttpResponseBadRequest()
try:
canvas_auth = CanvasApiAuthorization.objects.get(lti_user_id=canvas_user_id)
except CanvasApiAuthorization.DoesNotExist:
return http.HttpResponseForbidden()
module_list = canvas_api.get_module_list(canvas_auth, canvas_course_id)
return http.JsonResponse(
{'id': request.GET['course_id'], 'modules': module_list}, safe=False
)
@login_required()
@require_http_methods(['GET'])
def get_edx_course(request):
"""
Load and parse an edX course.
Returns a JSON representation of the edX course structure. Note that this
JSON object is a direct parsing of the edX course XML structure, and may
change with little or no warning if the edX export format is modified.
"""
try:
course_id = request.GET['edx_course_id']
except KeyError:
return http.HttpResponseBadRequest()
try:
edx_course = EdxCourse.objects.get(id=course_id)
except EdxCourse.DoesNotExist:
return http.HttpResponseNotFound()
input_filename = '%s.json' % course_id
if settings.STORE_FILES_IN_S3:
try:
courses_bucket_name = getattr(settings, 'COURSES_BUCKET', None)
# get the bucket
log.info("reading file from s3")
conn = S3Connection()
courses_bucket = conn.get_bucket(courses_bucket_name)
path = getattr(settings, 'COURSES_FOLDER', None)
full_key_name = os.path.join(path, input_filename)
k = Key(courses_bucket)
k.key = full_key_name
k.content_type = 'application/json'
k.content_encoding = 'UTF-8'
parsed = json.loads(k.get_contents_as_string())
k.close()
parsed['id'] = course_id
return http.JsonResponse(parsed, safe=False)
        except Exception:  # boto raises S3ResponseError, not IOError
            return http.HttpResponseNotFound()
else:
try:
with open("courses/{}.json".format(course_id)) as infile:
parsed = json.load(infile)
parsed['id'] = course_id
return http.JsonResponse(parsed, safe=False)
except IOError:
return http.HttpResponseNotFound()
@require_http_methods(['POST'])
def create_edx_course(request):
try:
data = json.loads(request.body)
title = data['title']
org = data['org']
course = data['course']
run = data['run']
key_version = data['key_version']
body = json.loads(data['body'])
edx_course, __ = EdxCourse.objects.get_or_create(
title=title,
org=org,
course=course,
run=run,
key_version=key_version
)
except KeyError as e:
log.info("{}".format(e))
return http.HttpResponseBadRequest()
output_filename = '%s.json' % edx_course.id
output = json.dumps(body, indent=4)
if settings.STORE_FILES_IN_S3:
try:
utf8_output = output.encode('utf-8')
courses_bucket_name = getattr(settings, 'COURSES_BUCKET', None)
# get the bucket
log.info("writing file to s3")
conn = S3Connection()
courses_bucket = conn.get_bucket(courses_bucket_name)
path = getattr(settings, 'COURSES_FOLDER', None)
full_key_name = os.path.join(path, output_filename)
k = Key(courses_bucket)
k.key = full_key_name
k.content_type = 'application/json'
k.content_encoding = 'UTF-8'
k.set_contents_from_string(utf8_output)
k.close()
except Exception as e:
log.info("{}".format(e))
return http.HttpResponseServerError()
else:
with open("courses/{}.json".format(edx_course.id), 'w') as outfile:
            outfile.write(output)
return HttpResponse(status=201)
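# --- Editor's usage sketch (not part of the original module) ---
# create_edx_course expects a JSON body with the fields read above; the URL
# is hypothetical, and 'body' is itself a JSON-encoded course tree:
#
#     import json, requests
#     payload = {'title': 'Demo course', 'org': 'DemoX', 'course': 'CS101',
#                'run': '2015', 'key_version': 1,
#                'body': json.dumps({'chapters': []})}
#     requests.post('https://example.org/edx2canvas/create', json=payload)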
|
{
"content_hash": "82009008f5c294afd82d0645536a9c88",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 105,
"avg_line_length": 33.800904977375566,
"alnum_prop": 0.6329317269076306,
"repo_name": "penzance/edx-in-canvas",
"id": "89005572d3a7872ee83de39d35965f4c575a6b8b",
"size": "7470",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "edx2canvas/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20882"
},
{
"name": "HTML",
"bytes": "14271"
},
{
"name": "JavaScript",
"bytes": "25757"
},
{
"name": "Puppet",
"bytes": "5944"
},
{
"name": "Python",
"bytes": "64549"
},
{
"name": "Shell",
"bytes": "332"
}
],
"symlink_target": ""
}
|
import qiime2.core.archive.format.v0 as v0
class ArchiveFormat(v0.ArchiveFormat):
PROVENANCE_DIR = 'provenance'
@classmethod
def write(cls, archive_record, type, format, data_initializer,
provenance_capture):
super().write(archive_record, type, format, data_initializer,
provenance_capture)
root = archive_record.root
prov_dir = root / cls.PROVENANCE_DIR
prov_dir.mkdir()
provenance_capture.finalize(
prov_dir, [root / cls.METADATA_FILE, archive_record.version_fp])
def __init__(self, archive_record):
super().__init__(archive_record)
self.provenance_dir = archive_record.root / self.PROVENANCE_DIR
|
{
"content_hash": "2a0792330eb7a3a0fc2f1c710ee96cfb",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 31.565217391304348,
"alnum_prop": 0.6377410468319559,
"repo_name": "thermokarst/qiime2",
"id": "6980762ecb57fe8607a1e9fd145f4ceb61b13597",
"size": "1076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiime2/core/archive/format/v1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "506"
},
{
"name": "Python",
"bytes": "895865"
},
{
"name": "Shell",
"bytes": "217"
},
{
"name": "TeX",
"bytes": "5480"
}
],
"symlink_target": ""
}
|
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from tensorflow.tools.test import system_info_lib
def main(unused_args):
config = system_info_lib.gather_machine_configuration()
print(config)
if __name__ == "__main__":
app.run() # pylint: disable=no-value-for-parameter
|
{
"content_hash": "5d1868b9e8d99bf4b8f2832f9f9ce55f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 25.529411764705884,
"alnum_prop": 0.7327188940092166,
"repo_name": "sarvex/tensorflow",
"id": "4f1a1a49f188d80e5061b53c504026cc02eb8fc5",
"size": "1123",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tensorflow/tools/test/system_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore
from PyQt4.QtCore import pyqtSignal, pyqtSlot
# Worker thread type constants, used to tell results apart when handling returned data
from src.strategy import strategy_macdCross, strategy_macdDiverse, strategy_maCross
workerTypeDayRise = 1
workerTypePickup = 2
workerStop = False
class WorkerDayRiseList(QtCore.QObject):
"""
    Worker thread that fetches the daily top-gainers list.
"""
start = pyqtSignal()
progressRange = pyqtSignal(int)
progressStep = QtCore.pyqtSignal(int, name="changed")
def __init__(self, data_provider, parent=None):
super(WorkerDayRiseList, self).__init__(parent)
self.start.connect(self.run)
self.dataProvider = data_provider
@pyqtSlot()
def run(self):
# print('object worker thread id: {}'.format(QtCore.QThread.currentThreadId()))
code_list = self.dataProvider.get_day_rise()
self.emit(QtCore.SIGNAL('work_finished'), workerTypeDayRise, code_list)
class WorkerPickup(QtCore.QObject):
"""
    Worker thread that runs the stock-picking operation.
"""
start = pyqtSignal()
progressRange = pyqtSignal(int)
progressStep = QtCore.pyqtSignal(int, name="changed")
def __init__(self, tradeDate, codeList, klineType, dataProvider, strategyFilter, strategy, param, parent=None):
"""
        :param tradeDate: trade date
        :param codeList: list of stock codes
        :param klineType: K-line period, one of [u'30分钟', u'60分钟', u'日线', u'周线', u'月线']
                          (30-minute, 60-minute, daily, weekly, monthly)
        :param strategyFilter: stock-picking strategy filter
:param parent:
:param param: maCross:ma1,ma2
:return:
"""
super(WorkerPickup, self).__init__(parent)
self.start.connect(self.run)
self.tradeDate = tradeDate
self.codeList = codeList
self.klineType = klineType
self.dataProvider = dataProvider
self.strategyFilter = strategyFilter
self.strategy = strategy
self.pickup = []
self.param = param
@pyqtSlot()
def run(self):
self.pickup = []
# apply strategy
step = 1
bingo = False
codes = self.codeList.ix[:, 0]
for code in codes:
if workerStop:
break
self.emit(QtCore.SIGNAL('progressUpdate'), step)
step += 1
bingo = False
for st in self.strategyFilter:
if st == strategy_macdCross:
if self.strategy.macd_cross(code, self.tradeDate, self.klineType):
bingo = True
else:
continue
elif st == strategy_macdDiverse:
if self.strategy.macd_divergence(code, self.tradeDate, self.klineType):
bingo = True
else:
continue
elif st == strategy_maCross:
if self.strategy.ma_cross(code, self.tradeDate, self.klineType, self.param):
bingo = True
else:
continue
if not bingo:
continue
print code
self.pickup.append(code)
rowData = self.codeList[self.codeList['code'] == code]
try:
                # Encodings differ between data sources; normalize to unicode here
stockName = unicode(rowData['name'].tolist()[0], 'utf-8')
except:
stockName = rowData['name'].tolist()[0]
if 'pe' in rowData:
                # Fetch change percent, closing price and turnover ratio
rowData = self.strategy.dataProvider.get_last_trade_data(code)
self.emit(QtCore.SIGNAL('updatePickup'), [len(self.pickup), code, stockName, '%.02f' % rowData['changepercent'], '%.02f' % rowData['trade'], '%.02f' % rowData['turnoverratio']])
self.emit(QtCore.SIGNAL('work_finished'), workerTypePickup, [])
if __name__ == '__main__':
pass
|
{
"content_hash": "7d723ee37193d0dbe4880e303b345153",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 189,
"avg_line_length": 34.127272727272725,
"alnum_prop": 0.5623335109216835,
"repo_name": "aslucky/StockHelper",
"id": "6473e572dbd4d5dc44abbc59859fa26dbd8f38f2",
"size": "3982",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/threadWorker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46339"
}
],
"symlink_target": ""
}
|
from threading import Thread
from subprocess import Popen, PIPE
class PlaySound(Thread):
def __init__(self, filename, volume):
Thread.__init__(self)
self.filename = filename
self.volume = volume
def run(self):
cmd = 'play -v ' + self.volume + ' ' + self.filename
p = Popen(cmd, shell=True, stderr=PIPE, close_fds=True)
# TODO: Test if limits the number of clicks
p.wait()
if p.returncode != 0:
print '\033[1;31mWe found a error with SoX, did you install it?\033[1;m'
p.stderr.read()
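# --- Editor's usage sketch (not part of the original module) ---
# Volume is passed as a string because it is concatenated into the shell
# command; playback requires SoX's ``play`` binary on the PATH:
#
#     PlaySound('click.wav', '0.5').start()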
|
{
"content_hash": "d1694207aa695f85172369f914510618",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6441947565543071,
"repo_name": "Nixsm/linux-clicky",
"id": "ad2b692f10aa2877edb5ddd6405721f92ab30f17",
"size": "691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "linux_clicky/play_sound.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3508"
}
],
"symlink_target": ""
}
|
"""Defines the factory for creating monitors"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
_SCANNERS = {}
def add_scanner_type(scanner_class):
"""Registers a scanner class so it can be used for Scale Scans
:param scanner_class: The class definition for a scanner
:type scanner_class: class:`ingest.scan.scanners.scanner.Scanner`
"""
scanner = scanner_class()
if scanner.scanner_type in _SCANNERS:
logger.warning('Duplicate scanner registration: %s', scanner.scanner_type)
_SCANNERS[scanner.scanner_type] = scanner_class
def get_scanner(scanner_type):
"""Returns a scanner of the given type that is set to scan the given workspace
:param scanner_type: The unique identifier of a registered scanner
:type scanner_type: string
:returns: A scanner for storing and retrieving files.
:rtype: :class:`ingest.scan.scanners.scanner.Scanner`
"""
if scanner_type in _SCANNERS:
return _SCANNERS[scanner_type]()
raise KeyError('\'%s\' is an invalid scanner type' % scanner_type)
def get_scanner_types():
"""Returns a list of type identifiers for all registered scanners
:returns: A list of scanner types
:rtype: [string]
"""
return _SCANNERS.keys()
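if __name__ == '__main__':
    # Editor's sketch: exercise the registry with a stand-in class.
    # ``DemoScanner`` is hypothetical; real scanners subclass
    # :class:`ingest.scan.scanners.scanner.Scanner`.
    class DemoScanner(object):
        scanner_type = 'demo'
    add_scanner_type(DemoScanner)
    assert isinstance(get_scanner('demo'), DemoScanner)
    print(list(get_scanner_types()))  # ['demo']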
|
{
"content_hash": "a572dcf97fb4fb5dcef978ee4bdb9607",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 82,
"avg_line_length": 28.8,
"alnum_prop": 0.6975308641975309,
"repo_name": "ngageoint/scale",
"id": "83694752cdf6c6f464f91220c8a7a8f86c9563f3",
"size": "1296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scale/ingest/scan/scanners/factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7219"
},
{
"name": "CSS",
"bytes": "12193"
},
{
"name": "Dockerfile",
"bytes": "14853"
},
{
"name": "HCL",
"bytes": "301"
},
{
"name": "HTML",
"bytes": "48818"
},
{
"name": "JavaScript",
"bytes": "503"
},
{
"name": "Makefile",
"bytes": "5852"
},
{
"name": "Python",
"bytes": "5295677"
},
{
"name": "Shell",
"bytes": "26650"
}
],
"symlink_target": ""
}
|
"""
Functions for analyzing/parsing docstrings
"""
import logging
import re
log = logging.getLogger(__name__)
def strip_rst(docs):
"""
Strip/replace reStructuredText directives in docstrings
"""
for func, docstring in docs.items():
log.debug("Stripping docstring for %s", func)
if not docstring:
continue
docstring_new = docstring
for regex, repl in (
(r" *.. code-block:: \S+\n{1,2}", ""),
(".. note::", "Note:"),
(".. warning::", "Warning:"),
(".. versionadded::", "New in version"),
(".. versionchanged::", "Changed in version"),
):
try:
docstring_new = re.sub(regex, repl, docstring_new)
except Exception: # pylint: disable=broad-except
log.debug(
"Exception encountered while matching regex %r to "
"docstring for function %s",
regex,
func,
exc_info=True,
)
if docstring != docstring_new:
docs[func] = docstring_new
return docs
def parse_docstring(docstring):
"""
Parse a docstring into its parts.
Currently only parses dependencies, can be extended to parse whatever is
needed.
Parses into a dictionary:
{
'full': full docstring,
'deps': list of dependencies (empty list if none)
}
"""
# First try with regex search for :depends:
ret = {"full": docstring}
regex = r"([ \t]*):depends:[ \t]+- (\w+)[^\n]*\n(\1[ \t]+- (\w+)[^\n]*\n)*"
match = re.search(regex, docstring, re.M)
if match:
deps = []
regex = r"- (\w+)"
for line in match.group(0).strip().splitlines():
deps.append(re.search(regex, line).group(1))
ret["deps"] = deps
return ret
# Try searching for a one-liner instead
else:
txt = "Required python modules: "
data = docstring.splitlines()
dep_list = list(x for x in data if x.strip().startswith(txt))
if not dep_list:
ret["deps"] = []
return ret
deps = dep_list[0].replace(txt, "").strip().split(", ")
ret["deps"] = deps
return ret
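if __name__ == "__main__":
    # Editor's sketch: the two dependency styles parse_docstring() handles.
    block = "Watch things.\n    :depends: - pyinotify\n              - requests\n"
    print(parse_docstring(block)["deps"])  # ['pyinotify', 'requests']
    one_liner = "Do things.\n    Required python modules: foo, bar\n"
    print(parse_docstring(one_liner)["deps"])  # ['foo', 'bar']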
|
{
"content_hash": "8b27aebf6dfcd6fb4ddc954d50c8315e",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 30.69333333333333,
"alnum_prop": 0.5152041702867072,
"repo_name": "saltstack/salt",
"id": "9f80bc3337bb5edb05252aeb1553f7463fac3f6a",
"size": "2302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/utils/doc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from .models import EmployeeBadge, Star, Badge
from categories.serializers import CategorySerializer, KeywordSerializer
from employees.models import Employee, Position
from rest_framework import serializers
class PositionSerializer(serializers.ModelSerializer):
class Meta(object):
model = Position
class EmployeeSimpleSerializer(serializers.ModelSerializer):
position = PositionSerializer()
class Meta(object):
model = Employee
fields = ('pk', 'username', 'first_name', 'last_name', 'avatar', 'position')
class StarSerializer(serializers.ModelSerializer):
from_user = EmployeeSimpleSerializer()
keyword = KeywordSerializer()
class Meta(object):
model = Star
fields = ('pk', 'date', 'text', 'from_user', 'to_user', 'category', 'keyword')
class StarInputSerializer(serializers.ModelSerializer):
class Meta(object):
model = Star
fields = ('pk', 'date', 'text', 'from_user', 'to_user', 'category', 'keyword')
class StarBulkSerializer(serializers.Serializer):
category = serializers.IntegerField()
keyword = serializers.IntegerField()
text = serializers.CharField()
to_users = serializers.ListField(
child=serializers.IntegerField()
)
class StarSmallSerializer(serializers.ModelSerializer):
from_user = EmployeeSimpleSerializer()
category = CategorySerializer()
keyword = KeywordSerializer()
class Meta(object):
model = Star
depth = 1
fields = ('pk', 'date', 'text', 'category', 'from_user', 'keyword')
class StarSwaggerSerializer(serializers.ModelSerializer):
class Meta(object):
model = Star
fields = ('pk', 'category', 'keyword', 'text')
class StarEmployeeCategoriesSerializer(serializers.Serializer):
pk = serializers.IntegerField(source='category__pk')
name = serializers.CharField(max_length=100, source='category__name')
num_stars = serializers.IntegerField()
class StarEmployeeKeywordsSerializer(serializers.Serializer):
pk = serializers.IntegerField(source='keyword__pk')
name = serializers.CharField(max_length=100, source='keyword__name')
num_stars = serializers.IntegerField()
class StarTopEmployeeLists(serializers.Serializer):
def get_avatar(self, data):
employee = Employee.objects.get(pk=data['to_user__pk'])
if employee.avatar:
avatar_url = employee.avatar.url
else:
avatar_url = ""
return avatar_url
pk = serializers.IntegerField(source='to_user__pk')
username = serializers.CharField(max_length=100, source='to_user__username')
first_name = serializers.CharField(max_length=100, source='to_user__first_name')
last_name = serializers.CharField(max_length=100, source='to_user__last_name')
level = serializers.IntegerField(source='to_user__level')
avatar = serializers.SerializerMethodField()
num_stars = serializers.IntegerField()
class StarKeywordList(serializers.Serializer):
pk = serializers.IntegerField(source='keyword__pk')
name = serializers.CharField(source='keyword__name')
num_stars = serializers.IntegerField()
class BadgeSerializer(serializers.ModelSerializer):
class Meta(object):
model = Badge
fields = ('pk', 'name', 'icon', 'description', 'sharing_text')
class EmployeeBadgeSerializer(serializers.ModelSerializer):
to_user = EmployeeSimpleSerializer()
assigned_by = EmployeeSimpleSerializer()
badge = BadgeSerializer()
class Meta(object):
model = EmployeeBadge
fields = ('pk', 'date', 'to_user', 'assigned_by', 'badge')
class EmployeeBadgeListSerializer(serializers.Serializer):
pk = serializers.IntegerField(source='badge__pk')
name = serializers.CharField(source='badge__name')
num_employees = serializers.IntegerField()
class EmployeeGroupedListSerializer(serializers.Serializer):
def get_avatar(self, data):
employee = Employee.objects.get(pk=data['to_user__pk'])
if employee.avatar:
avatar_url = employee.avatar.url
else:
avatar_url = ""
return avatar_url
pk = serializers.IntegerField(source='to_user__pk')
username = serializers.CharField(max_length=100, source='to_user__username')
first_name = serializers.CharField(max_length=100, source='to_user__first_name')
last_name = serializers.CharField(max_length=100, source='to_user__last_name')
level = serializers.IntegerField(source='to_user__level')
avatar = serializers.SerializerMethodField()
|
{
"content_hash": "76493f511f773857b0b191201bb2be16",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 86,
"avg_line_length": 33.81481481481482,
"alnum_prop": 0.6974808324205914,
"repo_name": "belatrix/BackendAllStars",
"id": "64d44ff8ef52ec3b49e67bb6d2d93cde72826653",
"size": "4565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stars/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "198882"
}
],
"symlink_target": ""
}
|
"""
base collection classes for building collections.
Common functions such as pagination and such live in here.
http://xkcd.com/353/
Josh Ashby
2013
http://joshashby.com
joshuaashby@joshashby.com
"""
class baseCollection(object):
"""
    Attempts to be a common ground for collections, giving them all similar
    functionality
"""
def paginate(self, pageNumber, perPage):
"""
Paginates self.pail
"""
pageNumber = int(pageNumber)
perPage = int(perPage)
pailVolume = len(self.pail)
startingPlace = 0
if pageNumber != 1:
startingPlace = (pageNumber-1) * perPage
if startingPlace > pailVolume:
raise Exception("Starting place outside of collections length.")
endingPlace = pageNumber*perPage
if endingPlace > pailVolume:
endingPlace = pailVolume
self.pagination = self.pail[startingPlace:endingPlace]
self.paginateSettings = {"pageNumber": pageNumber, "perPage": perPage}
def resetPagination(self):
"""
        Resets the pagination, for whatever reason that may be needed;
        it should work just fine.
"""
self.pagination = []
@property
def currentPage(self):
"""
Simple helper property to return the current page number that the collection
is paginated on.
"""
return self.paginateSettings["pageNumber"]
@property
def perPage(self):
"""
Simple helper to return the perpage which the pagination was called with
"""
return self.paginateSettings["perPage"]
@property
def hasNextPage(self):
"""
Returns true if there are more results past the current paginated results.
:return: Boolean if there is a next page or not.
:rtype: Boolean
"""
        perPage = self.paginateSettings["perPage"]
        pageNumber = self.paginateSettings["pageNumber"]
        # A partial final page still counts as a next page.
        return pageNumber * perPage < len(self.pail)
@property
def pages(self):
"""
        Returns the number of pages that the results span
:return: Integer of how many pages are contained within the
paginated collection
:rtype: Int
"""
        pailVolume = len(self.pail)
        perPage = self.paginateSettings["perPage"]
        # Ceiling division so a partial final page is counted.
        return max(1, -(-pailVolume // perPage))
def preInitAppend(self, drip):
"""
Pre append hook for adding a redisObject to the internal
_collection list. Inheriting classes should override this if
any modification needs to be made on `drip`
:param drip: a `redisObject` instance
:type drip: redisObject
:return: `drip` instance modified or unmodified
:rtype: redisObject
"""
return drip
def postInitAppend(self):
"""
Post append hook that runs after each `redisObject` is inserted into
self._collection
Note: Accepts nothing and returns nothing.
"""
pass
def sortBy(self, by, desc=True):
"""
Sorts the collection by the field specified in `by`
        :param by: The name of the field by which the collection should be
            sorted
        :type by: Str
        :param desc: If false then the collection is sorted, then reversed.
:type desc: Boolean
:return: The collection after sorting
:rtype: List
"""
self._collection.sort(key=lambda x: x[by])
if not desc:
self._collection.reverse()
return self._collection
def withoutCollection(self, subCol):
"""
        Removes all the elements in subCol from self.pail, resulting in
        self.pail becoming a plain list rather than a RedisList, meaning
        any addObject or delObject calls made on the collection won't be stored.
"""
subPail = subCol.pail
self.pail = list(set(self.pail) - set(subPail))
@property
def tub(self):
return self._collection
def __iter__(self):
"""
Emulates an iterator for use in `for` loops and such
"""
for drip in self._collection:
yield drip
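if __name__ == '__main__':
    # Editor's sketch: drive the mixin with a plain list standing in for the
    # RedisList a subclass would normally provide via ``pail``.
    col = baseCollection()
    col.pail = list(range(25))
    col.paginate(2, 10)
    print(col.pagination)   # items 10..19
    print(col.pages)        # 3
    print(col.hasNextPage)  # True: items 20..24 remain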
|
{
"content_hash": "4524544ede31be6bd988cb43929a4555",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 84,
"avg_line_length": 29.106666666666666,
"alnum_prop": 0.60879523591388,
"repo_name": "JoshAshby/Fla.gr",
"id": "1aa1703dc6eb8f0bc73709963fff6c0434e58102",
"size": "4388",
"binary": false,
"copies": "1",
"ref": "refs/heads/organize",
"path": "app/models/baseCollection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "12610"
},
{
"name": "JavaScript",
"bytes": "31677"
},
{
"name": "Python",
"bytes": "163850"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserAccount'
db.create_table(u'snappybouncer_useraccount', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=43)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('created_at', self.gf('snappybouncer.models.AutoNewDateTimeField')(blank=True)),
('updated_at', self.gf('snappybouncer.models.AutoDateTimeField')(blank=True)),
))
db.send_create_signal(u'snappybouncer', ['UserAccount'])
# Adding model 'Conversation'
db.create_table(u'snappybouncer_conversation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user_account', self.gf('django.db.models.fields.related.ForeignKey')(related_name='conversations', to=orm['snappybouncer.UserAccount'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=43)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('created_at', self.gf('snappybouncer.models.AutoNewDateTimeField')(blank=True)),
('updated_at', self.gf('snappybouncer.models.AutoDateTimeField')(blank=True)),
))
db.send_create_signal(u'snappybouncer', ['Conversation'])
# Adding model 'Ticket'
db.create_table(u'snappybouncer_ticket', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('conversation', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tickets', to=orm['snappybouncer.Conversation'])),
('support_nonce', self.gf('django.db.models.fields.CharField')(max_length=43, null=True, blank=True)),
('support_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
('response', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('contact_key', self.gf('django.db.models.fields.CharField')(max_length=43)),
('msisdn', self.gf('django.db.models.fields.CharField')(max_length=100)),
('created_at', self.gf('snappybouncer.models.AutoNewDateTimeField')(blank=True)),
('updated_at', self.gf('snappybouncer.models.AutoDateTimeField')(blank=True)),
))
db.send_create_signal(u'snappybouncer', ['Ticket'])
def backwards(self, orm):
# Deleting model 'UserAccount'
db.delete_table(u'snappybouncer_useraccount')
# Deleting model 'Conversation'
db.delete_table(u'snappybouncer_conversation')
# Deleting model 'Ticket'
db.delete_table(u'snappybouncer_ticket')
models = {
u'snappybouncer.conversation': {
'Meta': {'object_name': 'Conversation'},
'created_at': ('snappybouncer.models.AutoNewDateTimeField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '43'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('snappybouncer.models.AutoDateTimeField', [], {'blank': 'True'}),
'user_account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conversations'", 'to': u"orm['snappybouncer.UserAccount']"})
},
u'snappybouncer.ticket': {
'Meta': {'object_name': 'Ticket'},
'contact_key': ('django.db.models.fields.CharField', [], {'max_length': '43'}),
'conversation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tickets'", 'to': u"orm['snappybouncer.Conversation']"}),
'created_at': ('snappybouncer.models.AutoNewDateTimeField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'msisdn': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'support_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'support_nonce': ('django.db.models.fields.CharField', [], {'max_length': '43', 'null': 'True', 'blank': 'True'}),
'updated_at': ('snappybouncer.models.AutoDateTimeField', [], {'blank': 'True'})
},
u'snappybouncer.useraccount': {
'Meta': {'object_name': 'UserAccount'},
'created_at': ('snappybouncer.models.AutoNewDateTimeField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '43'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('snappybouncer.models.AutoDateTimeField', [], {'blank': 'True'})
}
}
complete_apps = ['snappybouncer']
|
{
"content_hash": "8b7e6ffd795a7ad7d75590c9e216bfd3",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 158,
"avg_line_length": 61.242105263157896,
"alnum_prop": 0.6005500171880371,
"repo_name": "praekelt/ndoh-control",
"id": "0d0846a29ceb87a9e9182bbd9236d3caa6003077",
"size": "5842",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "snappybouncer/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19562"
},
{
"name": "HTML",
"bytes": "32320"
},
{
"name": "JavaScript",
"bytes": "65518"
},
{
"name": "Nginx",
"bytes": "777"
},
{
"name": "Python",
"bytes": "553807"
},
{
"name": "Shell",
"bytes": "465"
}
],
"symlink_target": ""
}
|
import geojson, json, random, time, os, datetime, feedparser
from ingest import ingest_data
from housepy import config, log, strings, util, net
from mongo import db
"""
Grab Medium blog entries
"""
ACCOUNTS = [list(item.keys())[0] for item in config['twitter']['accounts']] # same handles
MEMBERS = [list(item.values())[0] for item in config['twitter']['accounts']]
def parse(request):
log.info("blog.parse")
log.error("nop") # dont call via post
return None
def main(): ## called via tweet_grabber.py
for a, account in enumerate(ACCOUNTS):
log.info("Checking %s..." % account)
try:
feed = "https://medium.com/feed/@%s" % account
data = feedparser.parse(feed)['entries']
except Exception as e:
log.error(log.exc(e))
continue
for entry in data:
try:
entry = {strings.camelcase(key): value for (key, value) in entry.items() if key in ['title', 'link', 'summary', 'published']}
entry['Member'] = MEMBERS[a]
entry['t_utc'] = util.timestamp(util.parse_date(entry['Published']))
if entry['t_utc'] < (util.timestamp(util.parse_date(str(config['start_date'][config['expedition']])))) - (3 * 24 * 60 * 60): ## hack, minus three days to get jer's post
log.info("--> skipping too early blog post")
continue
del entry['Published']
entry['Url'] = entry['Link']
del entry['Link']
entry['Summary'] = strings.strip_html(entry['Summary']).replace("Continue reading on Medium \u00bb", "")
entry['FeatureType'] = "blog"
dup = db.features.find_one({'properties.FeatureType': 'blog', 'properties.Url': entry['Url']})
if dup is not None:
log.info("--> skipping duplicate blog post")
continue
log.info("--> %s" % entry)
success, value = ingest_data("blog", entry)
if not success:
log.error("--> failed: %s" % value)
else:
log.info("--> %s" % value)
except Exception as e:
log.error(log.exc(e))
continue
|
{
"content_hash": "8c1cb94c00e7664e707ab3119cbbe8f0",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 184,
"avg_line_length": 40.8421052631579,
"alnum_prop": 0.5287800687285223,
"repo_name": "brianhouse/okavango_15",
"id": "fe087111b53d2c8edf365550c75db5b7929188e2",
"size": "2328",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ingest/blog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "55458"
},
{
"name": "HTML",
"bytes": "191304"
},
{
"name": "JavaScript",
"bytes": "540434"
},
{
"name": "Python",
"bytes": "107391"
},
{
"name": "Shell",
"bytes": "12556"
}
],
"symlink_target": ""
}
|
import copy
from flask import request
from flexget.config_schema import process_config
from flexget.api import api, APIResource, ApiError, NotFoundError
# Tasks API
tasks_api = api.namespace('tasks', description='Manage Tasks')
task_api_schema = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'config': {'$ref': '/schema/plugins'}
},
'additionalProperties': False
}
tasks_api_schema = {
"type": "object",
"properties": {
"tasks": {
"type": "array",
"items": task_api_schema
}
},
'additionalProperties': False
}
tasks_api_schema = api.schema('tasks', tasks_api_schema)
task_api_schema = api.schema('task', task_api_schema)
@tasks_api.route('/')
class TasksAPI(APIResource):
@api.response(200, 'list of tasks', tasks_api_schema)
def get(self, session=None):
""" Show all tasks """
tasks = []
for name, config in self.manager.user_config.get('tasks', {}).iteritems():
tasks.append({'name': name, 'config': config})
return {'tasks': tasks}
@api.validate(task_api_schema)
@api.response(201, 'newly created task', task_api_schema)
@api.response(409, 'task already exists', task_api_schema)
def post(self, session=None):
""" Add new task """
data = request.json
task_name = data['name']
if task_name in self.manager.user_config.get('tasks', {}):
return {'error': 'task already exists'}, 409
if 'tasks' not in self.manager.user_config:
self.manager.user_config['tasks'] = {}
if 'tasks' not in self.manager.config:
self.manager.config['tasks'] = {}
task_schema_processed = copy.deepcopy(data)
errors = process_config(task_schema_processed, schema=task_api_schema.__schema__, set_defaults=True)
if errors:
return {'error': 'problem loading config, raise a BUG as this should not happen!'}, 500
self.manager.user_config['tasks'][task_name] = data['config']
self.manager.config['tasks'][task_name] = task_schema_processed['config']
self.manager.save_config()
self.manager.config_changed()
return {'name': task_name, 'config': self.manager.user_config['tasks'][task_name]}, 201
@tasks_api.route('/<task>/')
@api.doc(params={'task': 'task name'})
class TaskAPI(APIResource):
@api.response(200, 'task config', task_api_schema)
@api.response(NotFoundError, 'task not found')
@api.response(ApiError, 'unable to read config')
def get(self, task, session=None):
""" Get task config """
if task not in self.manager.user_config.get('tasks', {}):
raise NotFoundError('task `%s` not found' % task)
return {'name': task, 'config': self.manager.user_config['tasks'][task]}
@api.validate(task_api_schema)
@api.response(200, 'updated task', task_api_schema)
@api.response(201, 'renamed task', task_api_schema)
@api.response(404, 'task does not exist', task_api_schema)
@api.response(400, 'cannot rename task as it already exist', task_api_schema)
def post(self, task, session=None):
""" Update tasks config """
data = request.json
new_task_name = data['name']
if task not in self.manager.user_config.get('tasks', {}):
return {'error': 'task does not exist'}, 404
if 'tasks' not in self.manager.user_config:
self.manager.user_config['tasks'] = {}
if 'tasks' not in self.manager.config:
self.manager.config['tasks'] = {}
code = 200
if task != new_task_name:
# Rename task
if new_task_name in self.manager.user_config['tasks']:
return {'error': 'cannot rename task as it already exist'}, 400
del self.manager.user_config['tasks'][task]
del self.manager.config['tasks'][task]
code = 201
# Process the task config
task_schema_processed = copy.deepcopy(data)
errors = process_config(task_schema_processed, schema=task_api_schema.__schema__, set_defaults=True)
if errors:
return {'error': 'problem loading config, raise a BUG as this should not happen!'}, 500
self.manager.user_config['tasks'][new_task_name] = data['config']
self.manager.config['tasks'][new_task_name] = task_schema_processed['config']
self.manager.save_config()
self.manager.config_changed()
return {'name': new_task_name, 'config': self.manager.user_config['tasks'][new_task_name]}, code
@api.response(200, 'deleted task')
@api.response(404, 'task not found')
def delete(self, task, session=None):
""" Delete a task """
try:
self.manager.config['tasks'].pop(task)
self.manager.user_config['tasks'].pop(task)
except KeyError:
return {'error': 'invalid task'}, 404
self.manager.save_config()
self.manager.config_changed()
return {}
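# --- Editor's usage sketch (not part of the original module) ---
# The resources above expose plain JSON endpoints; the task name here is
# hypothetical:
#
#     GET    /tasks/           -> {'tasks': [{'name': ..., 'config': ...}]}
#     POST   /tasks/           body {'name': 'my-task', 'config': {...}}
#     GET    /tasks/my-task/   -> {'name': 'my-task', 'config': {...}}
#     DELETE /tasks/my-task/   -> {}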
|
{
"content_hash": "4c074f558b03b9b6a737d6740ac550f2",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 108,
"avg_line_length": 34.006711409395976,
"alnum_prop": 0.6046970594039865,
"repo_name": "tsnoam/Flexget",
"id": "83e333a2f6a9825d7f0378c282fad59812a2812f",
"size": "5067",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/api/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4567"
},
{
"name": "HTML",
"bytes": "20672"
},
{
"name": "JavaScript",
"bytes": "36021"
},
{
"name": "Python",
"bytes": "2330178"
}
],
"symlink_target": ""
}
|
"""Base classes for our unit tests.
Allows overriding of CONF for use of fakes, and some black magic for
inline callbacks.
"""
import logging
import os
import fixtures
import mock
import testtools
from testtools import matchers
from brick.openstack.common import log as oslo_logging
from oslo.utils import strutils
LOG = oslo_logging.getLogger(__name__)
_DB_CACHE = None
class TestingException(Exception):
pass
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
environ_enabled = (lambda var_name:
strutils.bool_from_string(os.environ.get(var_name)))
if environ_enabled('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if environ_enabled('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
if environ_enabled('OS_LOG_CAPTURE'):
log_format = '%(levelname)s [%(name)s] %(message)s'
if environ_enabled('OS_DEBUG'):
level = logging.DEBUG
else:
level = logging.INFO
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
format=log_format,
level=level))
def _common_cleanup(self):
"""Runs after each test method to tear down test environment."""
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def log_level(self, level):
"""Set logging level to the specified value."""
log_root = logging.getLogger(None).logger
log_root.setLevel(level)
def mock_object(self, obj, attr_name, new_attr=None, **kwargs):
"""Use python mock to mock an object attribute
        Mocks the specified object's attribute with the given value.
Automatically performs 'addCleanup' for the mock.
"""
if not new_attr:
new_attr = mock.Mock()
patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = d1
d2str = d2
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' %
{'msg': msg, 'd1str': d1str, 'd2str': d2str})
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' %
{'d1only': d1only, 'd2only': d2only})
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" %
{
'key': key,
'd1value': d1value,
'd2value': d2value,
})
def assertGreater(self, first, second, msg=None):
"""Python < v2.7 compatibility. Assert 'first' > 'second'."""
try:
f = super(TestCase, self).assertGreater
except AttributeError:
self.assertThat(first,
matchers.GreaterThan(second),
message=msg or '')
else:
f(first, second, msg=msg)
def assertGreaterEqual(self, first, second, msg=None):
"""Python < v2.7 compatibility. Assert 'first' >= 'second'."""
try:
f = super(TestCase, self).assertGreaterEqual
except AttributeError:
self.assertThat(first,
matchers.Not(matchers.LessThan(second)),
message=msg or '')
else:
f(first, second, msg=msg)
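# --- Editor's usage sketch (not part of the original module) ---
# assertDictMatch() compares nested dicts and honours the DONTCARE marker;
# the test case and connector call below are hypothetical:
#
#     class ConnectorTestCase(TestCase):
#         def test_properties(self):
#             expected = {'initiator': 'DONTCARE', 'multipath': False}
#             self.assertDictMatch(expected, get_connector_properties())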
|
{
"content_hash": "38d29d72019b62b1db74f2b903ea4b3d",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 79,
"avg_line_length": 35.92090395480226,
"alnum_prop": 0.5410506448568733,
"repo_name": "hemna/cinder-brick",
"id": "789924f814c1f25ad0b1aa250539ff47cad697f5",
"size": "7090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brick/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "290964"
}
],
"symlink_target": ""
}
|
def g():
for a in range(3):
yield a
return 7
def h():
yield 4
yield 5
def f():
print("Yielded from returner", (yield from g()))
print("Yielded from non-return value", (yield from h()))
print( "Result", list( f() ) )
print( "Yielder with return value", list(g()) )
# This will raise when looking up any attribute.
class Broken:
def __iter__(self):
return self
def __next__(self):
return 1
def __getattr__(self, attr):
1/0
def test_broken_getattr_handling():
def g():
yield from Broken()
print( "Next with send: ", end = "" )
try:
gi = g()
next(gi)
gi.send(1)
except Exception as e:
print( "Caught", repr(e) )
print( "Next with throw: ", end = "" )
try:
gi = g()
next(gi)
gi.throw(AttributeError)
except Exception as e:
print( "Caught", repr(e) )
print( "Next with close: ", end = "" )
try:
gi = g()
next(gi)
gi.close()
print( "All good" )
except Exception as e:
print( "Caught", repr(e) )
test_broken_getattr_handling()
def test_throw_catched_subgenerator_handling():
def g1():
try:
print("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
print("Finishing g1")
def g2():
try:
print("Starting g2")
yield "g2 spam"
yield "g2 more spam"
except LunchError:
print("Caught LunchError in g2")
yield "g2 lunch saved"
yield "g2 yet more spam"
class LunchError(Exception):
pass
g = g1()
for i in range(2):
x = next(g)
print("Yielded %s" % (x,))
e = LunchError("tomato ejected")
print( "Throw returned", g.throw(e) )
print( "Sub thrown" )
for x in g:
print("Yielded %s" % (x,))
test_throw_catched_subgenerator_handling()
def give_cpython_generator():
# TODO: This relies on eval not being inlined, which will become untrue.
return eval( "( x for x in range(3) )" )
def gen_compiled():
yield from give_cpython_generator()
yield from range(7)
print( list( gen_compiled() ) )
|
{
"content_hash": "73f0ec4bae018272531c9bf542c22a00",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 76,
"avg_line_length": 23.83653846153846,
"alnum_prop": 0.48527632109721663,
"repo_name": "tempbottle/Nuitka",
"id": "29f504b4f04dbe186259baae95d050405b3789ed",
"size": "3249",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/basics/YieldFrom33.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "433315"
},
{
"name": "Python",
"bytes": "4356577"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
}
|
'''
"version" plugin for cocos2d command line tool
'''
__docformat__ = 'restructuredtext'
import re
import os
import cocos2d
import inspect
#
# Plugins should be a subclass of cocos2d.CCPlugin
#
class CCPluginVersion(cocos2d.CCPlugin):
@staticmethod
def plugin_name():
return "version"
@staticmethod
def brief_description():
return "prints the version of the installed components"
def _show_versions(self):
path = os.path.join(self._src_dir, "cocos2dx/cocos2d.cpp")
if not os.path.exists(path):
path = os.path.join(self._src_dir, "cocos/2d/cocos2d.cpp")
if not os.path.exists(path):
raise cocos2d.CCPluginError("Couldn't find file with version information")
with open(path, 'r') as f:
data = f.read()
match = re.search('cocos2dVersion\(\)\s*{\s*return\s+"([^"]+)"\s*;', data)
if match:
print 'cocos2d %s' % match.group(1)
else:
raise cocos2d.CCPluginError("Couldn't find version info")
def run(self, argv, dependencies):
self.parse_args(argv)
self._show_versions()
|
{
"content_hash": "fa63239551fc5db5e856814aa9d70548",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 90,
"avg_line_length": 25.40909090909091,
"alnum_prop": 0.6288014311270125,
"repo_name": "meiry/Cocos2d-x-EarthWarrior3D-win-desktop-version",
"id": "b44f543b3e6574cd52bffdfa008027d7f9a43799",
"size": "1398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cocos2d/tools/cocos2d-console/plugins/plugin_version.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''
Created on Sep 17, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
D = {'solverType':'splitField', 'flavor':'TE', 'numRuns':100, 'expt':'fullFreqs', 'numProcs':16}
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
D['bkgNo'] = parseNumber + 100
D['freqs'] = np.logspace(2.3010, 4.6990,16)
D['inc'] = np.array([75.0])*np.pi/180
return D
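# --- Editor's usage sketch (not part of the original module) ---
#     params = getMyVars(0, dict(D))  # run 0 -> bkgNo 100, 16 log-spaced freqs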
|
{
"content_hash": "3f497107ef0eb5acd3d5a84a7a6744bf",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 96,
"avg_line_length": 33.83870967741935,
"alnum_prop": 0.7273593898951383,
"repo_name": "daStrauss/subsurface",
"id": "5c3e48d88e0a7745628c73e56dce6b2889fc6cf6",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/expts/fullFreqs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "295580"
}
],
"symlink_target": ""
}
|
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from cirq_google.cloud.quantum_v1alpha1.types import engine
from cirq_google.cloud.quantum_v1alpha1.types import quantum
from google.protobuf import empty_pb2
from .base import QuantumEngineServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import QuantumEngineServiceGrpcTransport
class QuantumEngineServiceGrpcAsyncIOTransport(QuantumEngineServiceTransport):
"""gRPC AsyncIO backend transport for QuantumEngineService.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
                       host: str = 'quantum.googleapis.com',
                       credentials: Optional[ga_credentials.Credentials] = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
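    # Hypothetical usage (not from the source): building a channel directly,
    # letting google-auth pick up application default credentials from the
    # environment:
    #
    #   channel = QuantumEngineServiceGrpcAsyncIOTransport.create_channel(
    #       host='quantum.googleapis.com',
    #       scopes=['https://www.googleapis.com/auth/cloud-platform'],
    #   )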
def __init__(self, *,
            host: str = 'quantum.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: Optional[aio.Channel] = None,
            api_mtls_endpoint: Optional[str] = None,
            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
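    # Hedged configuration sketch (callback and PEM bytes are invented): to use
    # mutual TLS, pass a callable returning (cert_bytes, key_bytes) and the
    # constructor wraps it in grpc.ssl_channel_credentials() as shown above:
    #
    #   transport = QuantumEngineServiceGrpcAsyncIOTransport(
    #       client_cert_source_for_mtls=lambda: (cert_pem, key_pem),
    #   )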
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def create_quantum_program(self) -> Callable[
[engine.CreateQuantumProgramRequest],
Awaitable[quantum.QuantumProgram]]:
r"""Return a callable for the create quantum program method over gRPC.
Returns:
Callable[[~.CreateQuantumProgramRequest],
Awaitable[~.QuantumProgram]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_quantum_program' not in self._stubs:
self._stubs['create_quantum_program'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CreateQuantumProgram',
request_serializer=engine.CreateQuantumProgramRequest.serialize,
response_deserializer=quantum.QuantumProgram.deserialize,
)
return self._stubs['create_quantum_program']
@property
def get_quantum_program(self) -> Callable[
[engine.GetQuantumProgramRequest],
Awaitable[quantum.QuantumProgram]]:
r"""Return a callable for the get quantum program method over gRPC.
Returns:
Callable[[~.GetQuantumProgramRequest],
Awaitable[~.QuantumProgram]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_quantum_program' not in self._stubs:
self._stubs['get_quantum_program'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumProgram',
request_serializer=engine.GetQuantumProgramRequest.serialize,
response_deserializer=quantum.QuantumProgram.deserialize,
)
return self._stubs['get_quantum_program']
@property
def list_quantum_programs(self) -> Callable[
[engine.ListQuantumProgramsRequest],
Awaitable[engine.ListQuantumProgramsResponse]]:
r"""Return a callable for the list quantum programs method over gRPC.
Returns:
Callable[[~.ListQuantumProgramsRequest],
Awaitable[~.ListQuantumProgramsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_programs' not in self._stubs:
self._stubs['list_quantum_programs'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumPrograms',
request_serializer=engine.ListQuantumProgramsRequest.serialize,
response_deserializer=engine.ListQuantumProgramsResponse.deserialize,
)
return self._stubs['list_quantum_programs']
@property
def delete_quantum_program(self) -> Callable[
[engine.DeleteQuantumProgramRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete quantum program method over gRPC.
Returns:
Callable[[~.DeleteQuantumProgramRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_quantum_program' not in self._stubs:
self._stubs['delete_quantum_program'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/DeleteQuantumProgram',
request_serializer=engine.DeleteQuantumProgramRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_quantum_program']
@property
def update_quantum_program(self) -> Callable[
[engine.UpdateQuantumProgramRequest],
Awaitable[quantum.QuantumProgram]]:
r"""Return a callable for the update quantum program method over gRPC.
Returns:
Callable[[~.UpdateQuantumProgramRequest],
Awaitable[~.QuantumProgram]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_quantum_program' not in self._stubs:
self._stubs['update_quantum_program'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/UpdateQuantumProgram',
request_serializer=engine.UpdateQuantumProgramRequest.serialize,
response_deserializer=quantum.QuantumProgram.deserialize,
)
return self._stubs['update_quantum_program']
@property
def create_quantum_job(self) -> Callable[
[engine.CreateQuantumJobRequest],
Awaitable[quantum.QuantumJob]]:
r"""Return a callable for the create quantum job method over gRPC.
Returns:
Callable[[~.CreateQuantumJobRequest],
Awaitable[~.QuantumJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_quantum_job' not in self._stubs:
self._stubs['create_quantum_job'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CreateQuantumJob',
request_serializer=engine.CreateQuantumJobRequest.serialize,
response_deserializer=quantum.QuantumJob.deserialize,
)
return self._stubs['create_quantum_job']
@property
def get_quantum_job(self) -> Callable[
[engine.GetQuantumJobRequest],
Awaitable[quantum.QuantumJob]]:
r"""Return a callable for the get quantum job method over gRPC.
Returns:
Callable[[~.GetQuantumJobRequest],
Awaitable[~.QuantumJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_quantum_job' not in self._stubs:
self._stubs['get_quantum_job'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumJob',
request_serializer=engine.GetQuantumJobRequest.serialize,
response_deserializer=quantum.QuantumJob.deserialize,
)
return self._stubs['get_quantum_job']
@property
def list_quantum_jobs(self) -> Callable[
[engine.ListQuantumJobsRequest],
Awaitable[engine.ListQuantumJobsResponse]]:
r"""Return a callable for the list quantum jobs method over gRPC.
Returns:
Callable[[~.ListQuantumJobsRequest],
Awaitable[~.ListQuantumJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_jobs' not in self._stubs:
self._stubs['list_quantum_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumJobs',
request_serializer=engine.ListQuantumJobsRequest.serialize,
response_deserializer=engine.ListQuantumJobsResponse.deserialize,
)
return self._stubs['list_quantum_jobs']
@property
def delete_quantum_job(self) -> Callable[
[engine.DeleteQuantumJobRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete quantum job method over gRPC.
Returns:
Callable[[~.DeleteQuantumJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_quantum_job' not in self._stubs:
self._stubs['delete_quantum_job'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/DeleteQuantumJob',
request_serializer=engine.DeleteQuantumJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_quantum_job']
@property
def update_quantum_job(self) -> Callable[
[engine.UpdateQuantumJobRequest],
Awaitable[quantum.QuantumJob]]:
r"""Return a callable for the update quantum job method over gRPC.
Returns:
Callable[[~.UpdateQuantumJobRequest],
Awaitable[~.QuantumJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_quantum_job' not in self._stubs:
self._stubs['update_quantum_job'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/UpdateQuantumJob',
request_serializer=engine.UpdateQuantumJobRequest.serialize,
response_deserializer=quantum.QuantumJob.deserialize,
)
return self._stubs['update_quantum_job']
@property
def cancel_quantum_job(self) -> Callable[
[engine.CancelQuantumJobRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the cancel quantum job method over gRPC.
Returns:
Callable[[~.CancelQuantumJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'cancel_quantum_job' not in self._stubs:
self._stubs['cancel_quantum_job'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CancelQuantumJob',
request_serializer=engine.CancelQuantumJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['cancel_quantum_job']
@property
def list_quantum_job_events(self) -> Callable[
[engine.ListQuantumJobEventsRequest],
Awaitable[engine.ListQuantumJobEventsResponse]]:
r"""Return a callable for the list quantum job events method over gRPC.
Returns:
Callable[[~.ListQuantumJobEventsRequest],
Awaitable[~.ListQuantumJobEventsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_job_events' not in self._stubs:
self._stubs['list_quantum_job_events'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumJobEvents',
request_serializer=engine.ListQuantumJobEventsRequest.serialize,
response_deserializer=engine.ListQuantumJobEventsResponse.deserialize,
)
return self._stubs['list_quantum_job_events']
@property
def get_quantum_result(self) -> Callable[
[engine.GetQuantumResultRequest],
Awaitable[quantum.QuantumResult]]:
r"""Return a callable for the get quantum result method over gRPC.
Returns:
Callable[[~.GetQuantumResultRequest],
Awaitable[~.QuantumResult]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_quantum_result' not in self._stubs:
self._stubs['get_quantum_result'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumResult',
request_serializer=engine.GetQuantumResultRequest.serialize,
response_deserializer=quantum.QuantumResult.deserialize,
)
return self._stubs['get_quantum_result']
@property
def list_quantum_processors(self) -> Callable[
[engine.ListQuantumProcessorsRequest],
Awaitable[engine.ListQuantumProcessorsResponse]]:
r"""Return a callable for the list quantum processors method over gRPC.
Returns:
Callable[[~.ListQuantumProcessorsRequest],
Awaitable[~.ListQuantumProcessorsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_processors' not in self._stubs:
self._stubs['list_quantum_processors'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumProcessors',
request_serializer=engine.ListQuantumProcessorsRequest.serialize,
response_deserializer=engine.ListQuantumProcessorsResponse.deserialize,
)
return self._stubs['list_quantum_processors']
@property
def get_quantum_processor(self) -> Callable[
[engine.GetQuantumProcessorRequest],
Awaitable[quantum.QuantumProcessor]]:
r"""Return a callable for the get quantum processor method over gRPC.
Returns:
Callable[[~.GetQuantumProcessorRequest],
Awaitable[~.QuantumProcessor]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_quantum_processor' not in self._stubs:
self._stubs['get_quantum_processor'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumProcessor',
request_serializer=engine.GetQuantumProcessorRequest.serialize,
response_deserializer=quantum.QuantumProcessor.deserialize,
)
return self._stubs['get_quantum_processor']
@property
def list_quantum_calibrations(self) -> Callable[
[engine.ListQuantumCalibrationsRequest],
Awaitable[engine.ListQuantumCalibrationsResponse]]:
r"""Return a callable for the list quantum calibrations method over gRPC.
Returns:
Callable[[~.ListQuantumCalibrationsRequest],
Awaitable[~.ListQuantumCalibrationsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_calibrations' not in self._stubs:
self._stubs['list_quantum_calibrations'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumCalibrations',
request_serializer=engine.ListQuantumCalibrationsRequest.serialize,
response_deserializer=engine.ListQuantumCalibrationsResponse.deserialize,
)
return self._stubs['list_quantum_calibrations']
@property
def get_quantum_calibration(self) -> Callable[
[engine.GetQuantumCalibrationRequest],
Awaitable[quantum.QuantumCalibration]]:
r"""Return a callable for the get quantum calibration method over gRPC.
Returns:
Callable[[~.GetQuantumCalibrationRequest],
Awaitable[~.QuantumCalibration]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_quantum_calibration' not in self._stubs:
self._stubs['get_quantum_calibration'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumCalibration',
request_serializer=engine.GetQuantumCalibrationRequest.serialize,
response_deserializer=quantum.QuantumCalibration.deserialize,
)
return self._stubs['get_quantum_calibration']
@property
def create_quantum_reservation(self) -> Callable[
[engine.CreateQuantumReservationRequest],
Awaitable[quantum.QuantumReservation]]:
r"""Return a callable for the create quantum reservation method over gRPC.
Returns:
Callable[[~.CreateQuantumReservationRequest],
Awaitable[~.QuantumReservation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_quantum_reservation' not in self._stubs:
self._stubs['create_quantum_reservation'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CreateQuantumReservation',
request_serializer=engine.CreateQuantumReservationRequest.serialize,
response_deserializer=quantum.QuantumReservation.deserialize,
)
return self._stubs['create_quantum_reservation']
@property
def cancel_quantum_reservation(self) -> Callable[
[engine.CancelQuantumReservationRequest],
Awaitable[quantum.QuantumReservation]]:
r"""Return a callable for the cancel quantum reservation method over gRPC.
Returns:
Callable[[~.CancelQuantumReservationRequest],
Awaitable[~.QuantumReservation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'cancel_quantum_reservation' not in self._stubs:
self._stubs['cancel_quantum_reservation'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/CancelQuantumReservation',
request_serializer=engine.CancelQuantumReservationRequest.serialize,
response_deserializer=quantum.QuantumReservation.deserialize,
)
return self._stubs['cancel_quantum_reservation']
@property
def delete_quantum_reservation(self) -> Callable[
[engine.DeleteQuantumReservationRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete quantum reservation method over gRPC.
Returns:
Callable[[~.DeleteQuantumReservationRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_quantum_reservation' not in self._stubs:
self._stubs['delete_quantum_reservation'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/DeleteQuantumReservation',
request_serializer=engine.DeleteQuantumReservationRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_quantum_reservation']
@property
def get_quantum_reservation(self) -> Callable[
[engine.GetQuantumReservationRequest],
Awaitable[quantum.QuantumReservation]]:
r"""Return a callable for the get quantum reservation method over gRPC.
Returns:
Callable[[~.GetQuantumReservationRequest],
Awaitable[~.QuantumReservation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_quantum_reservation' not in self._stubs:
self._stubs['get_quantum_reservation'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/GetQuantumReservation',
request_serializer=engine.GetQuantumReservationRequest.serialize,
response_deserializer=quantum.QuantumReservation.deserialize,
)
return self._stubs['get_quantum_reservation']
@property
def list_quantum_reservations(self) -> Callable[
[engine.ListQuantumReservationsRequest],
Awaitable[engine.ListQuantumReservationsResponse]]:
r"""Return a callable for the list quantum reservations method over gRPC.
Returns:
Callable[[~.ListQuantumReservationsRequest],
Awaitable[~.ListQuantumReservationsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_reservations' not in self._stubs:
self._stubs['list_quantum_reservations'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumReservations',
request_serializer=engine.ListQuantumReservationsRequest.serialize,
response_deserializer=engine.ListQuantumReservationsResponse.deserialize,
)
return self._stubs['list_quantum_reservations']
@property
def update_quantum_reservation(self) -> Callable[
[engine.UpdateQuantumReservationRequest],
Awaitable[quantum.QuantumReservation]]:
r"""Return a callable for the update quantum reservation method over gRPC.
Returns:
Callable[[~.UpdateQuantumReservationRequest],
Awaitable[~.QuantumReservation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_quantum_reservation' not in self._stubs:
self._stubs['update_quantum_reservation'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/UpdateQuantumReservation',
request_serializer=engine.UpdateQuantumReservationRequest.serialize,
response_deserializer=quantum.QuantumReservation.deserialize,
)
return self._stubs['update_quantum_reservation']
@property
def quantum_run_stream(self) -> Callable[
[engine.QuantumRunStreamRequest],
Awaitable[engine.QuantumRunStreamResponse]]:
r"""Return a callable for the quantum run stream method over gRPC.
Returns:
Callable[[~.QuantumRunStreamRequest],
Awaitable[~.QuantumRunStreamResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'quantum_run_stream' not in self._stubs:
self._stubs['quantum_run_stream'] = self.grpc_channel.stream_stream(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/QuantumRunStream',
request_serializer=engine.QuantumRunStreamRequest.serialize,
response_deserializer=engine.QuantumRunStreamResponse.deserialize,
)
return self._stubs['quantum_run_stream']
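    # Note: unlike the unary-unary stubs above, this is a bidirectional stream.
    # The callable takes an iterator (or async iterator) of requests and the
    # returned call is async-iterable. A hedged sketch (names invented):
    #
    #   call = transport.quantum_run_stream(request_iterator)
    #   async for response in call:
    #       handle(response)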
@property
def list_quantum_reservation_grants(self) -> Callable[
[engine.ListQuantumReservationGrantsRequest],
Awaitable[engine.ListQuantumReservationGrantsResponse]]:
r"""Return a callable for the list quantum reservation
grants method over gRPC.
Returns:
Callable[[~.ListQuantumReservationGrantsRequest],
Awaitable[~.ListQuantumReservationGrantsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_reservation_grants' not in self._stubs:
self._stubs['list_quantum_reservation_grants'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumReservationGrants',
request_serializer=engine.ListQuantumReservationGrantsRequest.serialize,
response_deserializer=engine.ListQuantumReservationGrantsResponse.deserialize,
)
return self._stubs['list_quantum_reservation_grants']
@property
def reallocate_quantum_reservation_grant(self) -> Callable[
[engine.ReallocateQuantumReservationGrantRequest],
Awaitable[quantum.QuantumReservationGrant]]:
r"""Return a callable for the reallocate quantum reservation
grant method over gRPC.
Returns:
Callable[[~.ReallocateQuantumReservationGrantRequest],
Awaitable[~.QuantumReservationGrant]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'reallocate_quantum_reservation_grant' not in self._stubs:
self._stubs['reallocate_quantum_reservation_grant'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ReallocateQuantumReservationGrant',
request_serializer=engine.ReallocateQuantumReservationGrantRequest.serialize,
response_deserializer=quantum.QuantumReservationGrant.deserialize,
)
return self._stubs['reallocate_quantum_reservation_grant']
@property
def list_quantum_reservation_budgets(self) -> Callable[
[engine.ListQuantumReservationBudgetsRequest],
Awaitable[engine.ListQuantumReservationBudgetsResponse]]:
r"""Return a callable for the list quantum reservation
budgets method over gRPC.
Returns:
Callable[[~.ListQuantumReservationBudgetsRequest],
Awaitable[~.ListQuantumReservationBudgetsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_reservation_budgets' not in self._stubs:
self._stubs['list_quantum_reservation_budgets'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumReservationBudgets',
request_serializer=engine.ListQuantumReservationBudgetsRequest.serialize,
response_deserializer=engine.ListQuantumReservationBudgetsResponse.deserialize,
)
return self._stubs['list_quantum_reservation_budgets']
@property
def list_quantum_time_slots(self) -> Callable[
[engine.ListQuantumTimeSlotsRequest],
Awaitable[engine.ListQuantumTimeSlotsResponse]]:
r"""Return a callable for the list quantum time slots method over gRPC.
Returns:
Callable[[~.ListQuantumTimeSlotsRequest],
Awaitable[~.ListQuantumTimeSlotsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_quantum_time_slots' not in self._stubs:
self._stubs['list_quantum_time_slots'] = self.grpc_channel.unary_unary(
'/google.cloud.quantum.v1alpha1.QuantumEngineService/ListQuantumTimeSlots',
request_serializer=engine.ListQuantumTimeSlotsRequest.serialize,
response_deserializer=engine.ListQuantumTimeSlotsResponse.deserialize,
)
return self._stubs['list_quantum_time_slots']
def close(self):
return self.grpc_channel.close()
__all__ = (
'QuantumEngineServiceGrpcAsyncIOTransport',
)
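# A minimal end-to-end sketch (hypothetical resource name; assumes application
# default credentials and network access, so the call is left commented out).
# This is illustration only, not part of the transport module above.
import asyncio

async def _demo():
    transport = QuantumEngineServiceGrpcAsyncIOTransport()
    request = engine.GetQuantumProgramRequest(
        name='projects/my-project/programs/my-program')
    program = await transport.get_quantum_program(request)
    print(program.name)
    await transport.close()

# asyncio.run(_demo())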
import base64
import os
import shutil
import string
import tempfile
import unittest
from datetime import timedelta
from http import cookies
from pathlib import Path
from unittest import mock
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import (
SessionStore as CookieSession,
)
from django.contrib.sessions.exceptions import InvalidSessionKey, SessionInterrupted
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import JSONSerializer
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.core.signing import TimestampSigner
from django.http import HttpResponse
from django.test import (
RequestFactory,
SimpleTestCase,
TestCase,
ignore_warnings,
override_settings,
)
from django.utils import timezone
from .models import SessionStore as CustomDatabaseSession
class SessionTestsMixin:
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertIs(self.session.modified, False)
self.assertIs(self.session.accessed, False)
def test_get_empty(self):
self.assertIsNone(self.session.get("cat"))
def test_store(self):
self.session["cat"] = "dog"
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.pop("cat"), "dog")
def test_pop(self):
self.session["some key"] = "exists"
# Need to reset these to pretend we haven't accessed it:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop("some key"), "exists")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertIsNone(self.session.get("some key"))
def test_pop_default(self):
self.assertEqual(
self.session.pop("some key", "does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_default_named_argument(self):
self.assertEqual(
self.session.pop("some key", default="does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_no_default_keyerror_raised(self):
with self.assertRaises(KeyError):
self.session.pop("some key")
def test_setdefault(self):
self.assertEqual(self.session.setdefault("foo", "bar"), "bar")
self.assertEqual(self.session.setdefault("foo", "baz"), "bar")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_update(self):
self.session.update({"update key": 1})
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.get("update key", None), 1)
def test_has_key(self):
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn("some key", self.session)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertIs(self.session.accessed, True)
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.values()), [1])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_keys(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.keys()), ["x"])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_items(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_clear(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_save(self):
self.session.save()
self.assertIs(self.session.exists(self.session.session_key), True)
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertIs(self.session.exists(self.session.session_key), False)
def test_flush(self):
self.session["foo"] = "bar"
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertIs(self.session.modified, True)
self.assertIs(self.session.accessed, True)
def test_cycle(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_cycle_with_no_session_cache(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_data = self.session.items()
self.session = self.backend(self.session.session_key)
self.assertIs(hasattr(self.session, "_session_cache"), False)
self.session.cycle_key()
self.assertCountEqual(self.session.items(), prev_data)
def test_save_doesnt_clear_data(self):
self.session["a"] = "b"
self.session.save()
self.assertEqual(self.session["a"], "b")
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend("1")
session.save()
self.assertNotEqual(session.session_key, "1")
self.assertIsNone(session.get("cat"))
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete("1")
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ""
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = "1234567"
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = "12345678"
self.assertEqual(self.session.session_key, "12345678")
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
with self.assertRaises(AttributeError):
set_session_key(self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), False)
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), True)
def test_decode(self):
# Ensure we can decode what we encode
data = {"a test key": "a test value"}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
tests = [
base64.b64encode(b"flaskdj:alkdjf").decode("ascii"),
"bad:encoded:value",
]
for encoded in tests:
with self.subTest(encoded=encoded):
with self.assertLogs(
"django.security.SuspiciousSession", "WARNING"
) as cm:
self.assertEqual(self.session.decode(encoded), {})
# The failed decode is logged.
self.assertIn("Session data corrupted", cm.output[0])
def test_decode_serializer_exception(self):
signer = TimestampSigner(salt=self.session.key_salt)
encoded = signer.sign(b"invalid data")
self.assertEqual(self.session.decode(encoded), {})
def test_actual_expiry(self):
old_session_key = None
new_session_key = None
try:
self.session["foo"] = "bar"
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn("foo", new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
session = self.backend("someunknownkey")
session.load()
self.assertIsNone(session.session_key)
self.assertIs(session.exists(session.session_key), False)
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, "someunknownkey")
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
"""
Sessions shouldn't be resurrected by a concurrent request.
"""
# Create new session.
s1 = self.backend()
s1["test_data"] = "value1"
s1.save(must_create=True)
# Logout in another context.
s2 = self.backend(s1.session_key)
s2.delete()
# Modify session in first context.
s1["test_data"] = "value2"
with self.assertRaises(UpdateError):
# This should throw an exception as the session is deleted, not
# resurrect the session.
s1.save()
self.assertEqual(s1.load(), {})
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
session_engine = "django.contrib.sessions.backends.db"
@property
def model(self):
return self.backend.get_model_class()
def test_session_str(self):
"Session repr should be the session key."
self.session["x"] = 1
self.session.save()
session_key = self.session.session_key
s = self.model.objects.get(session_key=session_key)
self.assertEqual(str(s), session_key)
def test_session_get_decoded(self):
"""
        Test that Session.get_decoded() retrieves data stored the normal way.
"""
self.session["x"] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {"x": 1})
def test_sessionmanager_save(self):
"""
        Test the SessionManager.save() method.
"""
# Create a session
self.session["y"] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
# Change it
self.model.objects.save(s.session_key, {"y": 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session["y"], 2)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, self.model.objects.count())
# One object in the future
self.session["foo"] = "bar"
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session["foo"] = "bar"
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, self.model.objects.count())
with override_settings(SESSION_ENGINE=self.session_engine):
management.call_command("clearsessions")
# ... and one is deleted.
self.assertEqual(1, self.model.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CustomDatabaseSessionTests(DatabaseSessionTests):
backend = CustomDatabaseSession
session_engine = "sessions_tests.models"
custom_session_cookie_age = 60 * 60 * 24 # One day.
def test_extra_session_field(self):
# Set the account ID to be picked up by a custom session storage
# and saved to a custom session model database column.
self.session["_auth_user_id"] = 42
self.session.save()
# Make sure that the customized create_model_instance() was called.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, 42)
# Make the session "anonymous".
self.session.pop("_auth_user_id")
self.session.save()
# Make sure that save() on an existing session did the right job.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertIsNone(s.account_id)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age)
def test_default_expiry(self):
self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertIs(self.session.exists(self.session.session_key), True)
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS="sessions")
def test_non_default_cache(self):
        # Refs #21000 -- the cached_db backend should respect SESSION_CACHE_ALIAS.
with self.assertRaises(InvalidCacheBackendError):
self.backend()
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
class FileSessionTests(SessionTestsMixin, SimpleTestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = self.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, "_storage_path"):
del self.backend._storage_path
super().setUp()
def tearDown(self):
super().tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
def mkdtemp(self):
return tempfile.mkdtemp()
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer",
)
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
with self.assertRaises(ImproperlyConfigured):
self.backend()
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an OSError - by creating
# a new session, making it unclear whether the slashes were detected.
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a/b/c")
@override_settings(
SESSION_ENGINE="django.contrib.sessions.backends.file",
SESSION_COOKIE_AGE=0,
)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len(
[
session_file
for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)
]
)
self.assertEqual(0, count_sessions())
# One object in the future
self.session["foo"] = "bar"
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session["foo"] = "bar"
other_session.set_expiry(-3600)
other_session.save()
# One object in the present without an expiry (should be deleted since
# its modification time + SESSION_COOKIE_AGE will be in the past when
# clearsessions runs).
other_session2 = self.backend()
other_session2["foo"] = "bar"
other_session2.save()
# Three sessions are in the filesystem before clearsessions...
self.assertEqual(3, count_sessions())
management.call_command("clearsessions")
# ... and two are deleted.
self.assertEqual(1, count_sessions())
class FileSessionPathLibTests(FileSessionTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
class CacheSessionTests(SessionTestsMixin, SimpleTestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertIsNotNone(caches["default"].get(self.session.cache_key))
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
},
"sessions": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "session",
},
},
SESSION_CACHE_ALIAS="sessions",
)
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertIsNone(caches["default"].get(self.session.cache_key))
self.assertIsNotNone(caches["sessions"].get(self.session.cache_key))
def test_create_and_save(self):
self.session = self.backend()
self.session.create()
self.session.save()
self.assertIsNotNone(caches["default"].get(self.session.cache_key))
class SessionMiddlewareTests(TestCase):
request_factory = RequestFactory()
@staticmethod
def get_response_touching_session(request):
request.session["hello"] = "world"
return HttpResponse("Session test")
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
# Handle the response through the middleware
response = middleware(request)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]["secure"], True)
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
# Handle the response through the middleware
response = middleware(request)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]["httponly"], True)
self.assertIn(
cookies.Morsel._reserved["httponly"],
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
@override_settings(SESSION_COOKIE_SAMESITE="Strict")
def test_samesite_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
response = middleware(request)
self.assertEqual(
response.cookies[settings.SESSION_COOKIE_NAME]["samesite"], "Strict"
)
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
response = middleware(request)
self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]["httponly"], "")
self.assertNotIn(
cookies.Morsel._reserved["httponly"],
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
def test_session_save_on_500(self):
def response_500(request):
response = HttpResponse("Horrible error")
response.status_code = 500
request.session["hello"] = "world"
return response
request = self.request_factory.get("/")
SessionMiddleware(response_500)(request)
# The value wasn't saved above.
self.assertNotIn("hello", request.session.load())
def test_session_save_on_5xx(self):
def response_503(request):
response = HttpResponse("Service Unavailable")
response.status_code = 503
request.session["hello"] = "world"
return response
request = self.request_factory.get("/")
SessionMiddleware(response_503)(request)
# The value wasn't saved above.
self.assertNotIn("hello", request.session.load())
def test_session_update_error_redirect(self):
def response_delete_session(request):
request.session = DatabaseSession()
request.session.save(must_create=True)
request.session.delete()
return HttpResponse()
request = self.request_factory.get("/foo/")
middleware = SessionMiddleware(response_delete_session)
msg = (
"The request's session was deleted before the request completed. "
"The user may have logged out in a concurrent request, for example."
)
with self.assertRaisesMessage(SessionInterrupted, msg):
# Handle the response through the middleware. It will try to save
# the deleted session which will cause an UpdateError that's caught
# and raised as a SessionInterrupted.
middleware(request)
def test_session_delete_on_end(self):
def response_ending_session(request):
request.session.flush()
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_ending_session)
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = "abc"
# Handle the response through the middleware
response = middleware(request)
# The cookie was deleted, not recreated.
# A deleted cookie header looks like:
# "Set-Cookie: sessionid=; expires=Thu, 01 Jan 1970 00:00:00 GMT; "
# "Max-Age=0; Path=/"
self.assertEqual(
'Set-Cookie: {}=""; expires=Thu, 01 Jan 1970 00:00:00 GMT; '
"Max-Age=0; Path=/; SameSite={}".format(
settings.SESSION_COOKIE_NAME,
settings.SESSION_COOKIE_SAMESITE,
),
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
# SessionMiddleware sets 'Vary: Cookie' to prevent the 'Set-Cookie'
# from being cached.
self.assertEqual(response.headers["Vary"], "Cookie")
@override_settings(
SESSION_COOKIE_DOMAIN=".example.local", SESSION_COOKIE_PATH="/example/"
)
def test_session_delete_on_end_with_custom_domain_and_path(self):
def response_ending_session(request):
request.session.flush()
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_ending_session)
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = "abc"
# Handle the response through the middleware
response = middleware(request)
# The cookie was deleted, not recreated.
# A deleted cookie header with a custom domain and path looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0;
# Path=/example/
self.assertEqual(
'Set-Cookie: {}=""; Domain=.example.local; expires=Thu, '
"01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/example/; SameSite={}".format(
settings.SESSION_COOKIE_NAME,
settings.SESSION_COOKIE_SAMESITE,
),
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
def response_ending_session(request):
request.session.flush()
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_ending_session)
# Handle the response through the middleware
response = middleware(request)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response.headers["Vary"], "Cookie")
def test_empty_session_saved(self):
"""
If a session is emptied of data but still has a key, it should still
be updated.
"""
def response_set_session(request):
# Set a session key and some data.
request.session["foo"] = "bar"
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_set_session)
# Handle the response through the middleware.
response = middleware(request)
self.assertEqual(tuple(request.session.items()), (("foo", "bar"),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
"Set-Cookie: sessionid=%s" % request.session.session_key,
str(response.cookies),
)
self.assertEqual(response.headers["Vary"], "Cookie")
# Empty the session data.
del request.session["foo"]
# Handle the response through the middleware.
response = HttpResponse("Session test")
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
"Set-Cookie: sessionid=%s" % request.session.session_key,
str(response.cookies),
)
self.assertEqual(response.headers["Vary"], "Cookie")
class CookieSessionTests(SessionTestsMixin, SimpleTestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super().test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
with mock.patch("django.core.signing.loads", side_effect=ValueError):
self.session.load()
@unittest.skip(
"Cookie backend doesn't have an external store to create records in."
)
def test_session_load_does_not_create_record(self):
pass
@unittest.skip(
"CookieSession is stored in the client and there is no way to query it."
)
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
pass
class ClearSessionsCommandTests(SimpleTestCase):
def test_clearsessions_unsupported(self):
msg = (
"Session engine 'sessions_tests.no_clear_expired' doesn't "
"support clearing expired sessions."
)
with self.settings(SESSION_ENGINE="sessions_tests.no_clear_expired"):
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command("clearsessions")
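# A minimal sketch (an assumption, not part of this test module) of the
# engine the test above points at: sessions_tests/no_clear_expired.py only
# needs a SessionStore whose clear_expired() raises NotImplementedError,
# which clearsessions reports as the CommandError asserted above.
#
#   class SessionStore:
#       @classmethod
#       def clear_expired(cls):
#           raise NotImplementedError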
|
{
"content_hash": "1669fbb2818952628947c6d7c4e86757",
"timestamp": "",
"source": "github",
"line_count": 936,
"max_line_length": 88,
"avg_line_length": 37.37713675213675,
"alnum_prop": 0.6433042732599685,
"repo_name": "felixxm/django",
"id": "d13c485342e8f7c6240d4cbe54450599f552c070",
"size": "34985",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/sessions_tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91986"
},
{
"name": "HTML",
"bytes": "238949"
},
{
"name": "JavaScript",
"bytes": "157441"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16195712"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
import boto
from boto.swf.exceptions import SWFResponseError
from datetime import datetime, timedelta
import sure # noqa
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from moto import mock_swf
from moto.core.utils import unix_time
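# `sure` (imported above for its side effects) patches fluent assertions onto
# objects, which is why the tests below can write, e.g.:
#   conn.start_workflow_execution.when.called_with(...).should.throw(SWFResponseError)
# instead of wrapping each boto call in an explicit try/except.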
# Utils
@mock_swf
def setup_swf_environment():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60", description="A test domain")
conn.register_workflow_type(
"test-domain", "test-workflow", "v1.0",
task_list="queue", default_child_policy="TERMINATE",
default_execution_start_to_close_timeout="300",
default_task_start_to_close_timeout="300",
)
conn.register_activity_type("test-domain", "test-activity", "v1.1")
return conn
# StartWorkflowExecution endpoint
@mock_swf
def test_start_workflow_execution():
conn = setup_swf_environment()
wf = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
wf.should.contain("runId")
@mock_swf
def test_start_already_started_workflow_execution():
conn = setup_swf_environment()
conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
conn.start_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
).should.throw(SWFResponseError)
@mock_swf
def test_start_workflow_execution_on_deprecated_type():
conn = setup_swf_environment()
conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0")
conn.start_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
).should.throw(SWFResponseError)
# DescribeWorkflowExecution endpoint
@mock_swf
def test_describe_workflow_execution():
conn = setup_swf_environment()
hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
run_id = hsh["runId"]
wfe = conn.describe_workflow_execution("test-domain", run_id, "uid-abcd1234")
wfe["executionInfo"]["execution"]["workflowId"].should.equal("uid-abcd1234")
wfe["executionInfo"]["executionStatus"].should.equal("OPEN")
@mock_swf
def test_describe_non_existent_workflow_execution():
conn = setup_swf_environment()
conn.describe_workflow_execution.when.called_with(
"test-domain", "wrong-run-id", "wrong-workflow-id"
).should.throw(SWFResponseError)
# GetWorkflowExecutionHistory endpoint
@mock_swf
def test_get_workflow_execution_history():
conn = setup_swf_environment()
hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
run_id = hsh["runId"]
resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"])
@mock_swf
def test_get_workflow_execution_history_with_reverse_order():
conn = setup_swf_environment()
hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
run_id = hsh["runId"]
resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234",
reverse_order=True)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(["DecisionTaskScheduled", "WorkflowExecutionStarted"])
@mock_swf
def test_get_workflow_execution_history_on_non_existent_workflow_execution():
conn = setup_swf_environment()
conn.get_workflow_execution_history.when.called_with(
"test-domain", "wrong-run-id", "wrong-workflow-id"
).should.throw(SWFResponseError)
# ListOpenWorkflowExecutions endpoint
@mock_swf
def test_list_open_workflow_executions():
conn = setup_swf_environment()
# One open workflow execution
conn.start_workflow_execution(
'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0'
)
# One closed workflow execution to make sure it isn't displayed
run_id = conn.start_workflow_execution(
'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0'
)['runId']
conn.terminate_workflow_execution('test-domain', 'uid-abcd12345',
details='some details',
reason='a more complete reason',
run_id=run_id)
yesterday = datetime.now() - timedelta(days=1)
oldest_date = unix_time(yesterday)
response = conn.list_open_workflow_executions('test-domain',
oldest_date,
workflow_id='test-workflow')
execution_infos = response['executionInfos']
len(execution_infos).should.equal(1)
open_workflow = execution_infos[0]
open_workflow['workflowType'].should.equal({'version': 'v1.0',
'name': 'test-workflow'})
open_workflow.should.contain('startTimestamp')
open_workflow['execution']['workflowId'].should.equal('uid-abcd1234')
open_workflow['execution'].should.contain('runId')
open_workflow['cancelRequested'].should.be(False)
open_workflow['executionStatus'].should.equal('OPEN')
# ListClosedWorkflowExecutions endpoint
@mock_swf
def test_list_closed_workflow_executions():
conn = setup_swf_environment()
# Leave one workflow execution open to make sure it isn't displayed
conn.start_workflow_execution(
'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0'
)
# One closed workflow execution
run_id = conn.start_workflow_execution(
'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0'
)['runId']
conn.terminate_workflow_execution('test-domain', 'uid-abcd12345',
details='some details',
reason='a more complete reason',
run_id=run_id)
yesterday = datetime.now() - timedelta(days=1)
oldest_date = unix_time(yesterday)
response = conn.list_closed_workflow_executions(
'test-domain',
start_oldest_date=oldest_date,
workflow_id='test-workflow')
execution_infos = response['executionInfos']
len(execution_infos).should.equal(1)
open_workflow = execution_infos[0]
open_workflow['workflowType'].should.equal({'version': 'v1.0',
'name': 'test-workflow'})
open_workflow.should.contain('startTimestamp')
open_workflow['execution']['workflowId'].should.equal('uid-abcd12345')
open_workflow['execution'].should.contain('runId')
open_workflow['cancelRequested'].should.be(False)
open_workflow['executionStatus'].should.equal('CLOSED')
# TerminateWorkflowExecution endpoint
@mock_swf
def test_terminate_workflow_execution():
conn = setup_swf_environment()
run_id = conn.start_workflow_execution(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
)["runId"]
resp = conn.terminate_workflow_execution("test-domain", "uid-abcd1234",
details="some details",
reason="a more complete reason",
run_id=run_id)
resp.should.be.none
resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234")
evt = resp["events"][-1]
evt["eventType"].should.equal("WorkflowExecutionTerminated")
attrs = evt["workflowExecutionTerminatedEventAttributes"]
attrs["details"].should.equal("some details")
attrs["reason"].should.equal("a more complete reason")
attrs["cause"].should.equal("OPERATOR_INITIATED")
@mock_swf
def test_terminate_workflow_execution_with_wrong_workflow_or_run_id():
conn = setup_swf_environment()
run_id = conn.start_workflow_execution(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
)["runId"]
# terminate workflow execution
conn.terminate_workflow_execution("test-domain", "uid-abcd1234")
# already closed, with run_id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", run_id=run_id
).should.throw(
SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId="
)
# already closed, without run_id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234"
).should.throw(
SWFResponseError, "Unknown execution, workflowId = uid-abcd1234"
)
# wrong workflow id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-non-existent"
).should.throw(
SWFResponseError, "Unknown execution, workflowId = uid-non-existent"
)
# wrong run_id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", run_id="foo"
).should.throw(
SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId="
)
|
{
"content_hash": "92a5c0c453799431a7b2c0f506fe847d",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 95,
"avg_line_length": 37.99581589958159,
"alnum_prop": 0.6499284219799581,
"repo_name": "IlyaSukhanov/moto",
"id": "8e311a22c5afe8f388b2ec3c93c1007eb7254ff1",
"size": "9081",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_swf/responses/test_workflow_executions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "230"
},
{
"name": "Python",
"bytes": "2088140"
}
],
"symlink_target": ""
}
|
"""Integration platform for recorder."""
from __future__ import annotations
from homeassistant.const import ATTR_EDITABLE
from homeassistant.core import HomeAssistant, callback
from . import ATTR_MAX, ATTR_MIN, ATTR_MODE, ATTR_PATTERN
@callback
def exclude_attributes(hass: HomeAssistant) -> set[str]:
"""Exclude editable hint from being recorded in the database."""
return {
ATTR_EDITABLE,
ATTR_MAX,
ATTR_MIN,
ATTR_MODE,
ATTR_PATTERN,
}
|
{
"content_hash": "d79086ee80c98b80587f5699116ec76f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 26,
"alnum_prop": 0.6862348178137652,
"repo_name": "toddeye/home-assistant",
"id": "0f4969270d00f6e4b84522ddd1dd4eae77f84995",
"size": "494",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/input_text/recorder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
__author__ = 'thomasvangurp'
import os
from Bio import SeqIO
import subprocess
from scipy import stats
from operator import itemgetter
"""Detect DMPs between all samples, calculate their abundance and distribution"""
# Choose 2 most abundant individuals for DMP detection
# How many DMP's are there per context?
#
def get_context(file_in):
"""Get genomic context from bed file"""
output_dict = {}
handle = open(file_in,'r')
header = handle.readline().rstrip('\n').split('\t')
for line in handle:
split_line = line.rstrip('\n').split('\t')
contig,pos,context = split_line[:3]
if context == '.':
continue
try:
output_dict[contig][pos] = context
except KeyError:
output_dict[contig] = {pos:context}
return output_dict
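# Hypothetical illustration of the bed layout read above (the column names
# are an assumption; only the first three columns are used):
#   contig      pos   context  ...
#   scaffold_1  1042  CG       ...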
def make_pileup(header,dir,sample_dict):
"""make pileup"""
for name in header[4:]:
if name.endswith('_methylated'):
sample_name = name.replace('_methylated','')
if sample_name not in sample_dict['watson']:
continue
else:
continue
for strand in ['watson','crick']:
output_loc = {}
output_loc['bamfile'] = '/tmp/%s.bam'%(sample_name+'_'+strand)
output_loc['output'] = os.path.join(dir,'%s.pileup'%(sample_name+'_'+strand))
output_loc['regions'] = os.path.join(dir,'%s_%s.bed'%(sample_name,strand))
pileup_cmd = 'samtools mpileup -l %(regions)s %(bamfile)s > %(output)s'%output_loc
#only run command if file does not yet exist
file_check = os.path.exists(output_loc['output'])
if file_check:
continue
p = subprocess.Popen(pileup_cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,
shell=True,executable='/bin/bash')
exit_code = p.wait()
stdout = p.stdout.read()
stderr = p.stderr.read()
if stderr:
print stderr,pileup_cmd
return 0
def make_bed_files(sample_dict,out_dir):
"""generate bed file with positions to check"""
for strand,subdict in sample_dict.items():
for sample,contig_dict in subdict.items():
file_name = '%s_%s.bed'%(sample,strand)
out_handle = os.path.join(out_dir,file_name)
with open(out_handle,'w') as file_out:
for contig,positions in sorted(contig_dict.items()):
for pos in sorted(positions):
out_items = (contig,pos,)
file_out.write('%s\t%s\n'%out_items)
return 0
def filter_header(header,sample_total):
""""returns index of header given sample list"""
include_samples = open('/Users/thomasvangurp/epiGBS/Baseclear/unfiltered_sequences/seqNNAtlE/selection_DMP.txt','r')
samples = include_samples.read().split('\n')
max_cover = {}
for ind,coverage in sample_total.items():
if ind in samples:
max_cover[coverage] = ind
samples_out = []
for i in range(4):
try:
key = max(max_cover.keys())
except ValueError:
break
max_sample = max_cover[key]
samples_out.append(max_cover[key])
max_cover.pop(key)
return samples_out
def process_bed(input_bed,genome_dict,mincov):
"""Process input methylation.bed and generate sample specific watson and crick bed files"""
#could be limited to samples of interest if required.
#limited by filter
handle = open(input_bed,'r')
header = [i.replace('_total','') for i in handle.readline()[:-1].split('\t')]
sample_dict = {'watson':{},'crick':{}}
sample_total = {}
for linenumber,line in enumerate(handle):
if not linenumber%100000 and linenumber:
print 'Processed %s lines'%linenumber
split_line = line.rstrip('\n').split('\t')
contig = split_line[0]
pos = int(split_line[1])
if genome_dict[contig].seq[pos-1] == 'C':
strand = 'watson'
elif genome_dict[contig].seq[pos-1] == 'G':
strand = 'crick'
        else:
            print "Nucleotide incorrect, %s"%genome_dict[contig].seq[pos-1]
            #skip this line, otherwise 'strand' is unbound below
            continue
#totals are separated by 2 positions.
for i in range(5,len(split_line),2):
total = split_line[i]
if total == 'None':
continue
name = header[i]
try:
sample_total[name] += int(total)
except KeyError:
sample_total[name] = int(total)
try:
if int(total) > (mincov * 2):
try:
sample_dict[strand][name][contig].append(pos)
except KeyError:
if name not in sample_dict[strand]:
sample_dict[strand][name] = {contig:[pos]}
elif contig not in sample_dict[strand][name]:
sample_dict[strand][name][contig] = [pos]
except ValueError:
pass
#filter to only include the samples that we would like to get
samples_to_include = filter_header(header,sample_total)
for strand,subdict in sample_dict.items():
for sample in subdict.keys():
if sample not in samples_to_include:
subdict.pop(sample)
return sample_dict
def split_sample(header,dir,type,sample_dict):
"""Split watson and crick bam file into sample specific reads groups"""
#to limit what is processed modify header
for name in header[4:]:
if name.endswith('_methylated'):
sample_name = name.replace('_methylated','')
if sample_name not in sample_dict['watson']:
continue
else:
continue
for strand in ['watson','crick']:
split_cmd = 'samtools view -h %s/%s.bam | grep "^@SQ\|^@PG\|%s"|samtools view -Shb - > /tmp/%s.bam'%\
(dir,strand,sample_name,sample_name+'_'+strand)
index_cmd = 'samtools index /tmp/%s.bam'%(sample_name+'_'+strand)
file_check = os.path.exists('/tmp/%s.bam'%(sample_name+'_'+strand))
if file_check:
continue
for cmd in [split_cmd,index_cmd]:
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,
shell=True,executable='/bin/bash')
exit_code = p.wait()
return 0
def filter_pileup(header,input_dir,output_dir,libtype,cov_treshold,sample_dict):
"""generate output for methylation rate according to values in pileup"""
#output 1 is methylation.filtered.bed
#output 2 is methylation_ratio.filtered.igv
output_dict = {}
out_2_handle = open(os.path.join(output_dir,'methylation_ratio.filtered.igv'),'w')
for name in header[4:]:
if name.endswith('_methylated'):
sample_name = name.replace('_methylated','')
if sample_name not in sample_dict['watson']:
continue
else:
continue
for strand in ['watson','crick']:
handle = open(os.path.join(output_dir,'%s.pileup'%(sample_name+'_'+strand)))
for line in handle:
split_line = line.rstrip('\n').split('\t')
meth,nometh,ratio = get_ratio(split_line[4],strand,cov_treshold,libtype)
contig,pos = split_line[:2]
if ratio != '':
try:
output_dict[contig][int(pos)][sample_name] = {'ratio':ratio,'meth':meth,'nometh':nometh}
except KeyError:
if contig not in output_dict:
output_dict[contig] = {int(pos):{sample_name:{'ratio':ratio,'meth':meth,'nometh':nometh}}}
elif int(pos) not in output_dict[contig]:
output_dict[contig][int(pos)] = {sample_name:{'ratio':ratio,'meth':meth,'nometh':nometh}}
for contig,positions in sorted(output_dict.items()):
for pos in sorted(positions):
out_2 = [contig,str(pos)]
for name in header[4:]:
if name.endswith('_methylated'):
sample_name = name.replace('_methylated','')
else:
continue
try:
out_2.append('%.3f'%positions[pos][sample_name]['ratio'])
except KeyError:
out_2.append('.')
out_2_handle.write('\t'.join(out_2)+'\n')
return output_dict
def get_ratio(nts,strand,cov_treshold,libtype):
"""Calculate corrected ratio for position"""
nt_count = {'c':0,'C':0,'t':0,'T':0,'g':0,'G':0,'a':0,'A':0}
for nt in nt_count.keys():
nt_count[nt]+= nts.count(nt)
if strand == 'crick':
plus_count = nt_count['A']+nt_count['G']
min_count = nt_count['a']+nt_count['g']
try:
plus_ratio = nt_count['G'] / float(plus_count)
except ZeroDivisionError:
plus_ratio = ''
try:
_min_ratio = nt_count['g'] / float(min_count)
except ZeroDivisionError:
_min_ratio = ''
meth = nt_count['g'] + nt_count['G']
else:
plus_count = nt_count['T']+nt_count['C']
min_count = nt_count['t']+nt_count['c']
try:
plus_ratio = nt_count['C'] / float(plus_count)
except ZeroDivisionError:
plus_ratio = ''
try:
_min_ratio = nt_count['c'] / float(min_count)
except ZeroDivisionError:
_min_ratio = ''
meth = nt_count['c'] + nt_count['C']
if min(plus_count,min_count) > cov_treshold:
#Here we will take the average of both estimates from forward and reverse reads
nometh = plus_count + min_count - meth
        #if one of the ratios is below 0.05, return 0 as ratio to avoid dubious methylation calls.
if min(_min_ratio,plus_ratio) < 0.05:
return 0,nometh+meth,0.0
else:
return meth,nometh,(_min_ratio + plus_ratio) / 2.0
elif libtype == 'WGBS':
#for WGBS libraries we do not want to be as stringent as for epiGBS
#This exercise serves to compare becker et al with epiGBS.
nometh = plus_count + min_count - meth
try:
ratio = meth/float(nometh+meth)
except ZeroDivisionError:
ratio = 0.0
return meth,nometh,ratio
else:
return None,None,''
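# Worked example for get_ratio (assuming the pileup column reports bases,
# with upper/lower case marking forward/reverse reads as in samtools
# mpileup): get_ratio('CTTctt', 'watson', 2, 'epiGBS') counts
# plus_count=3, min_count=3, plus_ratio=_min_ratio=1/3 and returns
# (meth=2, nometh=4, ratio=1/3), the average of the two strand estimates.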
def fisher_exact(output_dict,out_dir):
"""Do fisher exact test for pairwise DMP's between loci"""
two_by_two = {}
for contig,positions in sorted(output_dict.items()):
for pos in sorted(positions.keys()):
for i,(ind1,counts1) in enumerate(sorted(positions[pos].items())):
for j,(ind2,counts2) in enumerate(sorted(positions[pos].items())):
if ind1 == ind2 or j<i:
continue
table = [[counts1['meth'],counts1['nometh']],
[counts2['meth'],counts2['nometh']]]
total_1 = float(counts1['nometh'] + counts1['meth'])
total_2 = float(counts2['nometh'] + counts2['meth'])
ratio_1 = counts1['meth'] / total_1
ratio_2 = counts2['meth'] / total_2
oddsratio, pvalue = stats.fisher_exact(table)
try:
two_by_two['%s_%s'%(ind1,ind2)][contig].append([pos,pvalue,ratio_1,ratio_2,total_1,total_2])
except KeyError:
if '%s_%s'%(ind1,ind2) not in two_by_two:
two_by_two['%s_%s'%(ind1,ind2)] = {}
two_by_two['%s_%s'%(ind1,ind2)][contig] = [[pos,pvalue,ratio_1,ratio_2,total_1,total_2]]
#now apply bonferroni correction to table
for comparison in two_by_two.keys():
handle = open(os.path.join(out_dir,'%s.csv'%comparison),'w')
total_length = 0
for contig,values in two_by_two[comparison].items():
total_length += len(values)
min_p = 0.05 / total_length
handle.write('%s\t%s\n'%(comparison,min_p))
for contig,values in two_by_two[comparison].items():
for item in values:
handle.write('%s\t%s\t%s'%(comparison,contig,'\t'.join([str(i) for i in item])))
if item[1] < min_p:
handle.write('\t1\n')
else:
handle.write('\t0\n')
return two_by_two
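# A minimal, self-contained illustration of the test applied above; the
# counts are hypothetical and this helper is never called by the pipeline.
def _fisher_exact_example():
    """Each row is one individual's (meth, nometh) read counts at a position."""
    table = [[20, 5],
             [4, 21]]
    oddsratio, pvalue = stats.fisher_exact(table)
    # Methylation ratios of 20/25=0.8 vs 4/25=0.16 give a p-value well below
    # 1e-4, small enough to survive a Bonferroni threshold such as 0.05/1000.
    return oddsratio, pvalue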
def try_to_add(entry,dict,keys,position):
""""try to add position to dict and create key if it fails"""
try:
dict[keys[0]]['%s_%s'%(keys[1],entry)] += [position]
except KeyError:
if keys[0] not in dict:
dict[keys[0]] = {'%s_%s'%(keys[1],entry):[position]}
else:
dict[keys[0]]['%s_%s'%(keys[1],entry)] = [position]
return dict
def check_symmetric_meth(items_in):
"""Check whether methylation is present or absent for both cytosines in symmetric contexts"""
#[position,p-value,meth_ratio_0,meth_ratio_1,total_0,total_1]
items_out = []
min_meth = 0.05
if len(items_in)%2:
print "wrong length for items_in!"
for i in range(0,len(items_in)-1,2):
watson,crick = items_in[i:i+2]
obs = 0
for pos in [2,3]:
            #if methylation is present it should be symmetric: present on both the watson and crick strand
if max(watson[pos],crick[pos]) > min_meth:
if min(watson[pos],crick[pos]) > min_meth:
obs += 1
#if no evidence for methylation is found, good, retain positions as well
elif max(watson[pos],crick[pos]) < min_meth:
if min(watson[pos],crick[pos]) < min_meth:
obs += 1
#only if presence / absence for both individuals is congruent add to output
if obs == 2:
items_out.append(watson)
items_out.append(crick)
return items_out
def retain_symmetric(dict,context_dic):
"""retain only symmetric positions in case of methylation both strand should be methylated!"""
for comp,subdict in dict.items():
for contig,items in subdict.items():
#reset the content of subdict[contig] so that it is empty
subdict[contig] = []
context_items = {}
in_items = []
for item in items:
try:
context = context_dic[contig][str(item[0])]
except KeyError:
continue
try:
context_items[context].append(item)
except KeyError:
context_items[context] = [item]
#now retain only symmetric positions in comparison for CG and CHG
for context,items in sorted(context_items.items()):
if context == 'CHH':
subdict[contig] += items
continue
elif 'CHG' in context:
offset = 2
elif 'CG' in context:
                    offset = 1
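                # In symmetric contexts the partner cytosine sits on the
                # opposite strand: CG pairs are 1 bp apart and CHG pairs are
                # 2 bp apart, hence the offsets checked in the loop below.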
for i,item in enumerate(items[:-1]):
if items[i+1][0] == item[0] + offset:
if item not in in_items:
in_items.append(item)
else:
continue
if items[i+1] not in in_items:
in_items.append(items[i+1])
#now check if methylated positions in CG and CHG are symmetric if methylated, otherwise remove
out_items = check_symmetric_meth(in_items)
subdict[contig] += out_items
in_items = []
dict[comp][contig] = subdict[contig]
return dict
def retain_symmetric_DMP(summary_dict):
"""only retain symmetric DMPs"""
out_dict = {}
for cluster_type,subdict in summary_dict.items():
for type,items in subdict.items():
if type.split('_')[0] == 'CG':
offset = 1
elif type.split('_')[0] == 'CHG':
offset = 2
else:
continue
items_out = []
items = sorted(items, key=itemgetter(0,1,2))
for i,item in enumerate(items[:-1]):
#items should be from same comparison on same contig:
if items[i+1][0] == item[0] and items[i+1][1] == item[1]:
if items[i+1][2] == item[2] + offset:
if item not in items_out:
items_out.append(items[i])
items_out.append(items[i+1])
else:
continue
try:
out_dict[cluster_type][type] = items_out
except KeyError:
if cluster_type not in out_dict:
out_dict[cluster_type] = {type:items_out}
return out_dict
def get_dmp_stats(combined_entries,output_dir,context):
"""Write the combined entries and generate tables"""
positions = {}
    #make a dictionary like dmp_dict[contig] = {'CG_count': [...], 'CG_DMP': [...], ...}
dmp_dict = {}
for comparison,subdict in combined_entries.items():
item_len = sum([len(v) for v in subdict.values()])
p_min = 0.05 / float(item_len)
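        # Bonferroni correction: with e.g. 1,000 tested positions the
        # per-test significance threshold becomes 0.05/1000 = 5e-05.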
for contig,list in subdict.items():
for item in list:
position,pvalue,ratio0,ratio1,total1,total2 = item
ratio_diff = max(ratio0,ratio1) - min(ratio0,ratio1)
total_sum = int(total1) + int(total2)
try:
ctype = context[contig][str(position)]
except KeyError:
continue
#maybe we should have uniform representation here:
output = (comparison,contig,position,pvalue,ratio0,ratio1,total1,total2)
dmp_dict = try_to_add('count',dmp_dict,[contig,ctype],output)
if pvalue < p_min:# and ratio_diff > 0.7:#
dmp_dict = try_to_add('DMP',dmp_dict,[contig,ctype],output)
return dmp_dict
def write_dmp_stats(dmp_dict,output_dir,input_dir):
"""Get DMP statistics per species"""
gene_list = open(os.path.join(input_dir,'output_denovo') + '/genes.txt','r')
gene_list = gene_list.read().split('\n')
te_list = open(os.path.join(input_dir,'output_denovo') + '/te.txt','r')
te_list = te_list.read().split('\n')
summary_dict = {}
median_dict = {}
diff_dict = {}
out_path = os.path.join(output_dir,'dmp.csv')
output = open(out_path,'w')
for contig,subdict in dmp_dict.items():
if contig in gene_list:
cluster_type = 'gene'
elif contig in te_list:
cluster_type = 'te'
else:
cluster_type = 'other'
for type,items in subdict.items():
median_add = [sum(v[-2:]) for v in items]
diff_add = ['%.4f'%(v[5]-v[4]) for v in items]
if items == []:
continue
try:
median_dict[cluster_type][type]+= median_add
diff_dict[cluster_type][type]+= diff_add
except KeyError:
if cluster_type not in median_dict:
median_dict[cluster_type] = {type:median_add}
diff_dict[cluster_type ] = {type:diff_add}
else:
median_dict[cluster_type][type] = median_add
diff_dict[cluster_type][type] = diff_add
            #now accumulate the DMP statistics for the summary
try:
summary_dict[cluster_type][type]+=items
except KeyError:
if cluster_type not in summary_dict:
summary_dict[cluster_type] = {type:items}
else:
summary_dict[cluster_type][type] = items
symmetric_summary_dict = retain_symmetric_DMP(summary_dict)
for cluster_type,subdict in symmetric_summary_dict.items():
for key,values in sorted(subdict.items()):
if 'CHH' not in key:
for v in values:
if cluster_type != 'te':
out_line = [cluster_type,key] + [str(i) for i in v]
output.write('\t'.join(out_line)+'\n')
if values == []:
continue
else:
continue
out_line = [cluster_type] + key.split('_') + ['%s'%(len(values))]
print '\t'.join(out_line)
def summary_methylation(dmp_dict,output_dir,input_dir):
"""Get methylation statistics per species"""
gene_list = open(os.path.join(input_dir,'output_denovo') + '/genes.txt','r')
gene_list = gene_list.read().split('\n')
te_list = open(os.path.join(input_dir,'output_denovo') + '/te.txt','r')
te_list = te_list.read().split('\n')
summary_dict = {}
for contig,subdict in dmp_dict.items():
if contig in gene_list:
type = 'gene'
elif contig in te_list:
type = 'te'
else:
type = 'other'
for key,value in subdict.items():
if value == [] or 'count' not in key:
continue
if 'CHH' not in key:
methylated = 0
methylated = len([v[-4] for i,v in enumerate(value[:-1]) if v[-4]>0.05 and value[i+1][-4]>0.05 and not i%2])
methylated += len([v[-3] for i,v in enumerate(value[:-1]) if v[-3]>0.05 and value[i+1][-3]>0.05 and not i%2])
else:
methylated = len([v[-3] for v in value if v[-3]>0.05])
methylated += len([v[-4] for v in value if v[-4]>0.05])
non_meth = len(value) - methylated
try:
summary_dict[type][key]['methylated'] += methylated
summary_dict[type][key]['total'] += methylated+non_meth
except KeyError:
if type not in summary_dict:
summary_dict[type] = {key:{'methylated':methylated,'total':methylated+non_meth}}
elif key not in summary_dict[type]:
summary_dict[type][key] = {'methylated':methylated,'total':methylated+non_meth}
for type,subdict in summary_dict.items():
for key,value in sorted(subdict.items()):
output = '\t'.join([type,key.split('_')[0],str(value['methylated']),str(value['total'])])
print output
def run_epiGBS(dir):
"""Run analysis for epiGBS data in directory"""
ref = os.path.join(dir,'output_denovo','consensus_cluster.renamed.fa')
input_bed = os.path.join(dir,'output_mapping','methylation.bed')
input_dir = os.path.join(dir,'output_mapping')
out_dir = os.path.join(dir,'dmp')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
with open(input_bed,'r') as file_handle:
#get global header
header = file_handle.readline()[:-1].split('\t')
#set minimum coverage per strand
mincov = 10
#Get genome dictionary to determine strand of positions
genome_dict = SeqIO.to_dict(SeqIO.parse(open(ref),'fasta'))
#process bed file to get per sample list of positions that are candidates
sample_dict = process_bed(input_bed,genome_dict,mincov)
    #make sample-specific bed files
make_bed_files(sample_dict,out_dir)
#split bam file in sample specific bam files
split_sample(header,input_dir,'epiGBS',sample_dict)
#make pileup per bam file
make_pileup(header,out_dir,sample_dict)
# filter pileup, calculate per position methylation rate
libtype = 'epiGBS'
output_dict = filter_pileup(header,input_dir,out_dir,libtype,mincov,sample_dict)
#Do Fisher exact test for differentially methylated positions between loci
diff_meth_epiGBS = fisher_exact(output_dict,out_dir)
# combined_entries = compare_epi_WGBS()
context = get_context(input_bed)
#get counts per context
#only retain symmetric positions
diff_meth_epiGBS = retain_symmetric(diff_meth_epiGBS,context)
dmp_stats = get_dmp_stats(diff_meth_epiGBS,out_dir,context)
write_dmp_stats(dmp_stats,out_dir,dir)
summary_methylation(dmp_stats,out_dir,dir)
run_epiGBS('/Users/thomasvangurp/epiGBS/Baseclear/unfiltered_sequences/seqNNAtlE/Fallopia/')
|
{
"content_hash": "7975539c5e6ab9bec620e6c35bc0e2a5",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 125,
"avg_line_length": 43.87543859649123,
"alnum_prop": 0.5438442160822105,
"repo_name": "thomasvangurp/epiGBS",
"id": "df486e4aa5302964b643fa13a6f1f93e5f55fb5d",
"size": "25009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Arabidopsis analysis/all_DMP_detection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "917"
},
{
"name": "CSS",
"bytes": "10826"
},
{
"name": "JavaScript",
"bytes": "5710"
},
{
"name": "Python",
"bytes": "819863"
},
{
"name": "R",
"bytes": "3628065"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_item_tapestry_tatooine.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "07e008869944616793b796d5814c3bad",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 23.23076923076923,
"alnum_prop": 0.6920529801324503,
"repo_name": "anhstudios/swganh",
"id": "65af8eb72f361692e4643fd793b65c79f7f0c59d",
"size": "447",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/item/shared_item_tapestry_tatooine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
import re
from itertools import chain
from glob import iglob
import sys
import argparse
# The set of Unicode code points greater than 127 that we
# allow in the source code.
box_drawing_chars = set(chr(cp) for cp in range(0x2500, 0x2580))
allowed = (set(['®', 'é', 'ö', 'λ', 'π', 'ω', '∫', '≠']) |
box_drawing_chars)
def unicode_check(showall=False):
"""
If showall is True, all non-ASCII characters are displayed.
"""
# File encoding regular expression from PEP-263.
encoding_pat = re.compile("^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
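    # The pattern matches declarations such as "# -*- coding: latin-1 -*-"
    # or "# coding: utf-8" when checked against a file's first two lines,
    # as PEP 263 prescribes.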
nbad = 0
for name in chain(iglob('scipy/**/*.py', recursive=True),
iglob('scipy/**/*.pyx', recursive=True),
iglob('scipy/**/*.px[di]', recursive=True)):
# Read the file as bytes, and check for any bytes greater than 127.
with open(name, 'rb') as f:
content = f.read()
if len(content) == 0:
continue
if max(content) > 127:
# There is at least one non-ASCII character in the file.
# Check the first two lines for an encoding comment.
lines = content.splitlines()
for line in lines[:2]:
match = re.match(encoding_pat,
line.decode(encoding='latin-1'))
if match:
break
# If an explicit encoding was given in a comment, use
# that to decode the contents. Otherwise use UTF-8.
if match:
encoding = match[1]
file_enc_msg = f"(explicit encoding '{encoding}')"
else:
encoding = 'utf-8'
file_enc_msg = "(no explicit encoding; utf-8 assumed)"
content = content.decode(encoding=encoding)
out = []
for n, line in enumerate(content.splitlines()):
for pos, char in enumerate(line):
cp = ord(char)
if cp > 127:
msg = (f"... line {n+1}, position {pos+1}: "
f"character '{char}', code point U+{cp:04X}")
if showall:
out.append(msg)
else:
if char not in allowed:
out.append(msg)
if len(out) > 0:
nbad += 1
print(f"{name} {file_enc_msg}")
for msg in out:
print(msg)
return nbad
if __name__ == "__main__":
    descr = ('Check for disallowed Unicode characters in the SciPy Python '
             'and Cython source code.')
parser = argparse.ArgumentParser(description=descr)
parser.add_argument('--showall', action='store_true',
help=('Show non-ASCII Unicode characters from all '
'files.'))
args = parser.parse_args()
sys.exit(unicode_check(args.showall) > 0)
|
{
"content_hash": "3254c0df81ae708bb03a4e728e1c8044",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 37.48148148148148,
"alnum_prop": 0.4980237154150198,
"repo_name": "nmayorov/scipy",
"id": "6d1ef16eee85b0f8ce84831b756c14734b1ad263",
"size": "3046",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/unicode-check.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4418291"
},
{
"name": "C++",
"bytes": "672553"
},
{
"name": "Dockerfile",
"bytes": "1328"
},
{
"name": "Fortran",
"bytes": "5300184"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "13498627"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import os
import sys
BASE_APP_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(BASE_APP_DIR, 'src/'))
sys.path.append(os.path.join(BASE_APP_DIR, 'venv/lib/python3.4/site-packages'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "f4017d580ad4333866be2187cddbdf7d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 79,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.7513089005235603,
"repo_name": "Jafte/RTTTM",
"id": "e9d1703c124d4cabc394bc6f35ee597843c6a769",
"size": "404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "passenger_wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "689142"
},
{
"name": "HTML",
"bytes": "111790"
},
{
"name": "JavaScript",
"bytes": "612780"
},
{
"name": "Python",
"bytes": "50501"
}
],
"symlink_target": ""
}
|
"""Handles all requests relating to volumes."""
import collections
import datetime
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from cinder.api import common
from cinder import context
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import keymgr
from cinder import objects
from cinder.objects import base as objects_base
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
allow_force_upload = cfg.BoolOpt('enable_force_upload',
default=False,
help='Enables the Force option on '
'upload_to_image. This enables '
'running upload_volume on in-use '
'volumes for backends that support it.')
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
default=3600,
help='Cache volume availability zones in '
'memory for the provided duration in '
'seconds')
CONF = cfg.CONF
CONF.register_opt(allow_force_upload)
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
if isinstance(target_obj, objects_base.CinderObject):
# Turn object into dict so target.update can work
target.update(target_obj.obj_to_primitive() or {})
else:
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
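# For example, check_policy(context, 'delete', volume) enforces the
# 'volume:delete' policy rule against a target built from the context's
# project/user plus the volume's own attributes.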
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zones = []
self.availability_zones_last_fetched = None
self.key_manager = keymgr.API()
super(API, self).__init__(db_driver)
def list_availability_zones(self, enable_cache=False):
"""Describe the known availability zones
:retval tuple of dicts, each with a 'name' and 'available' key
"""
refresh_cache = False
if enable_cache:
if self.availability_zones_last_fetched is None:
refresh_cache = True
else:
cache_age = timeutils.delta_seconds(
self.availability_zones_last_fetched,
timeutils.utcnow())
if cache_age >= CONF.az_cache_duration:
refresh_cache = True
if refresh_cache or not enable_cache:
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled'])
for s in services]
disabled_map = {}
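            # An AZ is reported as available unless *every* service in it is
            # disabled: the AND below stays True only while all are disabled.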
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
if refresh_cache:
now = timeutils.utcnow()
self.availability_zones = azs
self.availability_zones_last_fetched = now
LOG.debug("Availability zone cache updated, next update will"
" occur around %s.", now + datetime.timedelta(
seconds=CONF.az_cache_duration))
else:
azs = self.availability_zones
LOG.info(_LI("Availability Zones retrieved successfully."))
return tuple(azs)
def _retype_is_possible(self, context,
first_type_id, second_type_id,
first_type=None, second_type=None):
safe = False
if len(self.db.service_get_all_by_topic(context,
'cinder-volume',
disabled=True)) == 1:
safe = True
else:
type_a = first_type or volume_types.get_volume_type(
context,
first_type_id)
type_b = second_type or volume_types.get_volume_type(
context,
second_type_id)
if(volume_utils.matching_backend_name(type_a['extra_specs'],
type_b['extra_specs'])):
safe = True
return safe
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None,
source_replica=None, consistencygroup=None,
cgsnapshot=None, multiattach=False):
# NOTE(jdg): we can have a create without size if we're
# doing a create from snap or volume. Currently
# the taskflow api will handle this and pull in the
# size from the source.
# NOTE(jdg): cinderclient sends in a string representation
# of the size value. BUT there is a possibility that somebody
# could call the API directly so the is_int_like check
# handles both cases (string representation of true float or int).
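        # e.g. size=10 and size='10' pass, while size='10.5' or size=-1
        # raise InvalidInput; a missing/falsy size is deferred to taskflow,
        # per the note above.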
if size and (not utils.is_int_like(size) or int(size) <= 0):
msg = _('Invalid volume size provided for create request: %s '
'(size argument must be an integer (or string '
'representation of an integer) and greater '
'than zero).') % size
raise exception.InvalidInput(reason=msg)
if consistencygroup and not cgsnapshot:
if not volume_type:
msg = _("volume_type must be provided when creating "
"a volume in a consistency group.")
raise exception.InvalidInput(reason=msg)
cg_voltypeids = consistencygroup.get('volume_type_id')
if volume_type.get('id') not in cg_voltypeids:
msg = _("Invalid volume_type provided: %s (requested "
"type must be supported by this consistency "
"group).") % volume_type
raise exception.InvalidInput(reason=msg)
if source_volume and volume_type:
if volume_type['id'] != source_volume['volume_type_id']:
if not self._retype_is_possible(
context,
volume_type['id'],
source_volume['volume_type_id'],
volume_type):
msg = _("Invalid volume_type provided: %s (requested type "
"is not compatible; either match source volume, "
"or omit type argument).") % volume_type['id']
raise exception.InvalidInput(reason=msg)
# When cloning replica (for testing), volume type must be omitted
if source_replica and volume_type:
msg = _("No volume_type should be provided when creating test "
"replica.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type['id'] != snapshot['volume_type_id']:
if not self._retype_is_possible(context,
volume_type['id'],
snapshot['volume_type_id'],
volume_type):
msg = _("Invalid volume_type provided: %s (requested "
"type is not compatible; recommend omitting "
"the type argument).") % volume_type['id']
raise exception.InvalidInput(reason=msg)
# Determine the valid availability zones that the volume could be
# created in (a task in the flow will/can use this information to
# ensure that the availability zone requested is valid).
raw_zones = self.list_availability_zones(enable_cache=True)
availability_zones = set([az['name'] for az in raw_zones])
if CONF.storage_availability_zone:
availability_zones.add(CONF.storage_availability_zone)
create_what = {
'context': context,
'raw_size': size,
'name': name,
'description': description,
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata,
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'source_replica': source_replica,
'optional_args': {'is_quota_committed': False},
'consistencygroup': consistencygroup,
'cgsnapshot': cgsnapshot,
'multiattach': multiattach,
}
try:
sched_rpcapi = self.scheduler_rpcapi if not cgsnapshot else None
volume_rpcapi = self.volume_rpcapi if not cgsnapshot else None
flow_engine = create_volume.get_flow(self.db,
self.image_service,
availability_zones,
create_what,
sched_rpcapi,
volume_rpcapi)
except Exception:
msg = _('Failed to create api volume flow.')
LOG.exception(msg)
raise exception.CinderException(msg)
# Attaching this listener will capture all of the notifications that
# taskflow sends out and redirect them to a more useful log for
# cinders debugging (or error reporting) usage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Volume created successfully."), resource=vref)
return vref
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
if context.is_admin and context.project_id != volume['project_id']:
project_id = volume['project_id']
else:
project_id = context.project_id
volume_id = volume['id']
if not volume['host']:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update quota while "
"deleting volume."))
self.db.volume_destroy(context.elevated(), volume_id)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
LOG.info(_LI("Delete volume request issued successfully."),
resource={'type': 'volume',
'id': volume_id})
return
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
LOG.info(_LI('Unable to delete volume: %s, '
'volume is attached.'), volume['id'])
raise exception.VolumeAttached(volume_id=volume_id)
if not force and volume['status'] not in ["available", "error",
"error_restoring",
"error_extending"]:
msg = _("Volume status must be available or error, "
"but current status is: %s.") % volume['status']
LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
'volume must be available or '
'error, but is %(vol_status)s.'),
{'vol_id': volume['id'],
'vol_status': volume['status']})
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently migrating.'), volume['id'])
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
if volume['consistencygroup_id'] is not None:
msg = _("Volume cannot be deleted while in a consistency group.")
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently part of a '
'consistency group.'), volume['id'])
raise exception.InvalidVolume(reason=msg)
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume_id)
if len(snapshots):
LOG.info(_LI('Unable to delete volume: %s, '
'volume currently has snapshots.'), volume['id'])
msg = _("Volume still has %d dependent "
"snapshots.") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
# If the volume is encrypted, delete its encryption key from the key
# manager. This operation makes volume deletion an irreversible process
# because the volume cannot be decrypted without its key.
encryption_key_id = volume.get('encryption_key_id', None)
if encryption_key_id is not None:
self.key_manager.delete_key(context, encryption_key_id)
now = timeutils.utcnow()
vref = self.db.volume_update(context,
volume_id,
{'status': 'deleting',
'terminated_at': now})
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
LOG.info(_LI("Delete volume request issued successfully."),
resource=vref)
@wrap_check_policy
def update(self, context, volume, fields):
vref = self.db.volume_update(context, volume['id'], fields)
LOG.info(_LI("Volume updated successfully."), resource=vref)
def get(self, context, volume_id, viewable_admin_meta=False):
if viewable_admin_meta:
ctxt = context.elevated()
else:
ctxt = context
rv = self.db.volume_get(ctxt, volume_id)
volume = dict(rv)
try:
check_policy(context, 'get', volume)
except exception.PolicyNotAuthorized:
# raise VolumeNotFound instead to make sure Cinder behaves
# as it used to
raise exception.VolumeNotFound(volume_id=volume_id)
LOG.info(_LI("Volume info retrieved successfully."), resource=rv)
return volume
def _get_all_tenants_value(self, filters):
"""Returns a Boolean for the value of filters['all_tenants'].
False is returned if 'all_tenants' is not in the filters dictionary.
An InvalidInput exception is thrown for invalid values.
"""
b = False
if 'all_tenants' in filters:
val = six.text_type(filters['all_tenants']).lower()
if val in ['true', '1']:
b = True
elif val in ['false', '0']:
b = False
else:
msg = _('all_tenants param must be 0 or 1')
raise exception.InvalidInput(reason=msg)
return b
def get_all(self, context, marker=None, limit=None, sort_keys=None,
sort_dirs=None, filters=None, viewable_admin_meta=False):
check_policy(context, 'get_all')
if filters is None:
filters = {}
allTenants = self._get_all_tenants_value(filters)
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
# Non-admin shouldn't see temporary target of a volume migration, add
# unique filter data to reflect that only volumes with a NULL
# 'migration_status' or a 'migration_status' that does not start with
# 'target:' should be returned (processed in db/sqlalchemy/api.py)
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug("Searching by: %s.", six.text_type(filters))
if context.is_admin and allTenants:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = self.db.volume_get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters)
else:
if viewable_admin_meta:
context = context.elevated()
volumes = self.db.volume_get_all_by_project(context,
context.project_id,
marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters)
LOG.info(_LI("Get all volumes completed successfully."))
return volumes
def get_snapshot(self, context, snapshot_id):
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
# FIXME(jdg): The objects don't have the db name entries
# so build the resource tag manually for now.
LOG.info(_LI("Snapshot retrieved successfully."),
resource={'type': 'snapshot',
'id': snapshot['id']})
return snapshot
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
vref = self.db.volume_get(context, volume_id)
LOG.info(_LI("Volume retrieved successfully."), resource=vref)
return dict(vref)
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = objects.SnapshotList.get_all(context,
search_opts)
else:
snapshots = objects.SnapshotList.get_all_by_project(
context, context.project_id, search_opts)
LOG.info(_LI("Get all snaphsots completed successfully."))
return snapshots
@wrap_check_policy
def reserve_volume(self, context, volume):
# NOTE(jdg): check for Race condition bug 1096983
# explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
elif volume['status'] == 'in-use':
if volume['multiattach']:
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume must be multiattachable to reserve again.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
else:
msg = _("Volume status must be available to reserve.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Reserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def unreserve_volume(self, context, volume):
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'attaching':
attaches = self.db.volume_attachment_get_used_by_volume_id(
context, volume['id'])
if attaches:
self.update(context, volume, {"status": "in-use"})
else:
self.update(context, volume, {"status": "available"})
LOG.info(_LI("Unreserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def begin_detaching(self, context, volume):
# NOTE(vbala): The volume status might be 'detaching' already due to
# a previous begin_detaching call. Get updated volume status so that
# we fail such cases.
volume = self.db.volume_get(context, volume['id'])
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if volume['migration_status']:
return
if (volume['status'] != 'in-use' or
volume['attach_status'] != 'attached'):
msg = (_("Unable to detach volume. Volume status must be 'in-use' "
"and attach_status must be 'attached' to detach. "
"Currently: status: '%(status)s', "
"attach_status: '%(attach_status)s.'") %
{'status': volume['status'],
'attach_status': volume['attach_status']})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.update(context, volume, {"status": "detaching"})
LOG.info(_LI("Begin detaching volume completed successfully."),
resource=volume)
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
LOG.info(_LI("Roll detaching of volume completed successfully."),
resource=volume)
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
attach_results = self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint,
mode)
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return attach_results
@wrap_check_policy
def detach(self, context, volume, attachment_id):
detach_results = self.volume_rpcapi.detach_volume(context, volume,
attachment_id)
LOG.info(_LI("Detach volume completed successfully."),
resource=volume)
return detach_results
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
init_results = self.volume_rpcapi.initialize_connection(context,
volume,
connector)
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return init_results
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
results = self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
return results
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
results = self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume)
return results
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None,
cgsnapshot_id=None):
snapshot = self.create_snapshot_in_db(
context, volume, name,
description, force, metadata, cgsnapshot_id)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot_in_db(self, context,
volume, name, description,
force, metadata,
cgsnapshot_id):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if volume['status'].startswith('replica_'):
# Can't snapshot secondary replica
msg = _("Snapshot of secondary replica is not allowed.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Volume %(vol_id)s status must be available, "
"but current status is: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(metadata)
snapshot = None
try:
kwargs = {
'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id'],
'metadata': metadata or {}
}
snapshot = objects.Snapshot(context=context, **kwargs)
snapshot.create()
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
if hasattr(snapshot, 'id'):
snapshot.destroy()
finally:
QUOTAS.rollback(context, reservations)
return snapshot
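    # The quota handling above follows the usual reserve/commit/rollback
    # pattern. A minimal sketch of the control flow (illustrative only;
    # make_db_object() is a hypothetical stand-in for Snapshot.create()):
    #
    #   reservations = QUOTAS.reserve(context, snapshots=1, gigabytes=size)
    #   try:
    #       snapshot = make_db_object()
    #       QUOTAS.commit(context, reservations)
    #   except Exception:
    #       QUOTAS.rollback(context, reservations)
    #       raise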
def create_snapshots_in_db(self, context,
volume_list,
name, description,
force, cgsnapshot_id):
snapshot_list = []
for volume in volume_list:
self._create_snapshot_in_db_validate(context, volume, force)
reservations = self._create_snapshots_in_db_reserve(
context, volume_list)
options_list = []
for volume in volume_list:
options = self._create_snapshot_in_db_options(
context, volume, name, description, cgsnapshot_id)
options_list.append(options)
try:
for options in options_list:
snapshot = objects.Snapshot(context=context, **options)
snapshot.create()
snapshot_list.append(snapshot)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
for snap in snapshot_list:
snap.destroy()
finally:
QUOTAS.rollback(context, reservations)
return snapshot_list
def _create_snapshot_in_db_validate(self, context, volume, force):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Snapshot cannot be created because volume %(vol_id)s "
"is not available, current volume status: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
def _create_snapshots_in_db_reserve(self, context, volume_list):
reserve_opts_list = []
total_reserve_opts = {}
try:
for volume in volume_list:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1,
'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reserve_opts_list.append(reserve_opts)
for reserve_opts in reserve_opts_list:
for (key, value) in reserve_opts.items():
                    if key not in total_reserve_opts:
total_reserve_opts[key] = value
else:
total_reserve_opts[key] = \
total_reserve_opts[key] + value
reservations = QUOTAS.reserve(context, **total_reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
return reservations
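    # A sketch of the aggregation performed above (illustrative): the
    # per-volume reserve options are summed key by key before a single
    # reservation is made, e.g.
    #
    #   [{'snapshots': 1, 'gigabytes': 10}, {'snapshots': 1, 'gigabytes': 5}]
    #
    # collapses to {'snapshots': 2, 'gigabytes': 15}, so quota is checked
    # once for the whole batch instead of once per volume.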
def _create_snapshot_in_db_options(self, context, volume,
name, description,
cgsnapshot_id):
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id']}
return options
def create_snapshot(self, context,
volume, name, description,
metadata=None, cgsnapshot_id=None):
result = self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id)
LOG.info(_LI("Snapshot create request issued successfully."),
resource=result)
return result
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
result = self._create_snapshot(context, volume, name, description,
True, metadata)
LOG.info(_LI("Snapshot force create request issued successfully."),
resource=result)
return result
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
'due to invalid status. '
'Status must be available or '
'error, not %(snap_status)s.'),
{'snap_id': snapshot['id'],
'snap_status': snapshot['status']})
msg = _("Volume Snapshot status must be available or error.")
raise exception.InvalidSnapshot(reason=msg)
cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
if cgsnapshot_id:
msg = _('Unable to delete snapshot %s because it is part of a '
'consistency group.') % snapshot['id']
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
snapshot_obj = self.get_snapshot(context, snapshot['id'])
snapshot_obj.status = 'deleting'
snapshot_obj.save()
volume = self.db.volume_get(context, snapshot_obj.volume_id)
self.volume_rpcapi.delete_snapshot(context, snapshot_obj,
volume['host'])
LOG.info(_LI("Snapshot delete request issued successfully."),
resource=snapshot)
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
snapshot.update(fields)
snapshot.save()
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume metadata completed successfully."),
resource=volume)
return dict(rv)
@wrap_check_policy
def delete_volume_metadata(self, context, volume,
key, meta_type=common.METADATA_TYPES.user):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key, meta_type)
LOG.info(_LI("Delete volume metadata completed successfully."),
resource=volume)
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters.")
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
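    # Examples of inputs rejected by the checks above (illustrative):
    #
    #   {'': 'x'}           -> InvalidVolumeMetadata (blank key)
    #   {'k' * 256: 'x'}    -> InvalidVolumeMetadataSize (key > 255 chars)
    #   {'k': 'v' * 256}    -> InvalidVolumeMetadataSize (value > 255 chars)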
@wrap_check_policy
def update_volume_metadata(self, context, volume,
metadata, delete=False,
meta_type=common.METADATA_TYPES.user):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
if meta_type == common.METADATA_TYPES.user:
orig_meta = self.get_volume_metadata(context, volume)
elif meta_type == common.METADATA_TYPES.image:
try:
orig_meta = self.get_volume_image_metadata(context,
volume)
except exception.GlanceMetadataNotFound:
orig_meta = {}
else:
raise exception.InvalidMetadataType(metadata_type=meta_type,
id=volume['id'])
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata,
delete,
meta_type)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume metadata completed successfully."),
resource=volume)
return db_meta
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
LOG.info(_LI("Get volume metadata key completed successfully."),
resource=volume)
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume admin metadata completed successfully."),
resource=volume)
return dict(rv)
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
LOG.info(_LI("Delete volume admin metadata completed successfully."),
resource=volume)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume admin metadata completed successfully."),
resource=volume)
return _metadata
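    # Merge semantics of the update above (illustrative values): with
    # delete=False the new items are overlaid on the existing metadata,
    # while delete=True replaces it wholesale, e.g. starting from
    # {'readonly': 'False', 'attached_mode': 'rw'}:
    #
    #   update({'readonly': 'True'}, delete=False)
    #       -> {'readonly': 'True', 'attached_mode': 'rw'}
    #   update({'readonly': 'True'}, delete=True)
    #       -> {'readonly': 'True'}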
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot['id'])
LOG.info(_LI("Get snapshot metadata completed successfully."),
resource=snapshot)
return snapshot_obj.metadata
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot['id'])
snapshot_obj.delete_metadata_key(context, key)
LOG.info(_LI("Delete snapshot metadata completed successfully."),
resource=snapshot)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = snapshot.metadata
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
snapshot.metadata = _metadata
snapshot.save()
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update snapshot metadata completed successfully."),
resource=snapshot)
return snapshot.metadata
def get_snapshot_metadata_value(self, snapshot, key):
LOG.info(_LI("Get snapshot metadata value not implemented."),
resource=snapshot)
# FIXME(jdg): Huh? Pass?
pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume image-metadata completed successfully."),
resource=volume)
return {meta_entry.key: meta_entry.value for meta_entry in db_data}
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be '
'available or in-use, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
        if not force and volume['status'] == 'in-use':
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
if not CONF.enable_force_upload and force:
LOG.info(_LI("Force upload to image is disabled, "
"Force option will be ignored."),
resource={'type': 'volume', 'id': volume['id']})
force = False
self._check_volume_availability(volume, force)
glance_core_properties = CONF.glance_core_properties
if glance_core_properties:
try:
volume_image_metadata = self.get_volume_image_metadata(context,
volume)
custom_property_set = (set(volume_image_metadata).difference
(set(glance_core_properties)))
if custom_property_set:
properties = {custom_property:
volume_image_metadata[custom_property]
for custom_property in custom_property_set}
metadata.update(dict(properties=properties))
except exception.GlanceMetadataNotFound:
                # If the volume was not created from an image, no Glance
                # metadata will be available for it in the volume glance
                # metadata table.
pass
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
LOG.info(_LI("Copy image to volume completed successfully."),
resource=volume)
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to extend, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s).") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
try:
reserve_opts = {'gigabytes': size_increase}
QUOTAS.add_volume_type_opts(context, reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=volume['project_id'],
**reserve_opts)
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).")
LOG.error(msg, {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
LOG.info(_LI("Extend volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be available or in-use, '
'but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume %s is already part of an active "
"migration.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = objects.SnapshotList.get_all_for_volume(context, volume['id'])
if snaps:
msg = _("Volume %s must not have snapshots.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle non-replicated volumes for now
rep_status = volume['replication_status']
if rep_status is not None and rep_status != 'disabled':
msg = _("Volume %s must not be replicated.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume %s must not be part of a consistency "
"group.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated,
topic,
disabled=False)
        found = False
        svc_host = volume_utils.extract_host(host, 'backend')
        for service in services:
            if utils.service_is_up(service) and service['host'] == svc_host:
                found = True
if not found:
msg = _('No available service named %s') % host
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different '
'than the current host.')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
LOG.info(_LI("Migrate volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
        if new_volume['migration_status'] != expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Migrate volume completion issued successfully."),
resource=volume)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
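    # For reference (illustrative): while a migration is running, the
    # destination volume carries migration_status 'target:<source-id>',
    # so for a source volume with id 'abc' the destination must report
    # migration_status == 'target:abc' before completion is accepted.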
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to update readonly flag, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': six.text_type(flag)})
LOG.info(_LI("Update readonly setting on volume "
"completed successfully."),
resource=volume)
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status: '
'%(vol_status)s on volume: %(vol_id)s. Volume status '
'must be available or '
'in-use.') % {'vol_status': volume['status'],
'vol_id': volume['id']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
                    'passed: %s') % migration_policy
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = _('New volume_type same as original: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements.')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volume: %s.') % volume['id']
raise exception.InvalidInput(reason=msg)
        # We're checking here so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
LOG.info(_LI("Retype volume request issued successfully."),
resource=volume)
def manage_existing(self, context, host, ref, name=None, description=None,
volume_type=None, metadata=None,
availability_zone=None, bootable=False):
if availability_zone is None:
elevated = context.elevated()
try:
svc_host = volume_utils.extract_host(host, 'backend')
service = self.db.service_get_by_host_and_topic(
elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to find service for given host.'))
availability_zone = service.get('availability_zone')
volume_type_id = volume_type['id'] if volume_type else None
volume_properties = {
'size': 0,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
# Rename these to the internal name.
'display_description': description,
'display_name': name,
'host': host,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id,
'metadata': metadata,
'bootable': bootable
}
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume = self.db.volume_create(context, volume_properties)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id'],
'ref': ref}
self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
volume['id'],
request_spec=request_spec)
LOG.info(_LI("Manage volume request issued successfully."),
resource=volume)
return volume
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    def __init__(self):
        super(HostAPI, self).__init__()
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window.
On start, it triggers volume evacuation.
"""
raise NotImplementedError()
|
{
"content_hash": "80234e69e3733fc6ef7c6a48aa6a2416",
"timestamp": "",
"source": "github",
"line_count": 1504,
"max_line_length": 79,
"avg_line_length": 44.89494680851064,
"alnum_prop": 0.5271467077396996,
"repo_name": "JioCloud/cinder",
"id": "a958a0272529a8574ac942cb5ea0fbb7822e4b11",
"size": "68254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11977630"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.added_by'
db.add_column('profiles_userprofile', 'added_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='users_added', null=True, to=orm['auth.User']), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.added_by'
db.delete_column('profiles_userprofile', 'added_by_id')
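    # A sketch of driving this migration with South (command forms as of
    # South 0.7; treat the exact invocations as illustrative):
    #
    #   ./manage.py migrate profiles         # apply forwards() up to latest
    #   ./manage.py migrate profiles 0001    # roll back via backwards()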
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.ircchannel': {
'Meta': {'object_name': 'IRCChannel'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': "orm['auth.User']"}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'diaspora_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '15', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'gpg_key': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.IRCChannel']", 'symmetrical': 'False'}),
'irc_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'personal_blog_feed': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'personal_website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'private_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'private_email_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'twitter_account': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['profiles']
|
{
"content_hash": "ab1b3e7c72f9fe8ed4bd07aee212ced1",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 204,
"avg_line_length": 75.86666666666666,
"alnum_prop": 0.5546280023432923,
"repo_name": "chirilo/remo",
"id": "fb8ae81ba08c1acb12c4a3ad9fa1c0aaae337456",
"size": "6846",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remo/profiles/migrations/0002_auto__add_field_userprofile_added_by.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "Batchfile",
"bytes": "4531"
},
{
"name": "CSS",
"bytes": "372453"
},
{
"name": "HTML",
"bytes": "373393"
},
{
"name": "JavaScript",
"bytes": "606447"
},
{
"name": "Makefile",
"bytes": "4630"
},
{
"name": "Puppet",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "7483058"
},
{
"name": "Shell",
"bytes": "3221"
},
{
"name": "Smarty",
"bytes": "215"
},
{
"name": "TeX",
"bytes": "1525"
}
],
"symlink_target": ""
}
|
import re
import requests
from lxml import html as lxml_html
__all__ = ['TDClient']
def clear_text(text):
text = text.replace('R$', '').replace('.', '').replace(',', '.')
text = text.replace('\r', '').replace('\n', '').strip()
try:
return float(text)
except ValueError:
return text
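# Examples (illustrative) of the normalisation performed by clear_text():
#
#   clear_text('R$ 1.234,56')   -> 1234.56
#   clear_text('  Tesouro\n')   -> 'Tesouro'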
def calculate(title, data):
if title.endswith('(LFT)'):
return data['net_value'] - data['initial_value']
return None
class TDClient(object):
URL = 'https://tesourodireto.bmfbovespa.com.br/PortalInvestidor/'
USER_AGENT = (
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'
)
def __init__(self):
self.session = requests.Session()
self.session.headers = {
'User-Agent': self.USER_AGENT,
'Referer': self.URL
}
# The logout URL
self._logout_url = None
def login(self, cpf, password):
html = self.session.get(self.URL, verify=False, timeout=10).content
info = lxml_html.fromstring(html)
# Build post data
post_data = self._build_base_post_data(info)
post_data['ctl00$BodyContent$txtLogin'] = cpf
post_data['ctl00$BodyContent$txtSenha'] = password
post_data['ctl00$BodyContent$btnLogar'] = 'Entrar'
# Submit login information (TODO: check if login succeeded)
resp = self.session.post(self.URL, post_data)
# Maintain the URL for logout
self._logout_url = lxml_html.fromstring(resp.content).xpath(
'//div[@id="user-logoff-desktop"]/p/a[last()]/@href')[0]
def get_titles(self, month, year):
# Get base statement page and build base post data
statement = self.session.get(self.URL + 'extrato.aspx')
statement = lxml_html.fromstring(statement.content)
post_data = self._build_base_post_data(statement)
# Add the query we are looking for
post_data['ctl00$BodyContent$ddlMes'] = month
post_data['ctl00$BodyContent$ddlAno'] = year
post_data['ctl00$BodyContent$btnConsultar'] = 'Consultar'
# Get data and parse the information
statement = self.session.post(self.URL + 'extrato.aspx', post_data)
statement = lxml_html.fromstring(statement.content)
# The columns of the statement page
columns = [
'initial_titles', 'credit_titles', 'debit_titles',
'bloqued_titles', 'current_titles',
'initial_value', 'gross_value', 'net_value'
]
keyre = re.compile("\('QS=(.*)'\)")
# Find all brokerages available
index = {}
brokerages = statement.xpath('//p[@class="title doc"]')
for brokerage in brokerages:
name = brokerage.xpath('a/text()')[0]
# Add an entry to this brokerage in the index
calcs = {}
data = {}
index[name] = data
            # The data section that has information about all the titles of
            # a brokerage is the parent element of the paragraph with its
            # title.
section = brokerage.getparent()
            # Let's use the fact that the data we are looking for is in a
# table line with 10 columns, so we can easily find it.
rows = section.xpath('.//tr')
for row in rows:
tds = row.xpath('td')
if len(tds) == 10:
values = map(lambda x: clear_text(x.text), tds[1:9])
table = dict(zip(columns, values))
# The first column is the title, that is inside a link
title = clear_text(tds[0].xpath('a/text()')[0])
# The last column has the key to get data for the title
value = tds[9].xpath('a/@onclick')[0]
table['key'] = keyre.search(value).group(1)
# Consolidate the information
data[title] = table
# Calculate information about the title
calcs[title] = calculate(title, table)
return index
def get_title_details(self, name, title):
# Get data and parse the information
resp = self.session.get(
self.URL + 'extrato-analitico.aspx?QS=%s' % title['key']
)
details = lxml_html.fromstring(resp.content)
# The columns of the details page
columns = [
'current_balance', 'initial_transaction', 'gross_value',
'buy_unit', 'agreed_rate', 'days', 'ir_rate', 'ir_tax', 'iof_tax',
'bvmf_tax', 'custody_tax', 'net_value',
'gross_rate_month', 'gross_rate_year',
'gross_rate_last_12_months', 'gross_rate_total'
]
        # Let's use the fact that the data we are looking for is in a table
# line with 17 columns and class 'nowrap', so we can easily find it.
index = {}
rows = details.xpath('//tr[@class="nowrap"]')
for row in rows:
tds = row.xpath('td')
if len(tds) == 17:
values = map(lambda x: clear_text(x.text), tds[1:])
data = dict(zip(columns, values))
data['days'] = int(data['days'])
# The first column is the buy date
date = clear_text(tds[0].text)
# Consolidate the information
index[date] = data
return index
def logout(self):
if self._logout_url:
return self.session.get(self.URL + self._logout_url)
return None
def _build_base_post_data(self, current):
"""Generic post data builder with common fields in the forms."""
post_data = {}
fields = [
'__VIEWSTATE',
'__VIEWSTATEGENERATOR',
'__EVENTVALIDATION',
'__EVENTTARGET',
'__EVENTARGUMENT',
('BodyContent_hdnCamposRequeridos',
'ctl00$BodyContent$hdnCamposRequeridos')
]
for field in fields:
# Get input id and name
try:
(id_, name_) = field
except ValueError:
id_ = name_ = field
# Get value and set in the post data
input_ = current.xpath('//input[@id="%s"]' % id_)
if input_:
value = input_[0].value
post_data[name_] = value
return post_data
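# Usage sketch (illustrative; the CPF, password and dates below are
# placeholders, not real credentials):
#
#   client = TDClient()
#   client.login('00000000000', 'secret')
#   titles = client.get_titles('1', '2015')
#   for brokerage, data in titles.items():
#       for title, table in data.items():
#           details = client.get_title_details(brokerage, table)
#   client.logout()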
|
{
"content_hash": "3d7baac999fa018f8ec8ae820cff3c70",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 78,
"avg_line_length": 35.30601092896175,
"alnum_prop": 0.5455811793839963,
"repo_name": "herberthudson/tesouro-direto",
"id": "1b8c677c21904fdd6ca83a3d8c3755343125a3e9",
"size": "6478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tesouro/direto/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "17246"
}
],
"symlink_target": ""
}
|
from madpub.conf.settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('You', 'your@email'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(VAR_ROOT, 'dev.db'),
}
}
ROOT_URLCONF = '%s.conf.local.urls' % PROJECT_MODULE_NAME
INSTALLED_APPS += (
'django.contrib.admin',
'django.contrib.admindocs',
)
MEDIA_ROOT = os.path.join(VAR_ROOT, 'uploads')
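# To point these local settings at PostgreSQL instead, the DATABASES block
# can be swapped out; the values below are illustrative:
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'madpub',
#         'USER': 'madpub',
#         'PASSWORD': '',
#         'HOST': 'localhost',
#     }
# }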
|
{
"content_hash": "8844ee46054b423cc0606c2cd26c7f32",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 57,
"avg_line_length": 18.36,
"alnum_prop": 0.6230936819172114,
"repo_name": "malept/mad-mobileweb",
"id": "fc867bf4f3cd0a99f8ae60cc596d4adb58ed16a5",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "madpub/conf/local/example/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "292302"
},
{
"name": "Python",
"bytes": "16271"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import TagKey, TagKeyStatus, TagValue
class ProjectTagKeyValuesEndpoint(ProjectEndpoint):
doc_section = DocSection.PROJECTS
def get(self, request, project, key):
"""
List a Tag's Values
```````````````````
Return a list of values associated with this key. The `query`
parameter can be used to to perform a "starts with" match on
values.
:pparam string organization_slug: the slug of the organization.
:pparam string project_slug: the slug of the project.
:pparam string key: the tag key to look up.
:auth: required
"""
if key in ('release', 'user', 'filename', 'function'):
lookup_key = 'sentry:{0}'.format(key)
else:
lookup_key = key
try:
tagkey = TagKey.objects.get(
project=project,
key=lookup_key,
status=TagKeyStatus.VISIBLE,
)
except TagKey.DoesNotExist:
raise ResourceDoesNotExist
queryset = TagValue.objects.filter(
project=project,
key=tagkey.key,
)
query = request.GET.get('query')
if query:
queryset = queryset.filter(value__istartswith=query)
return self.paginate(
request=request,
queryset=queryset,
order_by='-id',
on_results=lambda x: serialize(x, request.user),
)
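    # Examples of the key translation above (illustrative): reserved keys
    # are namespaced before the lookup, everything else passes through:
    #
    #   'user'     -> 'sentry:user'
    #   'release'  -> 'sentry:release'
    #   'browser'  -> 'browser'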
|
{
"content_hash": "d901c2f5414952015ab14355ea9d2301",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 31.072727272727274,
"alnum_prop": 0.5985956699824458,
"repo_name": "wong2/sentry",
"id": "364ed90c44974cd33a77e76889d0c0fd91829c87",
"size": "1709",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/project_tagkey_values.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157899"
},
{
"name": "HTML",
"bytes": "196983"
},
{
"name": "JavaScript",
"bytes": "380189"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6474793"
}
],
"symlink_target": ""
}
|
import json
import os
import re
import tempfile
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
import mock
from nose.tools import eq_
from PIL import Image
from pyquery import PyQuery as pq
from waffle.models import Flag
import amo
import amo.tests
from amo.tests import formset, initial
from amo.tests.test_helpers import get_image_path
from amo.urlresolvers import reverse
from addons.forms import AddonFormBasic
from addons.models import (Addon, AddonCategory, AddonDependency, AddonUser,
Category)
from bandwagon.models import Collection, CollectionAddon, FeaturedCollection
from devhub.models import ActivityLog
from tags.models import Tag, AddonTag
from users.models import UserProfile
class TestEdit(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'base/addon_3615',
'base/addon_5579', 'base/addon_3615_categories']
def setUp(self):
super(TestEdit, self).setUp()
addon = self.get_addon()
assert self.client.login(username='del@icio.us', password='password')
a = AddonCategory.objects.filter(addon=addon, category__id=22)[0]
a.feature = False
a.save()
AddonCategory.objects.filter(addon=addon,
category__id__in=[23, 24]).delete()
cache.clear()
self.url = reverse('devhub.addons.edit', args=[addon.slug])
self.user = UserProfile.objects.get(pk=55021)
self.tags = ['tag3', 'tag2', 'tag1']
for t in self.tags:
Tag(tag_text=t).save_tag(addon)
self.addon = self.get_addon()
def get_addon(self):
return Addon.objects.no_cache().get(id=3615)
def get_url(self, section, edit=False):
args = [self.addon.slug, section]
if edit:
args.append('edit')
return reverse('devhub.addons.section', args=args)
class TestEditBasic(TestEdit):
def setUp(self):
super(TestEditBasic, self).setUp()
self.basic_edit_url = self.get_url('basic', edit=True)
ctx = self.client.get(self.basic_edit_url).context
self.cat_initial = initial(ctx['cat_form'].initial_forms[0])
def test_redirect(self):
# /addon/:id => /addon/:id/edit
r = self.client.get('/en-US/developers/addon/3615/', follow=True)
url = reverse('devhub.addons.edit', args=['a3615'])
self.assertRedirects(r, url, 301)
def get_dict(self, **kw):
fs = formset(self.cat_initial, initial_count=1)
result = {'name': 'new name', 'slug': 'test_slug',
'summary': 'new summary',
'tags': ', '.join(self.tags)}
result.update(**kw)
result.update(fs)
return result
def test_edit(self):
old_name = self.addon.name
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
addon = self.get_addon()
eq_(unicode(addon.name), data['name'])
eq_(addon.name.id, old_name.id)
eq_(unicode(addon.slug), data['slug'])
eq_(unicode(addon.summary), data['summary'])
eq_([unicode(t) for t in addon.tags.all()], sorted(self.tags))
def test_edit_check_description(self):
# Make sure bug 629779 doesn't return.
old_desc = self.addon.description
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
addon = self.get_addon()
eq_(addon.description, old_desc)
def test_edit_slug_invalid(self):
old_edit = self.basic_edit_url
data = self.get_dict(name='', slug='invalid')
r = self.client.post(self.basic_edit_url, data)
doc = pq(r.content)
eq_(doc('form').attr('action'), old_edit)
def test_edit_slug_valid(self):
old_edit = self.basic_edit_url
data = self.get_dict(slug='valid')
r = self.client.post(self.basic_edit_url, data)
doc = pq(r.content)
assert doc('form').attr('action') != old_edit
def test_edit_summary_escaping(self):
data = self.get_dict()
data['summary'] = '<b>oh my</b>'
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
# Fetch the page so the LinkifiedTranslation gets in cache.
r = self.client.get(reverse('devhub.addons.edit', args=[data['slug']]))
eq_(pq(r.content)('[data-name=summary]').html().strip(),
'<span lang="en-us"><b>oh my</b></span>')
# Now make sure we don't have escaped content in the rendered form.
form = AddonFormBasic(instance=self.get_addon(), request=object())
eq_(pq('<body>%s</body>' % form['summary'])('[lang="en-us"]').html(),
'<b>oh my</b>')
def test_edit_as_developer(self):
self.client.login(username='regular@mozilla.com', password='password')
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
        # Make sure we get errors when the user is just a regular user.
eq_(r.status_code, 403)
devuser = UserProfile.objects.get(pk=999)
AddonUser.objects.create(addon=self.get_addon(), user=devuser,
role=amo.AUTHOR_ROLE_DEV)
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
addon = self.get_addon()
eq_(unicode(addon.name), data['name'])
eq_(unicode(addon.slug), data['slug'])
eq_(unicode(addon.summary), data['summary'])
eq_([unicode(t) for t in addon.tags.all()], sorted(self.tags))
def test_edit_name_required(self):
data = self.get_dict(name='', slug='test_addon')
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
self.assertFormError(r, 'form', 'name', 'This field is required.')
def test_edit_name_spaces(self):
data = self.get_dict(name=' ', slug='test_addon')
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
self.assertFormError(r, 'form', 'name', 'This field is required.')
def test_edit_slugs_unique(self):
Addon.objects.get(id=5579).update(slug='test_slug')
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
self.assertFormError(r, 'form', 'slug', 'This slug is already in use.')
def test_edit_add_tag(self):
count = ActivityLog.objects.all().count()
self.tags.insert(0, 'tag4')
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
result = pq(r.content)('#addon_tags_edit').eq(0).text()
eq_(result, ', '.join(sorted(self.tags)))
eq_((ActivityLog.objects.for_addons(self.addon)
.get(action=amo.LOG.ADD_TAG.id)).to_string(),
'<a href="/en-US/firefox/tag/tag4">tag4</a> added to '
'<a href="/en-US/firefox/addon/test_slug/">new name</a>.')
eq_(ActivityLog.objects.filter(action=amo.LOG.ADD_TAG.id).count(),
count + 1)
def test_edit_blacklisted_tag(self):
Tag.objects.get_or_create(tag_text='blue', blacklisted=True)
data = self.get_dict(tags='blue')
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
error = 'Invalid tag: blue'
self.assertFormError(r, 'form', 'tags', error)
def test_edit_blacklisted_tags_2(self):
Tag.objects.get_or_create(tag_text='blue', blacklisted=True)
Tag.objects.get_or_create(tag_text='darn', blacklisted=True)
data = self.get_dict(tags='blue, darn, swearword')
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
error = 'Invalid tags: blue, darn'
self.assertFormError(r, 'form', 'tags', error)
def test_edit_blacklisted_tags_3(self):
Tag.objects.get_or_create(tag_text='blue', blacklisted=True)
Tag.objects.get_or_create(tag_text='darn', blacklisted=True)
Tag.objects.get_or_create(tag_text='swearword', blacklisted=True)
data = self.get_dict(tags='blue, darn, swearword')
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
error = 'Invalid tags: blue, darn, swearword'
self.assertFormError(r, 'form', 'tags', error)
def test_edit_remove_tag(self):
self.tags.remove('tag2')
count = ActivityLog.objects.all().count()
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
result = pq(r.content)('#addon_tags_edit').eq(0).text()
eq_(result, ', '.join(sorted(self.tags)))
eq_(ActivityLog.objects.filter(action=amo.LOG.REMOVE_TAG.id).count(),
count + 1)
def test_edit_minlength_tags(self):
tags = self.tags
tags.append('a' * (amo.MIN_TAG_LENGTH - 1))
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
eq_(r.status_code, 200)
self.assertFormError(r, 'form', 'tags',
'All tags must be at least %d characters.' %
amo.MIN_TAG_LENGTH)
def test_edit_max_tags(self):
tags = self.tags
for i in range(amo.MAX_TAGS + 1):
tags.append('test%d' % i)
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(r, 'form', 'tags', 'You have %d too many tags.' %
(len(tags) - amo.MAX_TAGS))
def test_edit_tag_empty_after_slug(self):
start = Tag.objects.all().count()
data = self.get_dict(tags='>>')
self.client.post(self.basic_edit_url, data)
# Check that the tag did not get created.
eq_(start, Tag.objects.all().count())
def test_edit_tag_slugified(self):
data = self.get_dict(tags='<script>alert("foo")</script>')
self.client.post(self.basic_edit_url, data)
tag = Tag.objects.all().order_by('-pk')[0]
eq_(tag.tag_text, 'scriptalertfooscript')
def test_edit_categories_add(self):
eq_([c.id for c in self.get_addon().all_categories], [22])
self.cat_initial['categories'] = [22, 23]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
eq_(sorted(addon_cats), [22, 23])
def _feature_addon(self, addon_id=3615):
c = CollectionAddon.objects.create(addon_id=addon_id,
collection=Collection.objects.create())
FeaturedCollection.objects.create(collection=c.collection,
application_id=amo.FIREFOX.id)
@mock.patch.object(settings, 'NEW_FEATURES', False)
def test_edit_categories_add_old_creatured(self):
"""Using the old features, categories should be able to be changed."""
self._feature_addon()
self.cat_initial['categories'] = [22, 23]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
# This add-on's categories should change.
eq_(sorted(addon_cats), [22, 23])
@mock.patch.object(settings, 'NEW_FEATURES', True)
def test_edit_categories_add_new_creatured(self):
"""Ensure that categories cannot be changed for creatured add-ons."""
self._feature_addon()
# TODO: remove this when NEW_FEATURES goes away. It's here because
# build() was already called in setUp().
from addons.cron import reset_featured_addons
reset_featured_addons()
self.cat_initial['categories'] = [22, 23]
r = self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
eq_(r.context['cat_form'].errors[0]['categories'],
['Categories cannot be changed while your add-on is featured for '
'this application.'])
# This add-on's categories should not change.
eq_(sorted(addon_cats), [22])
@mock.patch.object(settings, 'NEW_FEATURES', True)
def test_edit_categories_add_new_creatured_admin(self):
"""Ensure that admins can change categories for creatured add-ons."""
assert self.client.login(username='admin@mozilla.com',
password='password')
self._feature_addon()
from addons.cron import reset_featured_addons
reset_featured_addons()
r = self.client.get(self.basic_edit_url)
doc = pq(r.content)
eq_(doc('#addon-categories-edit div.addon-app-cats').length, 1)
eq_(doc('#addon-categories-edit > p').length, 0)
self.cat_initial['categories'] = [22, 23]
r = self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
eq_('categories' in r.context['cat_form'].errors[0], False)
# This add-on's categories should change.
eq_(sorted(addon_cats), [22, 23])
@mock.patch.object(settings, 'NEW_FEATURES', True)
def test_edit_categories_disable_new_creatured(self):
"""Ensure that other forms are okay when disabling category changes."""
self._feature_addon()
self.cat_initial['categories'] = [22, 23]
data = self.get_dict()
self.client.post(self.basic_edit_url, data)
eq_(unicode(self.get_addon().name), data['name'])
@mock.patch.object(settings, 'NEW_FEATURES', False)
def test_edit_categories_no_disclaimer_old(self):
"""With old features enabled, there should never be a disclaimer."""
self._feature_addon()
r = self.client.get(self.basic_edit_url)
doc = pq(r.content)
eq_(doc('#addon-categories-edit div.addon-app-cats').length, 1)
eq_(doc('#addon-categories-edit > p').length, 0)
@mock.patch.object(settings, 'NEW_FEATURES', True)
def test_edit_categories_no_disclaimer_new(self):
"""Ensure that there is a not disclaimer for non-creatured add-ons."""
r = self.client.get(self.basic_edit_url)
doc = pq(r.content)
eq_(doc('#addon-categories-edit div.addon-app-cats').length, 1)
eq_(doc('#addon-categories-edit > p').length, 0)
@mock.patch.object(settings, 'NEW_FEATURES', True)
def test_edit_categories_disclaimer(self):
"""Ensure that there is a disclaimer for creatured add-ons."""
self._feature_addon()
# TODO: remove this when NEW_FEATURES goes away. It's here because
# build() was already called in setUp().
from addons.cron import reset_featured_addons
reset_featured_addons()
r = self.client.get(self.basic_edit_url)
doc = pq(r.content)
eq_(doc('#addon-categories-edit div.addon-app-cats').length, 0)
eq_(doc('#addon-categories-edit > p').length, 2)
eq_(doc('#addon-categories-edit p.addon-app-cats').text(),
'Firefox: %s' % unicode(Category.objects.get(id=22).name))
def test_edit_categories_addandremove(self):
AddonCategory(addon=self.addon, category_id=23).save()
eq_([c.id for c in self.get_addon().all_categories], [22, 23])
self.cat_initial['categories'] = [22, 24]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
eq_(sorted(addon_cats), [22, 24])
def test_edit_categories_xss(self):
c = Category.objects.get(id=22)
c.name = '<script>alert("test");</script>'
c.save()
self.cat_initial['categories'] = [22, 24]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
assert '<script>alert' not in r.content
        assert '&lt;script&gt;alert' in r.content
def test_edit_categories_remove(self):
c = Category.objects.get(id=23)
AddonCategory(addon=self.addon, category=c).save()
eq_([c.id for c in self.get_addon().all_categories], [22, 23])
self.cat_initial['categories'] = [22]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
eq_(sorted(addon_cats), [22])
def test_edit_categories_required(self):
del self.cat_initial['categories']
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
eq_(r.context['cat_form'].errors[0]['categories'],
['This field is required.'])
def test_edit_categories_max(self):
eq_(amo.MAX_CATEGORIES, 2)
self.cat_initial['categories'] = [22, 23, 24]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
eq_(r.context['cat_form'].errors[0]['categories'],
['You can have only 2 categories.'])
def test_edit_categories_other_failure(self):
Category.objects.get(id=22).update(misc=True)
self.cat_initial['categories'] = [22, 23]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
eq_(r.context['cat_form'].errors[0]['categories'],
['The miscellaneous category cannot be combined with additional '
'categories.'])
def test_edit_categories_nonexistent(self):
self.cat_initial['categories'] = [100]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
eq_(r.context['cat_form'].errors[0]['categories'],
['Select a valid choice. 100 is not one of the available '
'choices.'])
def test_edit_name_not_empty(self):
data = self.get_dict(name='', slug=self.addon.slug,
summary=self.addon.summary)
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(r, 'form', 'name', 'This field is required.')
def test_edit_name_max_length(self):
data = self.get_dict(name='xx' * 70, slug=self.addon.slug,
summary=self.addon.summary)
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(r, 'form', 'name',
'Ensure this value has at most 50 '
'characters (it has 140).')
def test_edit_summary_max_length(self):
data = self.get_dict(name=self.addon.name, slug=self.addon.slug,
summary='x' * 251)
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(r, 'form', 'summary',
'Ensure this value has at most 250 '
'characters (it has 251).')
def test_edit_restricted_tags(self):
addon = self.get_addon()
tag = Tag.objects.create(tag_text='restartless', restricted=True)
AddonTag.objects.create(tag=tag, addon=addon)
res = self.client.get(self.basic_edit_url)
divs = pq(res.content)('#addon_tags_edit .edit-addon-details')
eq_(len(divs), 2)
assert 'restartless' in divs.eq(1).text()
def test_text_not_none_when_has_flags(self):
addon = self.get_addon()
r = self.client.get(reverse('devhub.addons.edit',
kwargs=dict(addon_id=addon.slug)))
doc = pq(r.content)
eq_(doc('#addon-flags').text(), 'This is a site-specific add-on.')
def test_text_none_when_no_flags(self):
addon = self.get_addon()
addon.update(external_software=False, site_specific=False,
binary=False)
r = self.client.get(reverse('devhub.addons.edit',
kwargs=dict(addon_id=addon.slug)))
doc = pq(r.content)
eq_(doc('#addon-flags').text(), 'None')
def test_nav_links(self):
url = reverse('devhub.addons.edit', args=['a3615'])
activity_url = reverse('devhub.feed', args=['a3615'])
r = self.client.get(url)
doc = pq(r.content)
eq_(doc('#edit-addon-nav ul:last').find('li a').eq(1).attr('href'),
activity_url)
def get_l10n_urls(self):
paths = ('devhub.addons.edit', 'devhub.addons.profile',
'devhub.addons.payments', 'devhub.addons.owner')
return [reverse(p, args=['a3615']) for p in paths]
def test_l10n(self):
Addon.objects.get(id=3615).update(default_locale='en-US')
for url in self.get_l10n_urls():
r = self.client.get(url)
eq_(pq(r.content)('#l10n-menu').attr('data-default'), 'en-us')
def test_l10n_not_us(self):
Addon.objects.get(id=3615).update(default_locale='fr')
for url in self.get_l10n_urls():
r = self.client.get(url)
eq_(pq(r.content)('#l10n-menu').attr('data-default'), 'fr')
def test_l10n_not_us_id_url(self):
Addon.objects.get(id=3615).update(default_locale='fr')
for url in self.get_l10n_urls():
url = '/id' + url[6:]
r = self.client.get(url)
eq_(pq(r.content)('#l10n-menu').attr('data-default'), 'fr')
class TestEditMedia(TestEdit):
def setUp(self):
super(TestEditMedia, self).setUp()
self.media_edit_url = self.get_url('media', True)
self.icon_upload = reverse('devhub.addons.upload_icon',
args=[self.addon.slug])
self.preview_upload = reverse('devhub.addons.upload_preview',
args=[self.addon.slug])
self.old_settings = {'preview': settings.PREVIEW_THUMBNAIL_PATH,
'icons': settings.ADDON_ICONS_PATH}
settings.PREVIEW_THUMBNAIL_PATH = tempfile.mkstemp()[1] + '%s/%d.png'
settings.ADDON_ICONS_PATH = tempfile.mkdtemp()
def tearDown(self):
super(TestEditMedia, self).tearDown()
settings.PREVIEW_THUMBNAIL_PATH = self.old_settings['preview']
settings.ADDON_ICONS_PATH = self.old_settings['icons']
def formset_new_form(self, *args, **kw):
ctx = self.client.get(self.media_edit_url).context
blank = initial(ctx['preview_form'].forms[-1])
blank.update(**kw)
return blank
def formset_media(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.formset_new_form()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def test_edit_media_defaulticon(self):
data = dict(icon_type='')
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
assert addon.get_icon_url(64).endswith('icons/default-64.png')
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_media_preuploadedicon(self):
data = dict(icon_type='icon/appearance')
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
assert addon.get_icon_url(64).endswith('icons/appearance-64.png')
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_media_uploadedicon(self):
img = get_image_path('mozilla.png')
src_image = open(img, 'rb')
data = dict(upload_image=src_image)
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
url = addon.get_icon_url(64)
assert ('addon_icon/%s' % addon.id) in url, (
'Unexpected path: %r' % url)
eq_(data['icon_type'], 'image/png')
# Check that it was actually uploaded
dirname = os.path.join(settings.ADDON_ICONS_PATH,
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-32.png' % addon.id)
assert os.path.exists(dest)
eq_(Image.open(dest).size, (32, 12))
def test_edit_media_icon_log(self):
self.test_edit_media_uploadedicon()
log = ActivityLog.objects.all()
eq_(log.count(), 1)
eq_(log[0].action, amo.LOG.CHANGE_ICON.id)
def test_edit_media_uploadedicon_noresize(self):
img = "%s/img/notifications/error.png" % settings.MEDIA_ROOT
src_image = open(img, 'rb')
data = dict(upload_image=src_image)
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
addon_url = addon.get_icon_url(64).split('?')[0]
assert addon_url.endswith('images/addon_icon/%s-64.png' % addon.id), (
'Unexpected path: %r' % addon_url)
eq_(data['icon_type'], 'image/png')
# Check that it was actually uploaded
dirname = os.path.join(settings.ADDON_ICONS_PATH,
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-64.png' % addon.id)
assert os.path.exists(dest)
eq_(Image.open(dest).size, (48, 48))
def test_edit_media_uploadedicon_wrongtype(self):
img = "%s/js/zamboni/devhub.js" % settings.MEDIA_ROOT
src_image = open(img, 'rb')
data = {'upload_image': src_image}
res = self.client.post(self.preview_upload, data)
response_json = json.loads(res.content)
eq_(response_json['errors'][0], u'Icons must be either PNG or JPG.')
def setup_image_status(self):
addon = self.get_addon()
self.icon_dest = os.path.join(addon.get_icon_dir(),
'%s-32.png' % addon.id)
os.makedirs(os.path.dirname(self.icon_dest))
open(self.icon_dest, 'w')
self.preview = addon.previews.create()
self.preview.save()
os.makedirs(os.path.dirname(self.preview.thumbnail_path))
open(self.preview.thumbnail_path, 'w')
self.url = reverse('devhub.ajax.image.status', args=[addon.slug])
def test_image_status_no_choice(self):
addon = self.get_addon()
addon.update(icon_type='')
url = reverse('devhub.ajax.image.status', args=[addon.slug])
result = json.loads(self.client.get(url).content)
assert result['icons']
def test_image_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_image_status_fails(self):
self.setup_image_status()
os.remove(self.icon_dest)
result = json.loads(self.client.get(self.url).content)
assert not result['icons']
def test_preview_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
# No previews means that all the images are done.
self.addon.previews.all().delete()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
def test_preview_status_fails(self):
self.setup_image_status()
os.remove(self.preview.thumbnail_path)
result = json.loads(self.client.get(self.url).content)
assert not result['previews']
def test_image_status_persona(self):
self.setup_image_status()
os.remove(self.icon_dest)
self.get_addon().update(type=amo.ADDON_PERSONA)
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_image_status_default(self):
self.setup_image_status()
os.remove(self.icon_dest)
self.get_addon().update(icon_type='icon/photos')
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_icon_animated(self):
filehandle = open(get_image_path('animated.png'), 'rb')
data = {'upload_image': filehandle}
res = self.client.post(self.preview_upload, data)
response_json = json.loads(res.content)
eq_(response_json['errors'][0], u'Icons cannot be animated.')
def preview_add(self, amount=1):
img = get_image_path('mozilla.png')
src_image = open(img, 'rb')
data = dict(upload_image=src_image)
data_formset = self.formset_media(**data)
url = self.preview_upload
r = self.client.post(url, data_formset)
details = json.loads(r.content)
upload_hash = details['upload_hash']
# Create and post with the formset.
fields = []
for i in range(amount):
fields.append(self.formset_new_form(caption='hi',
upload_hash=upload_hash,
position=i))
data_formset = self.formset_media(*fields)
r = self.client.post(self.media_edit_url, data_formset)
def test_edit_media_preview_add(self):
self.preview_add()
eq_(str(self.get_addon().previews.all()[0].caption), 'hi')
def test_edit_media_preview_edit(self):
self.preview_add()
preview = self.get_addon().previews.all()[0]
edited = {'caption': 'bye',
'upload_hash': '',
'id': preview.id,
'position': preview.position,
'file_upload': None}
data_formset = self.formset_media(edited, initial_count=1)
self.client.post(self.media_edit_url, data_formset)
eq_(str(self.get_addon().previews.all()[0].caption), 'bye')
eq_(len(self.get_addon().previews.all()), 1)
def test_edit_media_preview_reorder(self):
self.preview_add(3)
previews = self.get_addon().previews.all()
base = dict(upload_hash='', file_upload=None)
# Three preview forms were generated; mix them up here.
a = dict(caption="first", position=1, id=previews[2].id)
b = dict(caption="second", position=2, id=previews[0].id)
c = dict(caption="third", position=3, id=previews[1].id)
a.update(base)
b.update(base)
c.update(base)
        # Add them in backwards order ("third", "second", "first").
data_formset = self.formset_media(c, b, a, initial_count=3)
eq_(data_formset['files-0-caption'], 'third')
eq_(data_formset['files-1-caption'], 'second')
eq_(data_formset['files-2-caption'], 'first')
self.client.post(self.media_edit_url, data_formset)
# They should come out "first", "second", "third"
eq_(self.get_addon().previews.all()[0].caption, 'first')
eq_(self.get_addon().previews.all()[1].caption, 'second')
eq_(self.get_addon().previews.all()[2].caption, 'third')
def test_edit_media_preview_delete(self):
self.preview_add()
preview = self.get_addon().previews.get()
edited = {'DELETE': 'checked',
'upload_hash': '',
'id': preview.id,
'position': 0,
'file_upload': None}
data_formset = self.formset_media(edited, initial_count=1)
self.client.post(self.media_edit_url, data_formset)
eq_(len(self.get_addon().previews.all()), 0)
def test_edit_media_preview_add_another(self):
self.preview_add()
self.preview_add()
eq_(len(self.get_addon().previews.all()), 2)
def test_edit_media_preview_add_two(self):
self.preview_add(2)
eq_(len(self.get_addon().previews.all()), 2)
class TestEditDetails(TestEdit):
def setUp(self):
super(TestEditDetails, self).setUp()
self.details_url = self.get_url('details')
self.details_edit_url = self.get_url('details', edit=True)
def test_edit(self):
data = dict(description='New description with <em>html</em>!',
default_locale='en-US',
homepage='http://twitter.com/fligtarsmom')
r = self.client.post(self.details_edit_url, data)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_xss(self):
"""
Let's try to put xss in our description, and safe html, and verify
that we are playing safe.
"""
self.addon.description = ("This\n<b>IS</b>"
"<script>alert('awesome')</script>")
self.addon.save()
r = self.client.get(reverse('devhub.addons.edit',
args=[self.addon.slug]))
doc = pq(r.content)
eq_(doc('#edit-addon-details span[lang]').html(),
"This<br/><b>IS</b><script>alert('awesome')"
'</script>')
def test_edit_homepage_optional(self):
data = dict(description='New description with <em>html</em>!',
default_locale='en-US', homepage='')
r = self.client.post(self.details_edit_url, data)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_default_locale_required_trans(self):
# name, summary, and description are required in the new locale.
description, homepage = map(unicode, [self.addon.description,
self.addon.homepage])
# TODO: description should get fixed up with the form.
fields = ['description', 'name', 'summary']
error = ('Before changing your default locale you must have a name, '
'summary, and description in that locale. '
'You are missing %s.')
missing = lambda f: error % ', '.join(map(repr, f))
d = dict(description=description, homepage=homepage,
default_locale='fr')
r = self.client.post(self.details_edit_url, d)
self.assertFormError(r, 'form', None, missing(fields))
# Now we have a name.
self.addon.name = {'fr': 'fr name'}
self.addon.save()
fields.remove('name')
r = self.client.post(self.details_edit_url, d)
self.assertFormError(r, 'form', None, missing(fields))
# Now we have a summary.
self.addon.summary = {'fr': 'fr summary'}
self.addon.save()
fields.remove('summary')
r = self.client.post(self.details_edit_url, d)
self.assertFormError(r, 'form', None, missing(fields))
# Now we're sending an fr description with the form.
d['description_fr'] = 'fr description'
r = self.client.post(self.details_edit_url, d)
eq_(r.context['form'].errors, {})
def test_edit_default_locale_frontend_error(self):
d = dict(description='xx', homepage='yy', default_locale='fr')
r = self.client.post(self.details_edit_url, d)
self.assertContains(r, 'Before changing your default locale you must')
def test_edit_locale(self):
addon = self.get_addon()
addon.update(default_locale='en-US')
r = self.client.get(self.details_url)
eq_(pq(r.content)('.addon_edit_locale').eq(0).text(), 'English (US)')
class TestEditSupport(TestEdit):
def setUp(self):
super(TestEditSupport, self).setUp()
self.support_url = self.get_url('support')
self.support_edit_url = self.get_url('support', edit=True)
def test_edit_support(self):
data = dict(support_email='sjobs@apple.com',
support_url='http://apple.com/')
r = self.client.post(self.support_edit_url, data)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_support_premium(self):
self.get_addon().update(premium_type=amo.ADDON_PREMIUM)
data = dict(support_email='sjobs@apple.com',
support_url='')
r = self.client.post(self.support_edit_url, data)
eq_(r.context['form'].errors, {})
eq_(self.get_addon().support_email, data['support_email'])
def test_edit_support_premium_required(self):
self.get_addon().update(premium_type=amo.ADDON_PREMIUM)
data = dict(support_url='')
r = self.client.post(self.support_edit_url, data)
assert 'support_email' in r.context['form'].errors
def test_edit_support_getsatisfaction(self):
urls = [("http://getsatisfaction.com/abc/products/def", 'abcdef'),
("http://getsatisfaction.com/abc/", 'abc'), # No company
("http://google.com", None)] # Delete GS
for (url, val) in urls:
data = dict(support_email='abc@def.com', support_url=url)
r = self.client.post(self.support_edit_url, data)
eq_(r.context['form'].errors, {})
            doc = pq(r.content)
            result = doc('.addon_edit_gs').eq(0).text()
result = re.sub('\W', '', result) if result else None
eq_(result, val)
def test_edit_support_optional_url(self):
data = dict(support_email='sjobs@apple.com',
support_url='')
r = self.client.post(self.support_edit_url, data)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_support_optional_email(self):
data = dict(support_email='',
support_url='http://apple.com/')
r = self.client.post(self.support_edit_url, data)
eq_(r.context['form'].errors, {})
addon = self.get_addon()
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
class TestEditTechnical(TestEdit):
fixtures = TestEdit.fixtures + ['addons/persona', 'base/addon_40',
'base/addon_1833_yoono',
'base/addon_4664_twitterbar.json',
'base/addon_5299_gcal', 'base/addon_6113']
def setUp(self):
super(TestEditTechnical, self).setUp()
self.dependent_addon = Addon.objects.get(id=5579)
Flag.objects.create(name='edit-dependencies', everyone=True)
AddonDependency.objects.create(addon=self.addon,
dependent_addon=self.dependent_addon)
self.technical_url = self.get_url('technical')
self.technical_edit_url = self.get_url('technical', edit=True)
ctx = self.client.get(self.technical_edit_url).context
self.dep = initial(ctx['dependency_form'].initial_forms[0])
self.dep_initial = formset(self.dep, prefix='dependencies',
initial_count=1)
def dep_formset(self, *args, **kw):
kw.setdefault('initial_count', 1)
kw.setdefault('prefix', 'dependencies')
return formset(self.dep, *args, **kw)
def formset(self, data):
return self.dep_formset(**data)
def test_log(self):
data = self.formset({'developer_comments': 'This is a test'})
o = ActivityLog.objects
eq_(o.count(), 0)
r = self.client.post(self.technical_edit_url, data)
eq_(r.context['form'].errors, {})
eq_(o.filter(action=amo.LOG.EDIT_PROPERTIES.id).count(), 1)
def test_technical_on(self):
# Turn everything on
data = dict(developer_comments='Test comment!',
binary='on',
external_software='on',
site_specific='on',
view_source='on')
r = self.client.post(self.technical_edit_url, self.formset(data))
eq_(r.context['form'].errors, {})
addon = self.get_addon()
for k in data:
if k == 'developer_comments':
eq_(unicode(getattr(addon, k)), unicode(data[k]))
else:
eq_(getattr(addon, k), True if data[k] == 'on' else False)
        # And now turn everything back off.
data = dict(developer_comments='Test comment!')
r = self.client.post(self.technical_edit_url, self.formset(data))
addon = self.get_addon()
eq_(addon.binary, False)
eq_(addon.external_software, False)
eq_(addon.site_specific, False)
eq_(addon.view_source, False)
def test_technical_devcomment_notrequired(self):
data = dict(developer_comments='',
binary='on',
external_software='on',
site_specific='on',
view_source='on')
r = self.client.post(self.technical_edit_url, self.formset(data))
eq_(r.context['form'].errors, {})
addon = self.get_addon()
for k in data:
if k == 'developer_comments':
eq_(unicode(getattr(addon, k)), unicode(data[k]))
else:
eq_(getattr(addon, k), True if data[k] == 'on' else False)
def test_auto_repackage_not_shown(self):
f = self.addon.current_version.all_files[0]
f.jetpack_version = None
f.save()
r = self.client.get(self.technical_edit_url)
self.assertNotContains(r, 'Upgrade SDK?')
def test_auto_repackage_shown(self):
f = self.addon.current_version.all_files[0]
f.jetpack_version = '1.0'
f.save()
r = self.client.get(self.technical_edit_url)
self.assertContains(r, 'Upgrade SDK?')
def test_dependencies_overview(self):
eq_([d.id for d in self.addon.all_dependencies], [5579])
r = self.client.get(self.technical_url)
req = pq(r.content)('td#required-addons')
eq_(req.length, 1)
eq_(req.attr('data-src'),
reverse('devhub.ajax.dependencies', args=[self.addon.slug]))
eq_(req.find('li').length, 1)
a = req.find('a')
eq_(a.attr('href'), self.dependent_addon.get_url_path())
eq_(a.text(), unicode(self.dependent_addon.name))
def test_dependencies_initial(self):
r = self.client.get(self.technical_edit_url)
form = pq(r.content)('#required-addons .dependencies li[data-addonid]')
eq_(form.length, 1)
eq_(form.find('input[id$=-dependent_addon]').val(),
str(self.dependent_addon.id))
div = form.find('div')
eq_(div.attr('style'),
'background-image:url(%s)' % self.dependent_addon.icon_url)
a = div.find('a')
eq_(a.attr('href'), self.dependent_addon.get_url_path())
eq_(a.text(), unicode(self.dependent_addon.name))
def test_dependencies_add(self):
addon = Addon.objects.get(id=5299)
eq_(addon.type, amo.ADDON_EXTENSION)
eq_(addon in list(Addon.objects.reviewed()), True)
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
eq_(any(r.context['dependency_form'].errors), False)
self.check_dep_ids([self.dependent_addon.id, addon.id])
r = self.client.get(self.technical_edit_url)
reqs = pq(r.content)('#required-addons .dependencies')
eq_(reqs.find('li[data-addonid]').length, 2)
req = reqs.find('li[data-addonid=5299]')
eq_(req.length, 1)
a = req.find('div a')
eq_(a.attr('href'), addon.get_url_path())
eq_(a.text(), unicode(addon.name))
def test_dependencies_limit(self):
deps = Addon.objects.reviewed().exclude(
Q(id__in=[self.addon.id, self.dependent_addon.id]) |
Q(type=amo.ADDON_PERSONA))
args = []
eq_(deps.count(), 4) # The limit is 3.
for dep in deps:
args.append({'dependent_addon': dep.id})
d = self.dep_formset(*args)
r = self.client.post(self.technical_edit_url, d)
eq_(r.context['dependency_form'].non_form_errors(),
['There cannot be more than 3 required add-ons.'])
# Check error message for apps.
Addon.objects.all().update(type=amo.ADDON_WEBAPP)
r = self.client.post(self.technical_edit_url, d)
eq_(r.context['dependency_form'].non_form_errors(),
['There cannot be more than 3 required apps.'])
def test_dependencies_limit_with_deleted_form(self):
deps = Addon.objects.reviewed().exclude(
Q(id__in=[self.addon.id, self.dependent_addon.id]) |
Q(type=amo.ADDON_PERSONA))[:3]
args = []
for dep in deps:
args.append({'dependent_addon': dep.id})
# If we delete one form and add three, everything should be A-OK.
self.dep['DELETE'] = True
d = self.dep_formset(*args)
r = self.client.post(self.technical_edit_url, d)
eq_(any(r.context['dependency_form'].errors), False)
self.check_dep_ids(deps.values_list('id', flat=True))
    def check_dep_ids(self, expected=()):
a = AddonDependency.objects.values_list('dependent_addon__id',
flat=True)
eq_(sorted(list(a)), sorted(expected))
def check_bad_dep(self, r):
"""This helper checks that bad dependency data doesn't go through."""
eq_(r.context['dependency_form'].errors[1]['dependent_addon'],
['Select a valid choice. That choice is not one of the available '
'choices.'])
self.check_dep_ids([self.dependent_addon.id])
def test_dependencies_add_reviewed(self):
"""Ensure that reviewed add-ons can be made as dependencies."""
addon = Addon.objects.get(id=40)
for status in amo.REVIEWED_STATUSES:
addon.update(status=status)
eq_(addon in list(Addon.objects.reviewed()), True)
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
eq_(any(r.context['dependency_form'].errors), False)
self.check_dep_ids([self.dependent_addon.id, addon.id])
AddonDependency.objects.get(dependent_addon=addon).delete()
def test_dependencies_no_add_unreviewed(self):
"""Ensure that unreviewed add-ons cannot be made as dependencies."""
addon = Addon.objects.get(id=40)
for status in amo.UNREVIEWED_STATUSES:
addon.update(status=status)
eq_(addon in list(Addon.objects.reviewed()), False)
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_no_add_reviewed_persona(self):
"""Ensure that reviewed Personas cannot be made as dependencies."""
addon = Addon.objects.get(id=15663)
eq_(addon.type, amo.ADDON_PERSONA)
eq_(addon in list(Addon.objects.reviewed()), True)
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_no_add_unreviewed_persona(self):
"""Ensure that unreviewed Personas cannot be made as dependencies."""
addon = Addon.objects.get(id=15663)
addon.update(status=amo.STATUS_UNREVIEWED)
eq_(addon.status, amo.STATUS_UNREVIEWED)
eq_(addon in list(Addon.objects.reviewed()), False)
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_edit_app_dependencies(self):
"""Apps should be able to add app dependencies."""
Addon.objects.all().update(type=amo.ADDON_WEBAPP)
addon = Addon.objects.get(id=5299)
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
eq_(any(r.context['dependency_form'].errors), False)
self.check_dep_ids([self.dependent_addon.id, addon.id])
def test_edit_addon_dependencies_no_add_apps(self):
"""Add-ons should not be able to add app dependencies."""
addon = Addon.objects.get(id=5299)
addon.update(type=amo.ADDON_WEBAPP)
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_edit_app_dependencies_no_add_addons(self):
"""Apps should not be able to add add-on dependencies."""
addon = Addon.objects.get(id=5299)
        self.addon.update(type=amo.ADDON_WEBAPP)
        d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_add_self(self):
"""Ensure that an add-on cannot be made dependent on itself."""
d = self.dep_formset({'dependent_addon': self.addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_add_invalid(self):
"""Ensure that a non-existent add-on cannot be a dependency."""
d = self.dep_formset({'dependent_addon': 9999})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_add_duplicate(self):
"""Ensure that an add-on cannot be made dependent more than once."""
d = self.dep_formset({'dependent_addon': self.dependent_addon.id})
r = self.client.post(self.technical_edit_url, d)
eq_(r.context['dependency_form'].forms[1].non_field_errors(),
['Addon dependency with this Addon and Dependent addon already '
'exists.'])
self.check_dep_ids([self.dependent_addon.id])
def test_dependencies_delete(self):
self.dep['DELETE'] = True
d = self.dep_formset(total_count=1, initial_count=1)
r = self.client.post(self.technical_edit_url, d)
eq_(any(r.context['dependency_form'].errors), False)
self.check_dep_ids()
def test_dependencies_add_delete(self):
"""Ensure that we can both delete a dependency and add another."""
self.dep['DELETE'] = True
d = self.dep_formset({'dependent_addon': 5299})
r = self.client.post(self.technical_edit_url, d)
eq_(any(r.context['dependency_form'].errors), False)
self.check_dep_ids([5299])
class TestAdmin(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'base/addon_3615']
def login_admin(self):
assert self.client.login(username='admin@mozilla.com',
password='password')
def login_user(self):
assert self.client.login(username='del@icio.us', password='password')
def test_show_admin_settings_admin(self):
self.login_admin()
url = reverse('devhub.addons.edit', args=['a3615'])
r = self.client.get(url)
eq_(r.status_code, 200)
self.assertContains(r, 'Admin Settings')
def test_show_admin_settings_nonadmin(self):
self.login_user()
url = reverse('devhub.addons.edit', args=['a3615'])
r = self.client.get(url)
eq_(r.status_code, 200)
self.assertNotContains(r, 'Admin Settings')
def test_post_as_admin(self):
self.login_admin()
url = reverse('devhub.addons.admin', args=['a3615'])
r = self.client.post(url)
eq_(r.status_code, 200)
def test_post_as_nonadmin(self):
self.login_user()
url = reverse('devhub.addons.admin', args=['a3615'])
r = self.client.post(url)
eq_(r.status_code, 403)
|
{
"content_hash": "44347aa9f8454cc517b1bac497350808",
"timestamp": "",
"source": "github",
"line_count": 1337,
"max_line_length": 79,
"avg_line_length": 39.58489154824233,
"alnum_prop": 0.5861880018894662,
"repo_name": "jbalogh/zamboni",
"id": "b49c303e689b9de6f6ce609c46a71c6c28d6cf5f",
"size": "52925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/devhub/tests/test_views_edit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "JavaScript",
"bytes": "1553612"
},
{
"name": "Python",
"bytes": "2860649"
},
{
"name": "Shell",
"bytes": "8095"
}
],
"symlink_target": ""
}
|
import os
import sys
import pyx
import argparse
import layouts
import samplers
import data_loader
def main(arguments):
parser = argparse.ArgumentParser(description='Generate clustering visualizations.')
parser.add_argument(
'--tables_dir', dest='tables_dir', default='input/tables',
help='contents of the "individual_stages" subdirectory of results (default: "input/tables")'
)
parser.add_argument(
'--clusters_dir', dest='clusters_dir', default='input/clusters',
help='directory containing the cluster assignment for each stage (default: "input/clusters")'
)
parser.add_argument(
'--output_dir', dest='output_dir', default='output',
help='output directory to write the results (default: "output")'
)
parser.add_argument(
'--layout', dest='layout', required=True, choices=layouts.available_names(),
help='layout to use for displaying clusters'
)
parser.add_argument(
'--sampler', dest='sampler', required=True, choices=samplers.available_names(),
help='algorithm for selecting which items and in what order to show'
)
parser.add_argument(
'-n', dest='total_items', type=int, default=200,
help='total number of items to show'
)
args = parser.parse_args(arguments[1:])
if os.path.exists(args.output_dir):
print('ERROR: the directory "{}" already exists, please use another name'.format(
args.output_dir), file=sys.stderr
)
return 1
else:
os.makedirs(args.output_dir)
layout_class = layouts.from_name(args.layout)
sampler_class = samplers.from_name(args.sampler)
clusters = data_loader.DataLoader(args.tables_dir, args.clusters_dir).results
for k, v in clusters.items():
a_layout = layout_class()
a_sampler = sampler_class(v, args.total_items)
figure = pyx.canvas.canvas()
a_layout.draw(a_sampler, figure)
canvas = pyx.canvas.canvas()
transform = pyx.trafo.scale(40, 40)
canvas.insert(figure, [transform])
canvas.writePDFfile("{}/{}".format(args.output_dir, k))
return 0
if __name__ == "__main__":
status = main(sys.argv)
sys.exit(status)
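# A hypothetical invocation sketch; the layout and sampler names below are
# placeholders, since the valid choices come from layouts.available_names()
# and samplers.available_names() at runtime:
#
#   python runner.py --layout <layout> --sampler <sampler> -n 100 \
#       --tables_dir input/tables --clusters_dir input/clusters --output_dir out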
|
{
"content_hash": "c374fed32bfa7a1265575a87a1e79a33",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 101,
"avg_line_length": 33.43283582089552,
"alnum_prop": 0.6464285714285715,
"repo_name": "alepulver/my-thesis",
"id": "cfef7e9cdfb6e377e4765c73fb2d357e5e0e590f",
"size": "2240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "results-visualizations/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "285"
},
{
"name": "CoffeeScript",
"bytes": "44733"
},
{
"name": "HTML",
"bytes": "15888"
},
{
"name": "JavaScript",
"bytes": "2189"
},
{
"name": "Python",
"bytes": "143171"
},
{
"name": "R",
"bytes": "71982"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, Text, Integer
from sqlalchemy.schema import Index, UniqueConstraint
from freight.config import db
LOG_CHUNK_SIZE = 4096
class LogChunk(db.Model):
__tablename__ = 'logchunk'
__table_args__ = (
Index('idx_logchunk_task_id', 'task_id'),
UniqueConstraint('task_id', 'offset', name='unq_logchunk_source_offset'),
)
id = Column(Integer, primary_key=True)
task_id = Column(Integer, ForeignKey('task.id', ondelete="CASCADE"), nullable=False)
# offset is sum(c.size for c in chunks_before_this)
offset = Column(Integer, nullable=False)
# size is len(text)
size = Column(Integer, nullable=False)
text = Column(Text, nullable=False)
date_created = Column(DateTime, default=datetime.utcnow, nullable=False)
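# A minimal sketch (not part of the original model; it assumes `db.session`
# exposes a standard SQLAlchemy session) of how a writer would append text
# while preserving the documented invariants: each chunk's offset is the
# running sum of the sizes of all earlier chunks, and size == len(text).
# A real writer would additionally split text into LOG_CHUNK_SIZE pieces.
def append_chunk_sketch(task_id, text):
    last = (db.session.query(LogChunk)
            .filter_by(task_id=task_id)
            .order_by(LogChunk.offset.desc())
            .first())
    offset = (last.offset + last.size) if last else 0
    db.session.add(LogChunk(task_id=task_id, offset=offset,
                            size=len(text), text=text))
    db.session.commit()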
|
{
"content_hash": "2d35647e36dc453432e2fffb5d9ab259",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 88,
"avg_line_length": 33.407407407407405,
"alnum_prop": 0.7028824833702882,
"repo_name": "jkimbo/freight",
"id": "7492ac87b27370a478f1cdf21f72906c035db593",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freight/models/logchunk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6901"
},
{
"name": "HTML",
"bytes": "918"
},
{
"name": "JavaScript",
"bytes": "22742"
},
{
"name": "Makefile",
"bytes": "808"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "146824"
},
{
"name": "Shell",
"bytes": "392"
}
],
"symlink_target": ""
}
|
__file__ = '__init__.py'
__date__ = '5/16/2016'
__author__ = 'ABREZNIC'
"""
The MIT License (MIT)
Copyright (c) 2016 Texas Department of Transportation
Author: Adam Breznicky
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
|
{
"content_hash": "4cea93fcb38c89e2a3c4459ffda3b4ee",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 44.55555555555556,
"alnum_prop": 0.7822111388196176,
"repo_name": "adambreznicky/python",
"id": "3f057776324a4fde05137f888a6bef88f9f15bd9",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QC/pre_GRID/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2498272"
},
{
"name": "Visual Basic",
"bytes": "40594"
}
],
"symlink_target": ""
}
|
import time
import mock
from ceilometer.compute import manager
from ceilometer.compute.pollsters import cpu
from ceilometer.compute.virt import inspector as virt_inspector
from . import base
class TestCPUPollster(base.TestPollsterBase):
def setUp(self):
super(TestCPUPollster, self).setUp()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self.inspector.inspect_cpus(self.instance.name).AndReturn(
virt_inspector.CPUStats(time=1 * (10 ** 6), number=2))
self.inspector.inspect_cpus(self.instance.name).AndReturn(
virt_inspector.CPUStats(time=3 * (10 ** 6), number=2))
# cpu_time resets on instance restart
self.inspector.inspect_cpus(self.instance.name).AndReturn(
virt_inspector.CPUStats(time=2 * (10 ** 6), number=2))
self.mox.ReplayAll()
mgr = manager.AgentManager()
pollster = cpu.CPUPollster()
def _verify_cpu_metering(expected_time):
cache = {}
samples = list(pollster.get_samples(mgr, cache, self.instance))
self.assertEqual(len(samples), 1)
self.assertEqual(set([s.name for s in samples]),
set(['cpu']))
assert samples[0].volume == expected_time
self.assertEqual(samples[0].resource_metadata.get('cpu_number'), 2)
# ensure elapsed time between polling cycles is non-zero
time.sleep(0.001)
_verify_cpu_metering(1 * (10 ** 6))
_verify_cpu_metering(3 * (10 ** 6))
_verify_cpu_metering(2 * (10 ** 6))
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples_no_caching(self):
self.inspector.inspect_cpus(self.instance.name).AndReturn(
virt_inspector.CPUStats(time=1 * (10 ** 6), number=2))
self.mox.ReplayAll()
mgr = manager.AgentManager()
pollster = cpu.CPUPollster()
cache = {}
samples = list(pollster.get_samples(mgr, cache, self.instance))
self.assertEqual(len(samples), 1)
self.assertEqual(samples[0].volume, 10 ** 6)
self.assertEqual(len(cache), 0)
|
{
"content_hash": "edcae3fc8d002d0db7bf147e4d5889a4",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 37.49152542372882,
"alnum_prop": 0.6265822784810127,
"repo_name": "rickerc/ceilometer_audit",
"id": "47002f8dc53c0b37172cd0c2cf8297dc8f381fee",
"size": "2965",
"binary": false,
"copies": "3",
"ref": "refs/heads/cis-havana-staging",
"path": "tests/compute/pollsters/test_cpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "64962"
},
{
"name": "Python",
"bytes": "1810243"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
}
|
"""
"""
from __future__ import unicode_literals, absolute_import, with_statement
from contextlib import contextmanager
import unittest
# This env var tells the config loader to use the special test config located at:
# $project_root/tests/fixtures/test_config.ini
# This file must be created from the example config (test_config_dist.ini) in the same
# directory
import os
os.environ['TEST'] = "1"
import pickle
from os.path import join, dirname
from sqlalchemy import create_engine
from tranny.app import config, Session, Base
from tranny.configuration import Configuration
import vcr
def get_fixture(fixture_file):
return join(dirname(__file__), "fixtures", fixture_file)
class Pickler(object):
"""
    Pickle-based serializer
"""
def serialize(self, obj):
return pickle.dumps(obj)
def deserialize(self, s):
return pickle.loads(s)
tapedeck = vcr.VCR(
serializer='pickle',
cassette_library_dir=get_fixture('cassettes'),
record_mode='all'
)
tapedeck.register_serializer('pickle', Pickler())
def _make_config():
c = Configuration()
config_file = get_fixture("test_config.ini")
c.initialize(config_file)
return c
@contextmanager
def client_env(track, live_test=False, **kwargs):
""" Context manager wrapper for simple integration with vcrpy. Since we want to sometimes test
against a live client this will wrap and yield the vcrpy context call only when the live_test
value is False
:param track: Name of fixture passed to vcrpy
:type track: unicode
:param live_test: Is this a live test
:type live_test: bool
:param kwargs: Extra vcrpy use_cassette parameters
"""
if live_test:
yield
else:
with tapedeck.use_cassette(track, **kwargs):
yield
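# A hypothetical usage sketch of client_env (the cassette name and client
# call are invented for illustration): the block replays recorded traffic
# from the cassette unless live_test is True, in which case the real client
# is exercised.
#
#   with client_env('transmission/torrent_add.pickle', live_test=False):
#       client.add(torrent_data)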
class TrannyTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TrannyTestCase, self).__init__(methodName=methodName)
self.client_name = ''
config.initialize()
def run_data_set(self, test_data, fn):
for expected, data in test_data:
self.assertEqual(expected, fn(data), data)
def track(self, track_name):
if self.client_name:
return self.client_name + "/" + track_name + ".pickle"
return track_name + ".pickle"
class TrannyDBTestCase(TrannyTestCase):
def __init__(self, methodName='runTest'):
super(TrannyDBTestCase, self).__init__(methodName=methodName)
self.init_db()
def init_db(self, uri="sqlite://"):
Session.remove()
engine = create_engine(uri)
Session.configure(bind=engine)
Base.metadata.create_all(bind=engine)
|
{
"content_hash": "df7f28326eab4061d03b41cc283fa95e",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 98,
"avg_line_length": 27.65979381443299,
"alnum_prop": 0.6757361162877376,
"repo_name": "leighmacdonald/tranny",
"id": "887e5bba243a642610f4f57d9a289bfd616dd6b7",
"size": "2707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testcase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "110247"
},
{
"name": "CoffeeScript",
"bytes": "76122"
},
{
"name": "JavaScript",
"bytes": "45559"
},
{
"name": "Python",
"bytes": "323593"
},
{
"name": "Shell",
"bytes": "7485"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import, division
from argparse import ArgumentDefaultsHelpFormatter
def func(args, parser):
# delay import of the rest of the module to improve `osprey -h` performance
from ..execute_worker import execute
execute(args, parser)
def configure_parser(sub_parsers):
help = 'Run a worker process (hyperparameter optimization)'
p = sub_parsers.add_parser('worker', description=help, help=help,
formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('config', help='Path to worker config file (yaml)')
p.add_argument('-n', '--n-iters', default=1, type=int, help='Number of '
'trials to run sequentially.')
p.set_defaults(func=func)
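# The command-line usage implied by this sub-parser (the config filename is
# illustrative):
#
#   osprey worker my_config.yaml --n-iters 5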
|
{
"content_hash": "debae12ce43e0aecc6f8b270fde2fec8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 42.55555555555556,
"alnum_prop": 0.6840731070496083,
"repo_name": "cxhernandez/osprey",
"id": "1b03eaa75d1aee551f363fcba0197ce33eaa2183",
"size": "766",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "osprey/cli/parser_worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "141212"
},
{
"name": "Shell",
"bytes": "4545"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.conf import settings
import os
# This middleware adds a REMOTE_USER header (read from the environment) to every request.
# It is required when running the app with uwsgi locally (with runserver this is unnecessary).
# In production, when FAKE_LOGIN=False, the REMOTE_USER header should be set by SSO.
class SetUserMiddleware():
def process_request(self, request):
if settings.FAKE_LOGIN:
request.META['REMOTE_USER'] = os.getenv('REMOTE_USER')
class CustomHeaderMiddleware(RemoteUserMiddleware):
header = os.getenv('REMOTE_USER_HEADER', 'REMOTE_USER')
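# A sketch (assumed, not taken from this repo's settings) of how these
# classes would be ordered: SetUserMiddleware must run before Django's auth
# middleware so the header is present, and CustomHeaderMiddleware, being a
# RemoteUserMiddleware, must come after AuthenticationMiddleware per Django's
# docs:
#
#   MIDDLEWARE_CLASSES = (
#       'schedulesite.middleware.SetUserMiddleware',
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'schedulesite.middleware.CustomHeaderMiddleware',
#   )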
|
{
"content_hash": "3bee67a8622a0d27c075a9741cde904d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 97,
"avg_line_length": 43.93333333333333,
"alnum_prop": 0.7708649468892261,
"repo_name": "futurice/schedule",
"id": "3864ca3283798fbc44b523151dc1fbd173863b79",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "schedulesite/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4704"
},
{
"name": "Dockerfile",
"bytes": "1367"
},
{
"name": "HTML",
"bytes": "5543"
},
{
"name": "JavaScript",
"bytes": "165591"
},
{
"name": "Python",
"bytes": "94435"
},
{
"name": "Shell",
"bytes": "1748"
}
],
"symlink_target": ""
}
|
import requests
import json
import sys
if len(sys.argv) == 1:
    # Options: -s : check services, -h : check hosts, -m : check manager service
sys.exit(1)
OUTPUT = '{0}: {1}'
clusters = requests.get(
'http://admin:admin@{0}:7180/api/v9/clusters'.format(
sys.argv[1]), auth=(
'admin', 'admin'))
name = json.loads(clusters.text)['items'][0]['name']
result = 'Critical'
description = ' '
if '-s' in sys.argv:
description += 'Hadoop Services ( '
# services
services = requests.get(
'http://admin:admin@{0}:7180/api/v9/clusters/{1}/services'.format(
sys.argv[1], name), auth=(
'admin', 'admin'))
items = json.loads(services.text)['items']
if items:
for item in items:
description += '{0}:{1} '.format(
item['name'],
item['healthSummary'])
result = 'OK'
if 'BAD' in description:
result = 'Critical'
if 'NOT_AVAILABLE' in description:
result = 'Critical'
if 'CONCERNING' in description:
result = 'Concerning'
else:
description += 'no CDH services available'
if '-h' in sys.argv:
description += 'Hosts ( '
# hosts
hosts = requests.get(
'http://admin:admin@{0}:7180/api/v9/hosts?view=full'.format(
sys.argv[1]), auth=(
'admin', 'admin'))
hostList = json.loads(hosts.text)['items']
if hostList:
for host in hostList:
description += '{0}:{1} '.format(
host['hostname'],
host['healthSummary'])
result = 'OK'
if 'BAD' in description:
result = 'Critical'
if 'NOT_AVAILABLE' in description:
result = 'Critical'
if 'CONCERNING' in description:
result = 'Concerning'
else:
description += 'no CDH hosts available'
if '-m' in sys.argv:
description += 'Manager Service Status ( '
# manager service status
manager_status = requests.get(
'http://admin:admin@{0}:7180/api/v9/cm/service'.format(
sys.argv[1]), auth=(
'admin', 'admin'))
manager_service_status = json.loads(manager_status.text)
description += '{0}:{1} '.format(
manager_service_status['name'],
manager_service_status['healthSummary'])
result = 'OK'
if 'BAD' in description:
result = 'Critical'
if 'NOT_AVAILABLE' in description:
result = 'Critical'
if 'CONCERNING' in description:
result = 'Concerning'
description += ')'
print OUTPUT.format(result, description)
status = 2
if result == 'OK':
status = 0
sys.exit(status)
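# Example invocations (the hostname is illustrative). The script prints a
# Nagios-style "STATUS: description" line and exits 0 when the result is
# 'OK', 2 otherwise:
#
#   python healthCheck.py cm-host.example.com -s
#   python healthCheck.py cm-host.example.com -s -h -m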
|
{
"content_hash": "56c92669279fa253ea1acec465f8365f",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 22.37704918032787,
"alnum_prop": 0.5505494505494506,
"repo_name": "trustedanalytics/platform-ansible",
"id": "eaa17e12ba9572973c06c20d5145915d06e48851",
"size": "3340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/zabbix_base_proxy/files/healthCheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23338"
},
{
"name": "Nginx",
"bytes": "1331"
},
{
"name": "PHP",
"bytes": "566"
},
{
"name": "Python",
"bytes": "69357"
},
{
"name": "Shell",
"bytes": "51660"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
import os
import logging
from sys import stdout
def change_dir(base_path, full_exp_name):
path = os.path.join(base_path, full_exp_name)
try:
os.mkdir(path)
except OSError as exception:
if exception.errno == 17:
print(path, "already exists. Reusing directory.")
else:
raise
os.chdir(path)
def configure_logger(full_exp_name):
logger = logging.getLogger(full_exp_name)
if not logger.handlers:
fh = logging.FileHandler(full_exp_name + '.log')
formatter = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(logging.StreamHandler(stream=stdout))
logger.setLevel(logging.DEBUG)
logger.info("***********************************")
logger.info("Preparing " + full_exp_name + "...")
def init_experiment(base_path, experiment, full_exp_name):
"""
Parameters
----------
    base_path : str
    experiment : str
    full_exp_name : str
Returns
-------
func_call : str
"""
change_dir(base_path, full_exp_name)
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
configure_logger(full_exp_name)
return func_call
def run_experiment(net, epochs):
net.print_net()
net.compile()
fit(net, epochs)
def fit(net, epochs):
print("Running net.fit for", net.experiment_name)
try:
net.fit(epochs)
except KeyboardInterrupt:
print("Keyboard interrupt received.")
menu(net, epochs)
def menu(net, epochs):
# Print menu
print("")
print("------------------ OPTIONS ------------------")
print("d: Enter debugger.")
print("s: Save plots and params.")
print("q: Quit all experiments.")
print("e: Change number of epochs to train this net (currently {})."
.format(epochs))
print("c: Continue training.")
print("")
# Get input
selection_str = raw_input("Please enter one or more letters: ")
# Handle input
for selection in selection_str:
if selection == 'd':
import ipdb
ipdb.set_trace()
elif selection == 's':
net.save()
elif selection == 'q':
sure = raw_input("Are you sure you want to quit [Y/n]? ")
if sure.lower() != 'n':
raise
elif selection == 'e':
new_epochs = raw_input("New number of epochs (or 'None'): ")
if new_epochs == 'None':
epochs = None
else:
try:
epochs = int(new_epochs)
                except ValueError:
print("'{}' not an integer!".format(new_epochs))
elif selection == 'c':
break
else:
print("Selection '{}' not recognised!".format(selection))
break
print("Continuing training for {} epochs...".format(epochs))
fit(net, epochs)
|
{
"content_hash": "1e88159c7dd9c3ded4857341fedd2898",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 72,
"avg_line_length": 28.257142857142856,
"alnum_prop": 0.5540950455005056,
"repo_name": "mmottahedi/neuralnilm_prototype",
"id": "929c10f249714508967faa7bfdeff22b89ad7376",
"size": "2967",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neuralnilm/experiment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
}
|
import objc; objc.setVerbose(1)
from AppKit import *
from Foundation import *
from PyObjCTools import AppHelper
from MyDocument import *
from LaunchServices import *
from PreferencesWindowController import *
PYTHON_EXTENSIONS = [u'py', u'pyw', u'pyc']
FILE_TYPE_BINDING_MESSAGE = u"""
%s is not the default application for all Python script types. You should fix this with the Finder's "Get Info" command.
See "Changing the application that opens a file" in Mac Help for details.
""".strip()
class MyAppDelegate(NSObject):
def init(self):
self = super(MyAppDelegate, self).init()
self.initial_action_done = False
self.should_terminate = False
return self
@objc.IBAction
def showPreferences_(self, sender):
PreferencesWindowController.getPreferencesWindow()
def applicationDidFinishLaunching_(self, aNotification):
self.testFileTypeBinding()
if not self.initial_action_done:
self.initial_action_done = True
self.showPreferences_(self)
def shouldShowUI(self):
if not self.initial_action_done:
self.should_terminate = True
self.initial_action_done = True
if NSApp().currentEvent().modifierFlags() & NSAlternateKeyMask:
return True
return False
def shouldTerminate(self):
return self.should_terminate
def applicationShouldOpenUntitledFile_(self, sender):
return False
def testFileTypeBinding(self):
if NSUserDefaults.standardUserDefaults().boolForKey_(u'SkipFileBindingTest'):
return
bndl = NSBundle.mainBundle()
myURL = NSURL.fileURLWithPath_(bndl.bundlePath())
myName = bndl.infoDictionary()[u'CFBundleName']
for ext in PYTHON_EXTENSIONS:
            err, outRef, outURL = LSGetApplicationForInfo(kLSUnknownType, kLSUnknownCreator, ext, kLSRolesViewer, None, None)
if (err or myURL != outURL):
res = NSRunAlertPanel(
u'File type binding',
FILE_TYPE_BINDING_MESSAGE % myName,
u'OK',
u"Don't show this warning again",
None)
if res == 0:
NSUserDefaults.standardUserDefaults().setObject_forKey_(u'YES', u'SkipFileBindingTest')
return
if __name__ == '__main__':
AppHelper.runEventLoop()
|
{
"content_hash": "31213fabcfc1166517baf7dfe998458b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 128,
"avg_line_length": 35.5,
"alnum_prop": 0.6416735708367854,
"repo_name": "albertz/music-player",
"id": "0059488adaf1751a281b3ad6c6cdfc69f01d2fc2",
"size": "2414",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Cocoa/Examples/AppKit/PyObjCLauncher/PyObjCLauncher.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
}
|
import os
from os.path import join as pjoin
import shutil
import time
import logging
import matplotlib
matplotlib.use('Agg', warn=False)
import numpy as np
import matplotlib.pyplot as plt
import seaborn
seaborn.set_style("dark")
import pandas as pd
from collections import OrderedDict
from datetime import datetime
import csv
import itertools
import stabilityfuncs as sf
from mako.lookup import TemplateLookup
makolookup = TemplateLookup(directories=['./tpl'])
# noinspection PyPep8Naming
def stabilitysummary(datadirectory, outputdirectory, whichscan, TargetisBIRNphantom):
logging.debug('stabilitysummary: running as stabilitysummary {} {} {} {}'.format(datadirectory,
outputdirectory,
whichscan,
'' if TargetisBIRNphantom else '--nonbirn'))
    # initialize the output directory if need be
if not os.path.exists(pjoin(outputdirectory, whichscan)):
os.makedirs(pjoin(outputdirectory, whichscan))
phantomtype = 'BIRN' if TargetisBIRNphantom else 'NONBIRN'
# scan the data directory for stability scans and populate the dict
stabilitydirs = os.listdir(datadirectory)
stabilitydirs = sorted([filename for filename in stabilitydirs if filename.startswith("stability_")])
datadict = {}
filenumber_TARGET = 0
for summaryfile in stabilitydirs:
logging.info('Beginning processing for ' + summaryfile)
datadict[filenumber_TARGET] = {}
try:
datadict[filenumber_TARGET]['datadir'] = pjoin(summaryfile, whichscan, 'procresults')
try:
datadict[filenumber_TARGET].update(sf.dict_from_tsvfile(
pjoin(datadirectory, datadict[filenumber_TARGET]['datadir'], 'analysissummary.txt')))
ObjectisBIRNphantom = (datadict[filenumber_TARGET]['Object'] == 'BIRN phantom')
if ObjectisBIRNphantom == TargetisBIRNphantom:
filenumber_TARGET += 1
except IOError:
pass
except KeyError:
pass
plot_epoch = pd.Timestamp(sf.stabilityparms('epoch', 'plots'))
plot_timelims = (plot_epoch, pd.Timestamp(datetime.now()))
df = pd.DataFrame.from_dict(datadict, orient='index', dtype=float)
df.DateTime = pd.to_datetime(df.DateTime)
    # express the min and max error values relative to the means
df.odd_ghost_min -= df.odd_ghost_mean
df.odd_ghost_max -= df.odd_ghost_mean
df.even_ghost_min -= df.even_ghost_mean
df.even_ghost_max -= df.even_ghost_mean
# read the plot config file
plotconfig = {}
for row in csv.DictReader(open('config/plots.csv')):
key = row.pop('plot')
row['variables'] = row['variables'].split(';')
row['legends'] = row['legends'].split(';')
plotconfig[key] = row
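    # Illustrative config/plots.csv content (header names inferred from the
    # keys used by this function; the data row is hypothetical):
    #
    #   plot,variables,legends,ymin,ymax,ylabel,title
    #   snr,mean_snr;min_snr,Mean;Min,0,50,SNR,Signal-to-noise ratio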
mostrecenttimes = {}
for targetcoil in ['TxRx_Head', 'HeadMatrix', '32Ch_Head']:
mostrecenttimes[targetcoil] = df[df.Coil == targetcoil].DateTime.max()
dftc = df[df.Coil == targetcoil].groupby('DateTime')
# plot ghost. this one is a little special because of the error bars, so we do it manually
even_errors = np.array(df.ix[df.Coil == targetcoil, ['even_ghost_min', 'even_ghost_max']]).T
odd_errors = np.array(df.ix[df.Coil == targetcoil, ['odd_ghost_min', 'odd_ghost_max']]).T
plt.hold(True)
dftc['even_ghost_mean'].mean().plot(yerr=even_errors, marker='*', linestyle='None', label='Evens')
dftc['odd_ghost_mean'].mean().plot(yerr=odd_errors, marker='.', linestyle='None', label='Odds')
plt.xlim(plot_timelims)
plt.ylim(0, 15)
plt.title('Ghost percentage')
plt.xlabel('Date')
plt.ylabel('Ghost amplitude (%)')
plt.legend()
plt.savefig(pjoin(outputdirectory, whichscan, '{}_ghost.png'.format(targetcoil)), format='png')
plt.hold(False)
plt.close()
# now let's do the rest from the config file.
for plotfilename, config in plotconfig.items():
plt.hold(True)
marker = itertools.cycle(('*', '.', 'o', '+', 'h'))
for i, variable in enumerate(config['variables']):
dftc[variable].mean().plot(marker=marker.next(), linestyle='None', label=config['legends'][i])
plt.xlim(plot_timelims)
plt.ylim(float(config['ymin']), float(config['ymax']))
plt.xlabel('Date')
plt.ylabel(config['ylabel'])
plt.title(config['title'])
plt.legend()
plt.hold(False)
plt.savefig(pjoin(outputdirectory, whichscan, '{}_{}.png'.format(targetcoil, plotfilename)), format='png')
plt.close()
for targetcoil in ['TxRx_Head', 'HeadMatrix', '32Ch_Head']:
outscandir = pjoin(outputdirectory, whichscan)
# TODO there's a simpler way to do this.
for i in range(filenumber_TARGET - 1, -1, -1):
if datadict[i]['Coil'] == targetcoil:
# copy the individual scan data if necessary
dat_procresults = pjoin(datadirectory, datadict[i]['datadir'])
out_procresults = pjoin(outputdirectory, whichscan, datadict[i]['datadir'])
if os.path.exists(out_procresults):
                    copypreamble = out_procresults + " exists... "
desttime = os.path.getmtime(out_procresults)
sourcetime = os.path.getmtime(pjoin(datadirectory, datadict[i]['datadir']))
if sourcetime >= desttime:
logging.debug(copypreamble + "and is modified - copying " + dat_procresults)
logging.debug('time difference={}'.format(desttime - sourcetime))
shutil.rmtree(out_procresults)
shutil.copytree(dat_procresults, out_procresults)
else:
logging.debug(copypreamble + "and is current - not copying")
else:
logging.debug(out_procresults + " does not already exist... copying")
shutil.copytree(dat_procresults, out_procresults)
# generate a report file
thisdate = time.strftime("%m/%d/%Y %H:%M:%S", time.localtime())
args32 = str(mostrecenttimes.get('32Ch_Head', '1970')).replace(' ', 'T')
args12 = str(mostrecenttimes.get('HeadMatrix', '1970')).replace(' ', 'T')
argscp = str(mostrecenttimes.get('TxRx_Head', '1970')).replace(' ', 'T')
tpl = makolookup.get_template('stabilityreport.html')
with open(pjoin(outputdirectory, whichscan, 'stabilityreport.html'), 'w') as fp:
coiltemplatedata = OrderedDict()
for targetcoil in ['TxRx_Head', 'HeadMatrix', '32Ch_Head']:
coiltemplatedata[targetcoil] = []
specs = sf.getlimits(targetcoil)
for i in range(filenumber_TARGET - 1, -1, -1):
if datadict[i]['Coil'] == targetcoil:
themarker = ""
flag = 0
for specid, spec in specs.items():
if spec['critical']:
if sf.limitcheck(datadict[i][specid], spec) > flag:
flag = sf.limitcheck(datadict[i][specid], spec)
if sf.limitcheck(datadict[i][specid], spec) > 0:
themarker = themarker + sf.qualitytag(spec['flag'], flag)
else:
themarker += " "
# populate the output db
coiltemplatedata[targetcoil].append({
'path': datadict[i]['datadir'],
'datetime': datadict[i]['Date'] + ' ' + datadict[i]['Time'],
'marker': themarker})
fp.write(tpl.render(**locals()))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Create the stability summary.')
parser.add_argument('--nonbirn', action='store_true', help='process as non-BIRN data')
parser.add_argument('datadirectory')
parser.add_argument('outputdirectory')
parser.add_argument('whichscan')
args = parser.parse_args()
stabilitysummary(args.datadirectory, args.outputdirectory, args.whichscan, not args.nonbirn)
|
{
"content_hash": "27daf3218f81f23bc0f8fe040e9fda61",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 129,
"avg_line_length": 46.69230769230769,
"alnum_prop": 0.5835490703694987,
"repo_name": "dmd/stabilitycalc",
"id": "20ceeedd243b8cc73522201c151e3b360e819ddf",
"size": "8521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stabilitysummary.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "25616"
},
{
"name": "Python",
"bytes": "100312"
},
{
"name": "Shell",
"bytes": "1592"
}
],
"symlink_target": ""
}
|
"""
Matplotlib plotting backend wrapper.
This module enables converting matplotlib plotting commands to
vispy plotting commands. Support is experimental and incomplete,
proceed with caution.
"""
__all__ = ['show']
try:
from matplotlib.pyplot import * # noqa
except ImportError:
def show():
raise ImportError('matplotlib could not be found')
else:
from ._mpl_to_vispy import show # noqa
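# Usage sketch (illustrative; assumes matplotlib and the experimental
# _mpl_to_vispy backend are both importable):
#
#   from vispy import mpl_plot as plt
#   plt.plot([1, 2, 3], [4, 5, 6])
#   plt.show()  # rendered through vispy rather than matplotlib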
|
{
"content_hash": "229b8af5c1d73f493da0aae711fccbad",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 64,
"avg_line_length": 24.176470588235293,
"alnum_prop": 0.7226277372262774,
"repo_name": "jay3sh/vispy",
"id": "ffa7134460bb740c0f81ad32a81a09cdff737bb4",
"size": "556",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "vispy/mpl_plot/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "202285"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2981105"
}
],
"symlink_target": ""
}
|
"""Converts the CuBERT VMR NC dataset (w/o output token copies) to PLUR."""
# TODO: Add a test for this dataset.
from plur.stage_1 import cubert_variable_misuse_repair_dataset
from plur.stage_1 import plur_dataset
class CuBertVariableMisuseRepairNoCopyDataset(
cubert_variable_misuse_repair_dataset.CuBertVariableMisuseRepairDataset):
"""As per superclass, but does not allow output copying."""
def __init__(self,
stage_1_dir,
configuration: plur_dataset.Configuration,
*args,
**kwargs) -> None:
"""As per superclass."""
super().__init__(
stage_1_dir, configuration, allow_output_copy=False, *args, **kwargs)
def dataset_name(self) -> str:
"""As per superclass."""
return 'cubert_variable_misuse_repair_nocopy_dataset'
def dataset_description(self) -> str:
"""As per superclass."""
return (super().dataset_description() +
'It confounds all output tokens, to prevent copying from input.')
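# Instantiation sketch (illustrative; the stage 1 directory and the
# Configuration value are hypothetical placeholders):
#
#   dataset = CuBertVariableMisuseRepairNoCopyDataset(
#       '/tmp/stage_1', plur_dataset.Configuration())
#   print(dataset.dataset_name())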
|
{
"content_hash": "1cc8c8ad42cf1b1de590cb130fcf5d7b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 36.07142857142857,
"alnum_prop": 0.6613861386138614,
"repo_name": "google-research/plur",
"id": "e59d4a4f066b952ffcd06720a219a85cba19f5e2",
"size": "1585",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "plur/stage_1/cubert_variable_misuse_repair_nocopy_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "630536"
}
],
"symlink_target": ""
}
|
"""
WSGI config for Quotes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Quotes.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
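# A WSGI server can then be pointed at this callable, e.g. (assuming
# gunicorn is installed):
#
#   gunicorn Quotes.wsgi:application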
|
{
"content_hash": "acb3d10617aa24d13cf36abf96b93b98",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.642857142857142,
"alnum_prop": 0.772609819121447,
"repo_name": "GitSomeCode/Random-Quote",
"id": "a78ff51599a44a48db828a0aedbe15b37726dc1d",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Quotes/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "257"
},
{
"name": "Perl",
"bytes": "61"
},
{
"name": "Python",
"bytes": "5909"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import logging
import unittest
from unittest.mock import MagicMock, patch
import pytest
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.models.dag import DAG
from airflow.providers.apache.livy.hooks.livy import BatchState, LivyHook
from airflow.providers.apache.livy.operators.livy import LivyOperator
from airflow.utils import db, timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
mock_livy_client = MagicMock()
BATCH_ID = 100
LOG_RESPONSE = {"total": 3, "log": ["first_line", "second_line", "third_line"]}
class TestLivyOperator(unittest.TestCase):
def setUp(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", default_args=args)
db.merge_conn(
Connection(
conn_id="livyunittest", conn_type="livy", host="localhost:8998", port="8998", schema="http"
)
)
@patch(
"airflow.providers.apache.livy.operators.livy.LivyHook.dump_batch_logs",
return_value=None,
)
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state")
def test_poll_for_termination(self, mock_livy, mock_dump_logs):
state_list = 2 * [BatchState.RUNNING] + [BatchState.SUCCESS]
def side_effect(_, retry_args):
if state_list:
return state_list.pop(0)
            # fail if polling does not stop once the terminal state is reached
raise AssertionError()
mock_livy.side_effect = side_effect
task = LivyOperator(file="sparkapp", polling_interval=1, dag=self.dag, task_id="livy_example")
task._livy_hook = task.get_hook()
task.poll_for_termination(BATCH_ID)
mock_livy.assert_called_with(BATCH_ID, retry_args=None)
mock_dump_logs.assert_called_with(BATCH_ID)
assert mock_livy.call_count == 3
@patch(
"airflow.providers.apache.livy.operators.livy.LivyHook.dump_batch_logs",
return_value=None,
)
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state")
def test_poll_for_termination_fail(self, mock_livy, mock_dump_logs):
state_list = 2 * [BatchState.RUNNING] + [BatchState.ERROR]
def side_effect(_, retry_args):
if state_list:
return state_list.pop(0)
            # fail if polling does not stop once the terminal state is reached
raise AssertionError()
mock_livy.side_effect = side_effect
task = LivyOperator(file="sparkapp", polling_interval=1, dag=self.dag, task_id="livy_example")
task._livy_hook = task.get_hook()
with pytest.raises(AirflowException):
task.poll_for_termination(BATCH_ID)
mock_livy.assert_called_with(BATCH_ID, retry_args=None)
mock_dump_logs.assert_called_with(BATCH_ID)
assert mock_livy.call_count == 3
@patch(
"airflow.providers.apache.livy.operators.livy.LivyHook.dump_batch_logs",
return_value=None,
)
@patch(
"airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state",
return_value=BatchState.SUCCESS,
)
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.post_batch", return_value=BATCH_ID)
def test_execution(self, mock_post, mock_get, mock_dump_logs):
task = LivyOperator(
livy_conn_id="livyunittest",
file="sparkapp",
polling_interval=1,
dag=self.dag,
task_id="livy_example",
)
task.execute(context={})
call_args = {k: v for k, v in mock_post.call_args[1].items() if v}
assert call_args == {"file": "sparkapp"}
mock_get.assert_called_once_with(BATCH_ID, retry_args=None)
mock_dump_logs.assert_called_once_with(BATCH_ID)
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.post_batch")
def test_execution_with_extra_options(self, mock_post):
extra_options = {"check_response": True}
task = LivyOperator(
file="sparkapp", dag=self.dag, task_id="livy_example", extra_options=extra_options
)
task.execute(context={})
assert task.get_hook().extra_options == extra_options
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.delete_batch")
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.post_batch", return_value=BATCH_ID)
def test_deletion(self, mock_post, mock_delete):
task = LivyOperator(
livy_conn_id="livyunittest", file="sparkapp", dag=self.dag, task_id="livy_example"
)
task.execute(context={})
task.kill()
mock_delete.assert_called_once_with(BATCH_ID)
def test_injected_hook(self):
def_hook = LivyHook(livy_conn_id="livyunittest")
task = LivyOperator(file="sparkapp", dag=self.dag, task_id="livy_example")
task._livy_hook = def_hook
assert task.get_hook() == def_hook
@patch(
"airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state",
return_value=BatchState.SUCCESS,
)
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_logs", return_value=LOG_RESPONSE)
@patch("airflow.providers.apache.livy.operators.livy.LivyHook.post_batch", return_value=BATCH_ID)
def test_log_dump(self, mock_post, mock_get_logs, mock_get):
task = LivyOperator(
livy_conn_id="livyunittest",
file="sparkapp",
dag=self.dag,
task_id="livy_example",
polling_interval=1,
)
with self.assertLogs(task.get_hook().log, level=logging.INFO) as cm:
task.execute(context={})
assert "INFO:airflow.providers.apache.livy.hooks.livy.LivyHook:first_line" in cm.output
assert "INFO:airflow.providers.apache.livy.hooks.livy.LivyHook:second_line" in cm.output
assert "INFO:airflow.providers.apache.livy.hooks.livy.LivyHook:third_line" in cm.output
mock_get.assert_called_once_with(BATCH_ID, retry_args=None)
mock_get_logs.assert_called_once_with(BATCH_ID, 0, 100)
|
{
"content_hash": "8dba3af9cee9c06d430d7dcb2afb47d7",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 109,
"avg_line_length": 38.59748427672956,
"alnum_prop": 0.6488512302427897,
"repo_name": "nathanielvarona/airflow",
"id": "8f4e7e04f5b2397613195d7feb05b7c9cccff357",
"size": "6922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/apache/livy/operators/test_livy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
}
|
import setuptools
setuptools.setup(
setup_requires=['pbr>=1.3'],
pbr=True)
|
{
"content_hash": "dbd108d6efd1da8c2a89c6aef0e499e4",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 16.8,
"alnum_prop": 0.6666666666666666,
"repo_name": "citrix-openstack/nodepool",
"id": "4e7501ef5e0098e756e316fe07e9c1235372ca5e",
"size": "717",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "283969"
},
{
"name": "Shell",
"bytes": "10184"
}
],
"symlink_target": ""
}
|
'''
Lux extension for integrating SQL and NoSQL into applications.
The extension creates a new application attribute called ``odm``
which can be used to access object data mappers for different backends.
To access the ``sql`` mapper:
sql = app.odm('sql')
in a router handler:
def get(self, request):
sql = request.app.odm('sql')
with sql.session().begin() as session:
...
'''
import lux
from lux import Parameter
from pulsar.utils.log import LocalMixin
from .exc import *
from .mapper import Mapper, Model
from .serialise import tojson
from .views import CRUD
class Extension(lux.Extension):
'''Object data mapper extension
'''
_config = [
Parameter('DATASTORE', None,
'Dictionary for mapping models to their back-ends database')
]
def on_config(self, app):
'''Initialise Object Data Mapper'''
app.odm = Odm(app, app.config['DATASTORE'])
class Odm(LocalMixin):
'''Lazy object data mapper container
Usage:
odm = app.odm()
'''
def __init__(self, app, binds):
self.app = app
self.binds = binds
def __call__(self):
if self.local.mapper is None:
self.local.mapper = Mapper(self.app, self.binds)
return self.local.mapper
def database_create(self, database, **params):
odm = Odm(self.app, self.binds)
odm.local.mapper = self().database_create(database, **params)
return odm
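# Usage sketch (illustrative; ``app`` is a configured lux application):
#
#   mapper = app.odm()                       # Mapper is built lazily on first call
#   new_odm = app.odm.database_create('db')  # wrapper bound to the newly created database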
|
{
"content_hash": "bd9d56437abd9481288a808a51fb1a02",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 24.360655737704917,
"alnum_prop": 0.6318977119784657,
"repo_name": "tazo90/lux",
"id": "137f82302f85263794a411750a1f5d6c98aaba75",
"size": "1486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/extensions/odm/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85029"
},
{
"name": "HTML",
"bytes": "17331"
},
{
"name": "JavaScript",
"bytes": "354892"
},
{
"name": "Python",
"bytes": "543161"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
workspace_name: str,
subscription_id: str,
*,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if skip is not None:
_params["$skip"] = _SERIALIZER.query("skip", skip, "str")
if list_view_type is not None:
_params["listViewType"] = _SERIALIZER.query("list_view_type", list_view_type, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str"),
"name": _SERIALIZER.url("name", name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str"),
"name": _SERIALIZER.url("name", name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, workspace_name: str, name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str"),
"name": _SERIALIZER.url("name", name, "str", pattern=r"^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
class DataContainersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.machinelearningservices.MachineLearningServicesMgmtClient`'s
:attr:`data_containers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self,
resource_group_name: str,
workspace_name: str,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, _models.ListViewType]] = None,
**kwargs: Any
) -> Iterable["_models.DataContainer"]:
"""List data containers.
List data containers.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
:param list_view_type: View type for including/excluding (for example) archived entities. Known
values are: "ActiveOnly", "ArchivedOnly", and "All". Default value is None.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataContainer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.DataContainer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.DataContainerResourceArmPaginatedResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip=skip,
list_view_type=list_view_type,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DataContainerResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data"
}
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any
) -> None:
"""Delete container.
Delete container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param name: Container name. Required.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
}
@distributed_trace
def get(self, resource_group_name: str, workspace_name: str, name: str, **kwargs: Any) -> _models.DataContainer:
"""Get container.
Get container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param name: Container name. Required.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataContainer", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
}
@overload
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
name: str,
body: _models.DataContainer,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataContainer:
"""Create or update container.
Create or update container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
:type body: ~azure.mgmt.machinelearningservices.models.DataContainer
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataContainer:
"""Create or update container.
Create or update container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param name: Container name. Required.
:type name: str
:param body: Container entity to create or update. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
name: str,
body: Union[_models.DataContainer, IO],
**kwargs: Any
) -> _models.DataContainer:
"""Create or update container.
Create or update container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param name: Container name. Required.
:type name: str
        :param body: Container entity to create or update. Is either a model type or an IO type.
Required.
:type body: ~azure.mgmt.machinelearningservices.models.DataContainer or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataContainer or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DataContainer
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DataContainer] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
_json = self._serialize.body(body, "DataContainer")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DataContainer", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DataContainer", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}"
}
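# Usage sketch (illustrative; ``ml_client`` is assumed to be an authenticated
# MachineLearningServicesMgmtClient, accessed as described in the class docstring):
#
#   for container in ml_client.data_containers.list(
#           resource_group_name='my-rg', workspace_name='my-ws'):
#       print(container.name)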
|
{
"content_hash": "08fde9107ebf667f956040a6ea10e442",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 166,
"avg_line_length": 42.26973684210526,
"alnum_prop": 0.6383268482490272,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3b5d3acc38c6c7ab2589c4eb0f49fa84a9b66a6b",
"size": "26200",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_data_containers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class PolicykeyResource(Model):
"""Namespace/NotificationHub Regenerate Keys.
:param policy_key: Name of the key that has to be regenerated for the
Namespace/Notification Hub Authorization Rule. The value can be Primary
Key/Secondary Key.
:type policy_key: str
"""
_attribute_map = {
'policy_key': {'key': 'policyKey', 'type': 'str'},
}
def __init__(self, policy_key=None):
self.policy_key = policy_key
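# Construction sketch (illustrative):
#
#   body = PolicykeyResource(policy_key='Primary Key')
#   # msrest serializes the ``policy_key`` attribute to the wire name 'policyKey'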
|
{
"content_hash": "c3bfca9a61be47e471d89237bb0bc721",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 76,
"avg_line_length": 27.88888888888889,
"alnum_prop": 0.6593625498007968,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "f7707d3d77613f041682501351efa0f8b17e8ca0",
"size": "976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/policykey_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
}
|
from django.db import connection
from apps.commons.control_model import SoftDeletionManager
class MarketManager(SoftDeletionManager):
def raw_as_qs(self, raw_query, params=()):
"""Execute a raw query and return a QuerySet. The first column in the
result set must be the id field for the model.
:type raw_query: str | unicode
:type params: tuple[T] | dict[str | unicode, T]
:rtype: django.db.models.query.QuerySet
"""
cursor = connection.cursor()
try:
cursor.execute(raw_query, params)
return self.filter(id__in=(x[0] for x in cursor))
finally:
cursor.close()
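# Usage sketch (hypothetical; assumes a ``Market`` model whose manager is a
# MarketManager and whose table is ``markets_market``):
#
#   qs = Market.objects.raw_as_qs(
#       'SELECT id FROM markets_market WHERE total_volume > %s', (1000,))
#   qs = qs.filter(...)  # unlike Manager.raw(), the result is a chainable QuerySet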
|
{
"content_hash": "3b3bad290adc663c923d79c6dad1e31c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 35.526315789473685,
"alnum_prop": 0.6266666666666667,
"repo_name": "we-inc/mms-snow-white-and-the-seven-pandas",
"id": "87e921538b8e38012e888582346d6c717ce4fdec",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webserver/apps/markets/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30661"
},
{
"name": "HTML",
"bytes": "96287"
},
{
"name": "JavaScript",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "180875"
},
{
"name": "Shell",
"bytes": "1018"
},
{
"name": "TypeScript",
"bytes": "289907"
}
],
"symlink_target": ""
}
|
from slicc.ast.ExprAST import ExprAST
from slicc.symbols import Func, Type
class FuncCallExprAST(ExprAST):
def __init__(self, slicc, proc_name, exprs):
super(FuncCallExprAST, self).__init__(slicc)
self.proc_name = proc_name
self.exprs = exprs
def __repr__(self):
return "[FuncCallExpr: %s %s]" % (self.proc_name, self.exprs)
def generate(self, code):
machine = self.state_machine
if self.proc_name == "DPRINTF":
            # Code for inserting the location of the DPRINTF()
            # statement in the .sm file into the statement itself.
# 'self.exprs[0].location' represents the location.
# 'format' represents the second argument of the
# original DPRINTF() call. It is left unmodified.
# str_list is used for concatenating the argument
# list following the format specifier. A DPRINTF()
# call may or may not contain any arguments following
# the format specifier. These two cases need to be
# handled differently. Hence the check whether or not
# the str_list is empty.
format = "%s" % (self.exprs[1].inline())
format_length = len(format)
str_list = []
for i in range(2, len(self.exprs)):
str_list.append("%s" % self.exprs[i].inline())
if len(str_list) == 0:
code('DPRINTF(RubySlicc, "$0: $1")',
self.exprs[0].location, format[2:format_length-2])
else:
code('DPRINTF(RubySlicc, "$0: $1", $2)',
self.exprs[0].location, format[2:format_length-2],
', '.join(str_list))
return self.symtab.find("void", Type)
# hack for adding comments to profileTransition
if self.proc_name == "APPEND_TRANSITION_COMMENT":
# FIXME - check for number of parameters
code("APPEND_TRANSITION_COMMENT($0)", self.exprs[0].inline())
return self.symtab.find("void", Type)
# Look up the function in the symbol table
func = self.symtab.find(self.proc_name, Func)
# Check the types and get the code for the parameters
if func is None:
self.error("Unrecognized function name: '%s'", self.proc_name)
if len(self.exprs) != len(func.param_types):
self.error("Wrong number of arguments passed to function : '%s'" +\
" Expected %d, got %d", self.proc_name,
len(func.param_types), len(self.exprs))
cvec = []
type_vec = []
for expr,expected_type in zip(self.exprs, func.param_types):
# Check the types of the parameter
actual_type,param_code = expr.inline(True)
if str(actual_type) != str(expected_type):
expr.error("Type mismatch: expected: %s actual: %s" % \
(expected_type, actual_type))
cvec.append(param_code)
type_vec.append(expected_type)
        # OK, the semantics of "trigger" here is that ports in the
        # machine have different priorities. We always check the first
        # port for doable transitions. If nothing is doable or it is
        # stalled, we pick one from the next port.
        #
        # One thing we have to be careful about as the SLICC protocol
        # writer is: if two or more transitions can be picked from one
        # port in one cycle, they must be independent. Otherwise, if
        # transitions A and B are meant to execute sequentially and A
        # gets stalled, transition B can be issued erroneously. In
        # practice, in most cases only one transition should be
        # executed in one cycle for a given port, as is true of most
        # current protocols.
if self.proc_name == "trigger":
code('''
{
Address addr = ${{cvec[1]}};
''')
if machine.TBEType != None and machine.EntryType != None:
code('''
TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[3]}}, addr);
''')
elif machine.TBEType != None:
code('''
TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, addr);
''')
elif machine.EntryType != None:
code('''
TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, addr);
''')
else:
code('''
TransitionResult result = doTransition(${{cvec[0]}}, addr);
''')
code('''
if (result == TransitionResult_Valid) {
counter++;
continue; // Check the first port again
}
if (result == TransitionResult_ResourceStall) {
scheduleEvent(this, 1);
        // Cannot do anything with this transition, go check next doable transition (most likely of next port)
}
}
''')
elif self.proc_name == "doubleTrigger":
# NOTE: Use the doubleTrigger call with extreme caution
            # the key to double trigger is that the second event triggered
            # cannot fail because the first event cannot be undone
assert len(cvec) == 4
code('''
{
Address addr1 = ${{cvec[1]}};
TransitionResult result1 =
doTransition(${{cvec[0]}}, ${machine}_getState(addr1), addr1);
if (result1 == TransitionResult_Valid) {
        // this second event cannot fail because the first event
        // already took effect
        Address addr2 = ${{cvec[3]}};
        TransitionResult result2 = doTransition(${{cvec[2]}}, ${machine}_getState(addr2), addr2);
        // ensure the event succeeded
assert(result2 == TransitionResult_Valid);
counter++;
continue; // Check the first port again
}
if (result1 == TransitionResult_ResourceStall) {
scheduleEvent(this, 1);
// Cannot do anything with this transition, go check next
        // doable transition (most likely of next port)
}
}
''')
elif self.proc_name == "error":
code("$0", self.exprs[0].embedError(cvec[0]))
elif self.proc_name == "assert":
error = self.exprs[0].embedError('"assert failure"')
code('''
#ifndef NDEBUG
if (!(${{cvec[0]}})) {
$error
}
#endif
''')
elif self.proc_name == "continueProcessing":
code("counter++;")
code("continue; // Check the first port again")
elif self.proc_name == "set_cache_entry":
code("set_cache_entry(m_cache_entry_ptr, %s);" %(cvec[0]));
elif self.proc_name == "unset_cache_entry":
code("unset_cache_entry(m_cache_entry_ptr);");
elif self.proc_name == "set_tbe":
code("set_tbe(m_tbe_ptr, %s);" %(cvec[0]));
elif self.proc_name == "unset_tbe":
code("unset_tbe(m_tbe_ptr);");
else:
# Normal function
# if the func is internal to the chip but not the machine
# then it can only be accessed through the chip pointer
internal = ""
if "external" not in func and not func.isInternalMachineFunc:
internal = "m_chip_ptr->"
params = ""
first_param = True
for (param_code, type) in zip(cvec, type_vec):
if first_param:
params = str(param_code)
first_param = False
else:
params += ', '
                    params += str(param_code)
fix = code.nofix()
code('(${internal}${{func.c_ident}}($params))')
code.fix(fix)
return func.return_type
|
{
"content_hash": "970e42baf947552fc323f748fdb83b6b",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 112,
"avg_line_length": 37.63235294117647,
"alnum_prop": 0.5550345186921974,
"repo_name": "xiaoyaozi5566/DiamondCache",
"id": "ab6261de270a0d036b9753bf5d74797f5b1c0774",
"size": "9280",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/mem/slicc/ast/FuncCallExprAST.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "232026"
},
{
"name": "Batchfile",
"bytes": "39416"
},
{
"name": "C",
"bytes": "803908"
},
{
"name": "C++",
"bytes": "9917079"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "269390"
},
{
"name": "Hack",
"bytes": "2615"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "15748"
},
{
"name": "Perl",
"bytes": "45848431"
},
{
"name": "Perl6",
"bytes": "47620411"
},
{
"name": "Prolog",
"bytes": "1032702"
},
{
"name": "Python",
"bytes": "3265983"
},
{
"name": "Ruby",
"bytes": "54498"
},
{
"name": "Scilab",
"bytes": "56104"
},
{
"name": "Shell",
"bytes": "50545"
},
{
"name": "TeX",
"bytes": "19361"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "16048"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
import horizon
from horizon.test.test_dashboards.dogs import dashboard
class Puppies(horizon.Panel):
name = _("Puppies")
slug = "puppies"
dashboard.Dogs.register(Puppies)
|
{
"content_hash": "40607ad1ca9766f3764ae02474ba41f4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 55,
"avg_line_length": 18.53846153846154,
"alnum_prop": 0.7510373443983402,
"repo_name": "zestrada/horizon-cs498cc",
"id": "a660436d9454cff7785e18fa9f9d0cf1d2f41748",
"size": "241",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "horizon/test/test_dashboards/dogs/puppies/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "271158"
},
{
"name": "Python",
"bytes": "1562543"
},
{
"name": "Shell",
"bytes": "12674"
}
],
"symlink_target": ""
}
|
class Entity:
def __init__(self, name):
self.name = name
self._ports = []
self._generics = []
def add_port(self, port):
self._ports.append(port)
def get_ports(self):
for port in self._ports:
yield port
|
{
"content_hash": "1f17f4eabcffb3784581e19a13e833f8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 32,
"avg_line_length": 20.692307692307693,
"alnum_prop": 0.5130111524163569,
"repo_name": "popas90/vhdl-parser",
"id": "a9734ebc23ced5a0c959d853ec40b69297d52fd0",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Entity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "33542"
},
{
"name": "Python",
"bytes": "28545"
},
{
"name": "Shell",
"bytes": "1878"
},
{
"name": "VHDL",
"bytes": "434"
}
],
"symlink_target": ""
}
|
from pywps.tests import WpsClient, WpsTestResponse
class WpsTestClient(WpsClient):
def get(self, *args, **kwargs):
query = "?"
for key, value in kwargs.iteritems():
query += "{0}={1}&".format(key, value)
return super(WpsTestClient, self).get(query)
def client_for(service):
return WpsTestClient(service, WpsTestResponse)
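# Usage sketch in a test (illustrative; ``Service`` and the process list are
# assumptions, not defined in this module):
#
#   from pywps import Service
#   client = client_for(Service(processes=[MyProcess()]))
#   resp = client.get(service='wps', request='getcapabilities', version='1.0.0')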
|
{
"content_hash": "01ca33e19dcfbd110f50e88e5a67ba2c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 52,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.6513513513513514,
"repo_name": "bird-house/birdhouse-workshop",
"id": "c0eba7c842056032a736815d4dad7f9964477880",
"size": "370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/tests/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "281359"
},
{
"name": "Python",
"bytes": "18958"
}
],
"symlink_target": ""
}
|
'''
hdf5 viewer to display images, tables, attributes and trees of a hdf5 file.
'''
import os,sys
import wx,h5py
import wx.py
from hdfTree import *
from hdfGrid import *
from hdfAttrib import *
from hdfImage import *
try:
from hdfImageGL import *
except ImportError as e:
print 'ImportError: '+e.message
try:
from FrmPyFAI import *
except ImportError as e:
print 'ImportError: '+e.message
try:
from FrmProcRoiStat import ProcRoiStatFrame
except ImportError as e:
print 'ImportError: '+e.message
import utilities as ut
class AboutFrame(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,-1,'About h5pyViewer',size=(300,330))
imgDir=ut.Path.GetImage()
icon = wx.Icon(os.path.join(imgDir,'h5pyViewer.ico'), wx.BITMAP_TYPE_ICO)
self.SetIcon(icon)
self.Centre()
panel=wx.Panel(self,-1)
import pkg_resources
v=pkg_resources.get_distribution("h5pyViewer")
s='Version:'+str(v)+'\n(c) www.psi.ch\n Author: Thierry Zamofing\n thierry.zamofing@psi.ch'
st0=wx.StaticText(panel,-1,s,(30,10))
bmp = wx.StaticBitmap(panel,-1,wx.Bitmap(os.path.join(imgDir,'splash1.png'), wx.BITMAP_TYPE_ANY ), (30,st0.Position[1]+st0.Size[1]+10))
for k,v in os.environ.iteritems():
print k,'=',v
class HdfTreePopupMenu(wx.Menu):
def __init__(self, wxObjSrc):
wx.Menu.__init__(self)
self.wxObjSrc=wxObjSrc
self.AddMenu(self.OnShowAttrib,"Show Attributes")
self.AddMenu(self.OnShowData,"Show Data")
self.AddMenu(self.OnShowImage,"Show Image")
self.AddMenu(self.OnShowImageGL,"Show Image OpenGL")
self.AddMenu(self.OnShowImgFAI1D,"Show Azimuthal Integral Image 1D")
self.AddMenu(self.OnShowImgFAI2D,"Show Azimuthal Integral Image 2D")
self.AddMenu(self.OnShowRoiStat,"Show Roi Statistics")
self.AddMenu(self.OnShell,"Python Shell")
self.AddMenu(self.OnPrintProperties,"Print Properties")
self.AddMenu(self.OnItem2,"Item Two")
self.AddMenu(self.OnItem3,"Item Three")
def AddMenu(self,func,lbl):
item = wx.MenuItem(self, -1, lbl)
self.AppendItem(item);
self.Bind(wx.EVT_MENU, func, item)
return item
def OnShowAttrib(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
if type(hid)==tuple: hid=hid[0] #external link->get dataset
if type(hid)==h5py.h5f.FileID:
hid=h5py.h5o.open(hid,'/')
frame=HdfAttribFrame(wxTree,lbl,hid)
frame.Show(True)
def OnShowData(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
if type(hid)==tuple: hid=hid[0] #external link->get dataset
frame=HdfGridFrame(wxTree,lbl,hid)
frame.Show(True)
def OnShowImage(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
if type(hid)==tuple: hid=hid[0] #external link->get dataset
frame=HdfImageFrame(wxTree,lbl,hid)
frame.Show(True)
def OnShowImageGL(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
if type(hid)==tuple: hid=hid[0] #external link->get dataset
frame=HdfImageGLFrame(wxTree,lbl,hid)
frame.Show(True)
def OnShowImgFAI1D(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
if type(hid)==tuple: hid=hid[0] #external link->get dataset
frame=HdfPyFAI1DFrame(wxTree,lbl,hid)
frame.Show(True)
def OnShowImgFAI2D(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
if type(hid)==tuple: hid=hid[0] #external link->get dataset
frame=HdfPyFAIFrame(wxTree,lbl,hid)
frame.Show(True)
def OnShowRoiStat(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
if type(hid)==tuple: hid=hid[0] #external link->get dataset
dlg = wx.FileDialog(wxTree, "Choose valid mask file (e.g. pilatus_valid_mask.mat)", os.getcwd(), '','MATLAB files (*.mat)|*.mat|all (*.*)|*.*', wx.FD_OPEN|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
fnValMsk= dlg.GetPath()
print 'OnOpen',fnValMsk
dlg.Destroy()
if not fnValMsk: return
dlg = wx.FileDialog(wxTree, "Choose ROI mask file (e.g. pilatus_integration_mask.mat)", os.getcwd(), '','MATLAB files (*.mat)|*.mat|all (*.*)|*.*', wx.FD_OPEN|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
fnIntegMsk = dlg.GetPath()
print 'OnOpen',fnIntegMsk
dlg.Destroy()
if not fnIntegMsk: return
#fnMatRoi='/scratch/detectorData/cSAXS_2013_10_e14608_georgiadis_3D_for_Marianne/analysis/data/pilatus_integration_mask.mat'
frame=ProcRoiStatFrame(wxTree,lbl,hid,fnValMsk,fnIntegMsk)
frame.Show(True)
def OnShell(self, event):
wxTree,wxNode=self.wxObjSrc
frame = wx.Frame(wxTree, -1, "wxPyShell",size=wx.Size(800, 500))
imgDir=ut.Path.GetImage()
icon = wx.Icon(os.path.join(imgDir,'h5pyViewer.ico'), wx.BITMAP_TYPE_ICO)
frame.SetIcon(icon)
frame.Centre()
wnd=app.GetTopWindow()
loc={'app' :app,
'fid' :app.GetTopWindow().fid,
'lbl' :wxTree.GetItemText(wxNode),
'hid' :wxTree.GetPyData(wxNode),
'h5py' : h5py
}
introText='''Shell to the HDF5 objects
app: application object
fid: hdf5 file object
lbl: label of selected hdf5 object
hid: selected hdf5 object
#Examples:
import h5py
ds=h5py.Dataset(hid)
ds[1,:,:]
#using user defined modules
import userSample as us;reload(us);us.test1(hid)
'''
shell=wx.py.shell.Shell(frame, introText=introText,locals=loc)
frame.Show(True)
#if loc is None, all variables are visible. the context is global
#shell.push('wnd=app.GetTopWindow()')
#for cmd in [
# 'wnd=app.GetTopWindow();wxTree=wnd.wxTree',
# 'wxNode=wnd.wxTree.GetSelection()',
# 'print wnd.fid',
# 'lbl=wxTree.GetItemText(wxNode)',
# 'hid=wxTree.GetPyData(wxNode)']:
# shell.run(cmd, prompt=False)
def OnPrintProperties(self, event):
wxTree,wxNode=self.wxObjSrc
lbl=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
print HdfViewerFrame.GetPropertyStr(wxTree,wxNode)
def OnItem2(self, event):
print 'OnItem2'
pass
def OnItem3(self, event):
print 'OnItem3'
pass
class HdfViewerFrame(wx.Frame):
def OpenFile(self,fnHDF):
try:
self.fid=h5py.h5f.open(fnHDF,flags=h5py.h5f.ACC_RDONLY)
except IOError as e:
sys.stderr.write('Unable to open File: '+fnHDF+'\n')
else:
self.wxTree.ShowHirarchy(self.fid)
def CloseFile(self):
#http://docs.wxwidgets.org/2.8/wx_windowdeletionoverview.html#windowdeletionoverview
#print 'CloseFile'
try:
self.fid.close()
del self.fid
except AttributeError as e:
pass
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=wx.Size(650, 350))
imgDir=ut.Path.GetImage()
icon = wx.Icon(os.path.join(imgDir,'h5pyViewer.ico'), wx.BITMAP_TYPE_ICO)
self.SetIcon(icon)
wxSplt = wx.SplitterWindow(self, -1)
wxTree = HdfTreeCtrl(wxSplt, 1, wx.DefaultPosition, (-1,-1), wx.TR_HAS_BUTTONS)
wxTree.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged, id=1)
wxTree.Bind(wx.EVT_TREE_ITEM_MENU, self.OnMenu, id=1)
#wx.EVT_TREE_ITEM_MENU(id, func)
wxTxt = wx.StaticText(wxSplt, -1, '',(10,10) )#, style=wx.ALIGN_CENTRE)
wxSplt.SplitVertically(wxTree, wxTxt)
wxSplt.SetMinimumPaneSize(320)
#wxLstCtrl=HdfAttrListCtrl(wxSplt)
#wxSplt.SplitVertically(wxTree, wxLstCtrl)
self.BuildMenu()
self.Centre()
self.wxTree=wxTree
self.display=wxTxt
def __del__(self):
self.CloseFile()
def OnOpen(self, event):
dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), '','HDF5 files (*.hdf5;*.h5)|*.hdf5;*.h5|all (*.*)|*.*', wx.FD_OPEN|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
#mypath = os.path.basename(path)
#self.SetStatusText("You selected: %s" % mypath)
self.CloseFile()
self.OpenFile(path)
#print 'OnOpen',path
dlg.Destroy()
def OnCloseWindow(self, event):
#print 'OnCloseWindow'
self.Destroy()
def OnAbout(self,event):
frame=AboutFrame(self)
frame.Show()
def BuildMenu(self):
#http://wiki.wxpython.org/AnotherTutorial#wx.MenuBar
mnBar = wx.MenuBar()
#-------- File Menu --------
mn = wx.Menu()
mnItem=mn.Append(wx.ID_OPEN, '&Open', 'Open a new document');self.Bind(wx.EVT_MENU, self.OnOpen, mnItem)
#mnSub = wx.Menu()
#mnItem=mnSub.Append(wx.ID_ANY, 'SubMenuEntry', 'My SubMenuEntry')
#mn.AppendMenu(wx.ID_ANY, 'SubMenu', mnSub)
mn.AppendSeparator()
mnItem=mn.Append(wx.ID_EXIT, '&Quit', 'Quit the Application');self.Bind(wx.EVT_MENU, self.OnCloseWindow, mnItem)
mnBar.Append(mn, '&File')
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
#-------- Edit Menu --------
#mn = wx.Menu()
#mnBar.Append(mn, '&Edit')
#-------- Help Menu --------
mn = wx.Menu()
#mnItem=mn.Append(wx.ID_HELP,'Help','Application Help')
mnItem=mn.Append(wx.ID_ABOUT,'About','Application About');self.Bind(wx.EVT_MENU, self.OnAbout, mnItem)
mnBar.Append(mn, '&Help')
#mn.AppendSeparator()
#mnItem = wx.MenuItem(mn, 105, '&Quit\tCtrl+Q', 'Quit the Application')
#mnItem.SetBitmap(wx.Image('stock_exit-16.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap())
#mn.AppendItem(mnItem)
self.SetMenuBar(mnBar)
self.CreateStatusBar()
@staticmethod
def GetPath(wxTree,wxNode):
if wxTree.GetRootItem()==wxNode:
hid=wxTree.GetPyData(wxNode)
return hid.name
wxNodeParent=wxTree.GetItemParent(wxNode)
if wxTree.GetRootItem()==wxNodeParent:
return wxTree.GetItemText(wxNode)
else:
return HdfViewerFrame.GetPath(wxTree,wxNodeParent)+'/'+wxTree.GetItemText(wxNode)
@staticmethod
def GetPropertyStr(wxTree,wxNode):
path=str(HdfViewerFrame.GetPath(wxTree,wxNode))
hidStr=wxTree.GetItemText(wxNode)
hid=wxTree.GetPyData(wxNode)
#o=wxTree.GetItemData(wxNode)
#print o.Data,wxTree.GetPyData(wxNode)
#if type(gid)==h5py.h5g.GroupID:
txt=path+'\n'
t=type(hid)
if t==tuple:
if hid[0] is None:
txt+='missing external link:\n '+hid[1]+'\n '+hid[2]
return txt
else:
txt+='external link:\n '+hid[1]+'\n '+hid[2]
hid=hid[0]
t=type(hid)
if t==h5py.h5f.FileID:
txt+=type(hid).__name__+':%d\n'%hid.id
hid=h5py.h5o.open(hid,'/')
t=type(hid)
objInf=h5py.h5o.get_info(hid)
#print t,hid.id,objInf.fileno, objInf.rc, objInf.type, objInf.addr, objInf.hdr
txt+=type(hid).__name__+':%d\n'%hid.id
txt+='addr:%d fileno:%d refCnt:%d\n'%(objInf.addr,objInf.fileno, objInf.rc)
try:
wxNodeParent=wxTree.GetItemParent(wxNode)
txtParent=wxTree.GetItemText(wxNode)
dataParent=wxTree.GetPyData(wxNode)
gid=wxTree.GetPyData(wxNodeParent)
softLnk=gid.get_linkval(hidStr)
except BaseException as e:
pass
else:
txt+='Soft Link:'+softLnk+'\n'
try: numAttr=h5py.h5a.get_num_attrs(hid)
except ValueError as e:
pass
else:
if numAttr>20:
txt+='Attributes:%d (too many to show)\n'%numAttr
else:
txt+='Attributes:%d\n'%numAttr
for idxAttr in range(numAttr):
aid=h5py.h5a.open(hid,index=idxAttr)
txt+='\t'+aid.name+'\t'+str(GetAttrVal(aid))+'\n'
val=None
if t==h5py.h5g.GroupID:
pass
elif t==h5py.h5d.DatasetID:
txt+='\nshape: '+str(hid.shape)+'\n'
tt=hid.get_type()
ttt=type(tt)
if ttt==h5py.h5t.TypeCompoundID:
txt+='type: Compound\n'
elif ttt==h5py.h5t.TypeStringID:
sz=tt.get_size()
txt+='type: String (length %d)\n'%sz
else:
txt+='type: '+str(tt.dtype)+'\n'
pl=hid.get_create_plist()
txFcn=(
('chunk',h5py.h5p.PropDCID.get_chunk),
('fill time', h5py.h5p.PropDCID.get_fill_time),
('alloc_time', h5py.h5p.PropDCID.get_alloc_time),
#('class', h5py.h5p.PropDCID.get_class),
#('fill_value', h5py.h5p.PropDCID.get_fill_value),
#('filter', h5py.h5p.PropDCID.get_filter),
#('filter_by_id',h5py.h5p.PropDCID.get_filter_by_id),
('layout', h5py.h5p.PropDCID.get_layout),
('nfilters', h5py.h5p.PropDCID.get_nfilters),
#('obj_track_times', h5py.h5p.PropDCID.get_obj_track_times),
)
for tx,func in txFcn:
try: v=func(pl)
except ValueError as e: pass
else:txt+=tx+':'+str(v)+'\n'
if hid.shape==() or np.prod(hid.shape)<10: #show up to max. 10 element arrays
#if ttt==h5py.h5t.TypeStringID or hid.shape==() or hid.shape==(1,):
ds=h5py.Dataset(hid)
txt+='Value:\n\t'+str(ds.value)+'\n'
return txt
def OnSelChanged(self, event):
wxNode = event.GetItem()
txt=HdfViewerFrame.GetPropertyStr(self.wxTree,wxNode)
self.display.SetLabel(txt)
def OnMenu(self, event):
wxNode = event.GetItem()
self.PopupMenu(HdfTreePopupMenu((self.wxTree,wxNode)), event.GetPoint())
if __name__ == '__main__':
def GetArgs():
import sys,argparse #since python 2.7
exampleCmd='/scratch/detectorData/e14472_00033.hdf5'
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__,
epilog='Example:\n'+os.path.basename(sys.argv[0])+' '+exampleCmd+'\n ')
parser.add_argument('hdfFile', nargs='?', help='the hdf5 to show')
args = parser.parse_args()
return args
class MyApp(wx.App):
def OnInit(self):
args=GetArgs()
frame = HdfViewerFrame(None, 'h5pyViewer')
if args.hdfFile:
frame.OpenFile(args.hdfFile)
frame.Show(True)
self.SetTopWindow(frame)
return True
#------------------ Main Code ----------------------------------
#redirect stdout/stderr:
#http://www.blog.pythonlibrary.org/2009/01/01/wxpython-redirecting-stdout-stderr/
#https://groups.google.com/forum/#!topic/wxpython-users/S9uSKIYdYoo
#https://17677433047266577941.googlegroups.com/attach/e4d343dc6a751906/REDIRECT.PY?part=2&view=1&vt=ANaJVrFeyCjCMydKnkyfFbYJM7ip07mE-ozUIBxJ5A1QuK1GhycJYJsPTxpAaNk5L2LpXvGhzRPInxDt8_WUcUyK2Ois28Dq8LNebfYoWG9Yxr-tujf5Jk4
#http://www.wxpython.org/docs/api/wx.PyOnDemandOutputWindow-class.html
rd=not sys.stdout.isatty()#have a redirect window, if there is no console
#rd=True #force to open a redirect window
#rd=False #avoid a redirect window
app = MyApp(redirect=rd)
app.MainLoop()
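# Editor's note: hedged standalone sketch (not part of the original viewer) of
# the low-level h5py calls GetPropertyStr() builds on: open a file read-only,
# walk to the root object and read its object info.
def _demo_h5_object_info(fnHDF):
    fid = h5py.h5f.open(fnHDF, flags=h5py.h5f.ACC_RDONLY)
    root = h5py.h5o.open(fid, '/')
    objInf = h5py.h5o.get_info(root)
    print('addr:%d fileno:%d refCnt:%d' % (objInf.addr, objInf.fileno, objInf.rc))
    fid.close()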
|
{
"content_hash": "0dd429e0ed09ef1663095eb1839f109c",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 221,
"avg_line_length": 34.08545034642032,
"alnum_prop": 0.6561420150416695,
"repo_name": "ganymede42/h5pyViewer",
"id": "1bfbaf331745cbf634b316f646cb10266ceb26e1",
"size": "15231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h5pyViewer/h5pyViewer.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "103818"
}
],
"symlink_target": ""
}
|
"""
MIT License
Copyright (c) 2017 Kartik Arora
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import re
from dicttoxml import dicttoxml
from flask import Flask, render_template, json, jsonify, make_response, Response
from jinja2 import evalcontextfilter, Markup, escape
app = Flask(__name__)
with open(os.path.dirname(os.path.abspath(__file__)) + '/static/json/permission_names.json') as perms_names_fp:
data = perms_names_fp.read().encode('ascii', 'ignore')
perms_names = json.loads(data)
with open(os.path.dirname(os.path.abspath(__file__)) + '/static/json/permission_info.json') as perms_info_fp:
data = perms_info_fp.read().encode('ascii', 'ignore')
perms_info = json.loads(data)
perms_names_xml = dicttoxml(obj=perms_names, custom_root='permissions')
@app.route('/')
def home():
return render_template('layout.html', container='home', perms_names=perms_names, perms_info=perms_info)
@app.route('/list')
def list():
return render_template('layout.html', container='list', permissions=perms_names)
@app.route('/list/json', methods=['GET', 'POST'])
@app.route('/api/list/json', methods=['GET', 'POST'])
def list_json():
return make_response(jsonify(perms_names), 200)
@app.route('/list/xml', methods=['GET', 'POST'])
@app.route('/api/list/xml', methods=['GET', 'POST'])
def list_xml():
response = make_response(perms_names_xml, 200)
response.headers['Content-type'] = 'application/xml'
return response
@app.route('/detail/<string:permission>')
def detail(permission):
name = permission.upper()
permission = perms_info[name]
permission['name'] = name
return render_template('layout.html', container='detail', permission=permission)
@app.route('/detail/<string:permission>/json', methods=['GET', 'POST'])
@app.route('/api/detail/<string:permission>/json', methods=['GET', 'POST'])
def detail_json(permission):
name = permission.upper()
permission = perms_info[name]
permission['name'] = name
permission['url'] = 'https://developer.android.com/reference/android/Manifest.permission.html#' + name
return make_response(jsonify(permission), 200)
@app.route('/detail/<string:permission>/xml', methods=['GET', 'POST'])
@app.route('/api/detail/<string:permission>/xml', methods=['GET', 'POST'])
def detail_xml(permission):
name = permission.upper()
permission = perms_info[name]
permission['name'] = name
permission['url'] = 'https://developer.android.com/reference/android/Manifest.permission.html#' + name
response = make_response(dicttoxml(obj=permission, custom_root=name), 200)
response.headers['Content-type'] = 'application/xml'
return response
@app.route('/api')
def api():
return render_template('layout.html', container='api')
@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
result = u'\n\n'.join(u'<h5>%s</h5>' % p.replace('\n', '<br>\n') \
for p in _paragraph_re.split(escape(value)))
if eval_ctx.autoescape:
result = Markup(result)
return result
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=int(port), threaded=True)
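# Editor's note: hedged usage sketch, not part of the original app. Flask's
# built-in test client can exercise the JSON endpoint without starting a server:
def _demo_list_json():
    with app.test_client() as client:
        resp = client.get('/list/json')
        print('%s %s' % (resp.status_code, resp.content_type))  # 200 application/json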
|
{
"content_hash": "be0dad399f11b35fd182cc52e70bcfcd",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 111,
"avg_line_length": 37.060344827586206,
"alnum_prop": 0.6943475226796929,
"repo_name": "kartikarora/android-permissions",
"id": "3e9ee1f77c167cf8d76632043b098a3301fe01a6",
"size": "4299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "android-permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2394"
},
{
"name": "HTML",
"bytes": "26265"
},
{
"name": "Python",
"bytes": "4299"
}
],
"symlink_target": ""
}
|
from ctypes import *
from pyglet.gl import *
class GLSLException(Exception):
pass
def glsl_log(handle):
if handle == 0:
return ''
log_len = c_int(0)
glGetObjectParameterivARB(handle, GL_OBJECT_INFO_LOG_LENGTH_ARB,
byref(log_len))
if log_len.value == 0:
return ''
log = create_string_buffer(log_len.value) # does log_len include the NUL?
chars_written = c_int(0)
glGetInfoLogARB(handle, log_len.value, byref(chars_written), log)
return log.value
class Shader:
s_tag = 0
def __init__(self, name, prog):
self.name = name
self.prog = prog
self.shader = 0
self.compiling = False
self.tag = -1
self.dependencies = list()
def __del__(self):
self.destroy()
def _source(self):
if self.tag == Shader.s_tag:
return list()
self.tag = Shader.s_tag
r = list()
for d in self.dependencies:
r.extend(d._source())
r.append(self.prog)
return r
def _compile(self):
if self.shader:
return
if self.compiling:
return
self.compiling = True
self.shader = glCreateShaderObjectARB(self.shaderType())
if self.shader == 0:
raise GLSLException('failed to create shader object')
prog = c_char_p(self.prog)
length = c_int(-1)
glShaderSourceARB(self.shader,
1,
cast(byref(prog), POINTER(POINTER(c_char))),
byref(length))
glCompileShaderARB(self.shader)
self.compiling = False
compile_status = c_int(0)
glGetObjectParameterivARB(self.shader, GL_OBJECT_COMPILE_STATUS_ARB,
byref(compile_status))
if not compile_status.value:
err = glsl_log(self.shader)
glDeleteObjectARB(self.shader)
self.shader = 0
raise GLSLException('failed to compile shader', err)
def _attachTo(self, program):
if self.tag == Shader.s_tag:
return
self.tag = Shader.s_tag
for d in self.dependencies:
d._attachTo(program)
if self.isCompiled():
glAttachObjectARB(program, self.shader)
def addDependency(self, shader):
self.dependencies.append(shader)
return self
def destroy(self):
if self.shader != 0:
glDeleteObjectARB(self.shader)
def shaderType(self):
raise NotImplementedError()
def isCompiled(self):
return self.shader != 0
def attachTo(self, program):
Shader.s_tag = Shader.s_tag + 1
self._attachTo(program)
# ATI/apple's glsl compiler is broken.
def attachFlat(self, program):
if self.isCompiled():
glAttachObjectARB(program, self.shader)
def compileFlat(self):
if self.isCompiled():
return
self.shader = glCreateShaderObjectARB(self.shaderType())
if self.shader == 0:
raise GLSLException('failed to create shader object')
all_source = ['\n'.join(self._source())]
prog = (c_char_p * len(all_source))(*all_source)
length = (c_int * len(all_source))(-1)
glShaderSourceARB(self.shader,
len(all_source),
cast(prog, POINTER(POINTER(c_char))),
length)
glCompileShaderARB(self.shader)
compile_status = c_int(0)
glGetObjectParameterivARB(self.shader, GL_OBJECT_COMPILE_STATUS_ARB,
byref(compile_status))
if not compile_status.value:
err = glsl_log(self.shader)
glDeleteObjectARB(self.shader)
self.shader = 0
raise GLSLException('failed to compile shader', err)
def compile(self):
if self.isCompiled():
return
for d in self.dependencies:
d.compile()
self._compile()
class VertexShader(Shader):
def shaderType(self):
return GL_VERTEX_SHADER_ARB
class FragmentShader(Shader):
def shaderType(self):
return GL_FRAGMENT_SHADER_ARB
class ShaderProgram:
def __init__(self, vertex_shader=None, fragment_shader=None):
self.vertex_shader = vertex_shader
self.fragment_shader = fragment_shader
self.program = 0
def __del__(self):
self.destroy()
def destroy(self):
if self.program != 0:
glDeleteObjectARB(self.program)
def setShader(self, shader):
if isinstance(shader, FragmentShader):
self.fragment_shader = shader
if isinstance(shader, VertexShader):
self.vertex_shader = shader
if self.program != 0:
glDeleteObjectARB(self.program)
def link(self):
if self.vertex_shader is not None:
self.vertex_shader.compileFlat()
if self.fragment_shader is not None:
self.fragment_shader.compileFlat()
self.program = glCreateProgramObjectARB()
if self.program == 0:
raise GLSLException('failed to create program object')
if self.vertex_shader is not None:
self.vertex_shader.attachFlat(
self.program)
if self.fragment_shader is not None:
self.fragment_shader.attachFlat(
self.program)
glLinkProgramARB(self.program)
link_status = c_int(0)
glGetObjectParameterivARB(self.program, GL_OBJECT_LINK_STATUS_ARB,
byref(link_status))
if link_status.value == 0:
err = glsl_log(self.program)
glDeleteObjectARB(self.program)
self.program = 0
raise GLSLException('failed to link shader', err)
self.__class__._uloc_ = dict()
self.__class__._vloc_ = dict()
return self.program
def prog(self):
if self.program:
return self.program
return self.link()
def install(self):
p = self.prog()
if p != 0:
glUseProgramObjectARB(p)
def uninstall(self):
glUseProgramObjectARB(0)
def uniformLoc(self, var):
try:
return self.__class__._uloc_[var]
except (AttributeError, KeyError):
if self.program == 0:
self.link()
self.__class__._uloc_[var] = v = glGetUniformLocationARB(
self.program, var)
return v
def uset1F(self, var, x):
glUniform1fARB(self.uniformLoc(var), x)
def uset2F(self, var, x, y):
glUniform2fARB(self.uniformLoc(var), x, y)
def uset3F(self, var, x, y, z):
glUniform3fARB(self.uniformLoc(var), x, y, z)
def uset4F(self, var, x, y, z, w):
glUniform4fARB(self.uniformLoc(var), x, y, z, w)
def uset1I(self, var, x):
glUniform1iARB(self.uniformLoc(var), x)
def uset3I(self, var, x, y, z):
glUniform3iARB(self.uniformLoc(var), x, y, z)
def usetM4F(self, var, m):
pass
# glUniform1iARB(self.uniformLoc(var), x, y, z)
def usetTex(self, var, u, v):
glUniform1iARB(self.uniformLoc(var), u)
glActiveTexture(GL_TEXTURE0 + u)
glBindTexture(v.gl_tgt, v.gl_id)
__all__ = ['VertexShader', 'FragmentShader', 'ShaderProgram', 'GLSLException']
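# Editor's note: hedged usage sketch (assumes an active OpenGL context, e.g. a
# pyglet window, and valid GLSL source strings; not part of the original file).
def _demo_shader_usage(vs_src, fs_src):
    prog = ShaderProgram(VertexShader('demo_vs', vs_src),
                         FragmentShader('demo_fs', fs_src))
    prog.install()              # compiles and links lazily on first use
    prog.uset1F('u_time', 0.0)  # set a float uniform by name
    prog.uninstall()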
|
{
"content_hash": "1d8337a4777ff2076d1c6b915cd04e16",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 78,
"avg_line_length": 27.06909090909091,
"alnum_prop": 0.5638097796883396,
"repo_name": "bitcraft/pyglet",
"id": "5568cb698812e8ffa656791bce9d8915eb68ced7",
"size": "7444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/experimental/shader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "1652"
},
{
"name": "JavaScript",
"bytes": "6745"
},
{
"name": "PHP",
"bytes": "2192"
},
{
"name": "Python",
"bytes": "6201398"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import wrap
from colors import blue, cyan, green, red
from pants.help.help_info_extracter import HelpInfoExtracter
class HelpFormatter(object):
def __init__(self, scope, show_recursive, show_advanced, color):
self._scope = scope
self._show_recursive = show_recursive
self._show_advanced = show_advanced
self._color = color
def _maybe_blue(self, s):
return self._maybe_color(blue, s)
def _maybe_cyan(self, s):
return self._maybe_color(cyan, s)
def _maybe_green(self, s):
return self._maybe_color(green, s)
def _maybe_red(self, s):
return self._maybe_color(red, s)
def _maybe_color(self, color, s):
return color(s) if self._color else s
def format_options(self, scope, description, option_registrations_iter):
"""Return a help message for the specified options.
:param option_registrations_iter: An iterator over (args, kwargs) pairs, as passed in to
options registration.
"""
oshi = HelpInfoExtracter(self._scope).get_option_scope_help_info(option_registrations_iter)
lines = []
def add_option(category, ohis):
if ohis:
lines.append('')
display_scope = scope or 'Global'
if category:
lines.append(self._maybe_blue('{} {} options:'.format(display_scope, category)))
else:
lines.append(self._maybe_blue('{} options:'.format(display_scope)))
if description:
lines.append(description)
lines.append(' ')
for ohi in ohis:
lines.extend(self.format_option(ohi))
add_option('', oshi.basic)
if self._show_recursive:
add_option('recursive', oshi.recursive)
if self._show_advanced:
add_option('advanced', oshi.advanced)
return lines
def format_option(self, ohi):
lines = []
arg_line = ('{args} {fromfile}{dflt}'
.format(args=self._maybe_cyan(', '.join(ohi.display_args)),
dflt=self._maybe_green('(default: {})'.format(ohi.default)),
fromfile=self._maybe_green('(@fromfile value supported) ' if ohi.fromfile
else '')))
lines.append(arg_line)
indent = ' '
lines.extend(['{}{}'.format(indent, s) for s in wrap(ohi.help, 76)])
if ohi.deprecated_message:
lines.append(self._maybe_red('{}{}.'.format(indent, ohi.deprecated_message)))
if ohi.deprecated_hint:
lines.append(self._maybe_red('{}{}'.format(indent, ohi.deprecated_hint)))
return lines
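# Editor's note: hedged sketch, not part of the original file. With color=False
# every _maybe_* helper returns its input unchanged, so help text stays plain
# when redirected to a file:
def _demo_color_toggle():
    plain = HelpFormatter(scope='', show_recursive=False,
                          show_advanced=False, color=False)
    assert plain._maybe_cyan('--level') == '--level'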
|
{
"content_hash": "6426819c2099e31516d06e6ef03472a1",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 97,
"avg_line_length": 35.78947368421053,
"alnum_prop": 0.6136029411764706,
"repo_name": "qma/pants",
"id": "bbbc57d49eab42debf02175da17d7d438e88b350",
"size": "2867",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/pants/help/help_formatter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "64029"
},
{
"name": "Java",
"bytes": "315576"
},
{
"name": "JavaScript",
"bytes": "28962"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4166893"
},
{
"name": "Scala",
"bytes": "85457"
},
{
"name": "Shell",
"bytes": "49622"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
}
|
from django import forms
from datetime import datetime
from django.db import connection
EXP_TYPE = (
('Temporary', 'Temporary'),
('Permanent', 'Permanent'),
('Recurrent', 'Recurrent'),
)
TAG_ID = (
('1', 'AED'),
('2', 'Free Food'),
('3', 'Event'),
('4', 'Restroom'),
('5', 'Vaccines'),
('6', 'Health Check-ups'),
('7', 'Water'),
)
class ItemForm(forms.Form):
# tag_id = forms.ModelChoiceField(connection.cursor().execute("SELECT tag_id FROM Tag"))
tag_name = forms.ChoiceField(choices = TAG_ID,
widget=forms.Select(attrs={'class':'form-control'}))
expiration_type = forms.ChoiceField(choices = EXP_TYPE,
widget=forms.Select(attrs={'class':'form-control exp_type','id':'exp_type'}))
latitude = forms.FloatField(required = False,
widget=forms.HiddenInput(attrs={'class':'form-control lat' , 'id': 'lat'}))
longitude = forms.FloatField(required = False,
widget=forms.HiddenInput(attrs={'class':'form-control lng', 'id': 'lng'}))
# tag = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),max_length=255)
start_time = forms.DateTimeField(required = False,
input_formats=['%Y-%m-%dT%H:%M'],
widget=forms.DateTimeInput(attrs={'class':'form-control','type':'datetime-local','id':'start_time'}),
initial=datetime.now().time())
#expiration = forms.DateTimeField(widget=forms.widgets.DateTimeInput(input_formats=["%d %b %Y %H:%M:%S %Z"]))
expiration = forms.DateTimeField(required = False, input_formats=['%Y-%m-%dT%H:%M'],widget=forms.DateTimeInput(attrs={'class':'form-control','type':'datetime-local','id':'expiration'}),initial=datetime.now().time())
description = forms.CharField(required = False, widget=forms.TextInput(attrs={'class':'form-control'}),max_length=1024)
location = forms.CharField(required = False, widget=forms.TextInput(attrs={'class':'form-control location', 'id': 'location'}),max_length=255)
class ItemFormMark(forms.Form):
# tag_id = forms.ModelChoiceField(connection.cursor().execute("SELECT tag_id FROM Tag"))
tag_name = forms.ChoiceField(choices = TAG_ID, widget=forms.Select(attrs={'class':'form-control'}))
expiration_type = forms.ChoiceField(choices = EXP_TYPE, widget=forms.Select(attrs={'class':'form-control exp_type_mark','id':'exp_type_mark'}))
latitude = forms.FloatField(required = False, widget=forms.HiddenInput(attrs={'class':'form-control lat_mark' , 'id': 'lat_mark'}))
longitude = forms.FloatField(required = False, widget=forms.HiddenInput(attrs={'class':'form-control lng_mark', 'id': 'lng_mark'}))
# tag = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),max_length=255)
start_time = forms.DateTimeField(required = False, input_formats=['%Y-%m-%dT%H:%M'], widget=forms.DateTimeInput(attrs={'class':'form-control','type':'datetime-local','id':'start_time_mark'}),initial=datetime.now().time())
#expiration = forms.DateTimeField(widget=forms.widgets.DateTimeInput(input_formats=["%d %b %Y %H:%M:%S %Z"]))
expiration = forms.DateTimeField(required = False, input_formats=['%Y-%m-%dT%H:%M'],widget=forms.DateTimeInput(attrs={'class':'form-control','type':'datetime-local','id':'expiration_mark'}),initial=datetime.now().time())
description = forms.CharField(required = False, widget=forms.TextInput(attrs={'class':'form-control'}),max_length=1024)
location = forms.CharField(required = False, widget=forms.TextInput(attrs={'class':'form-control location_mark', 'id': 'location_mark'}),max_length=255)
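# Editor's note: hedged validation sketch (assumes Django settings are already
# configured, as inside the running project; not part of the original file).
def _demo_item_form():
    form = ItemForm(data={'tag_name': '2', 'expiration_type': 'Temporary'})
    if form.is_valid():                       # the remaining fields are optional
        print(form.cleaned_data['tag_name'])  # -> '2' (the "Free Food" tag)
    else:
        print(form.errors)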
|
{
"content_hash": "67f76512c0d5de27e70790809247b4ff",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 225,
"avg_line_length": 69.11538461538461,
"alnum_prop": 0.6736227045075125,
"repo_name": "CraigRhodes/fs_site",
"id": "f55c888b247a89dd8a8e50d590f58c5f7c770bb5",
"size": "3594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freesources/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3391"
},
{
"name": "HTML",
"bytes": "42114"
},
{
"name": "JavaScript",
"bytes": "4687"
},
{
"name": "Makefile",
"bytes": "924"
},
{
"name": "Python",
"bytes": "22501"
},
{
"name": "Shell",
"bytes": "1832"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vultr_server
short_description: Manages virtual servers on Vultr.
description:
- Deploy, start, stop, update, restart, reinstall servers.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the server.
required: true
aliases: [ label ]
hostname:
description:
- Hostname to assign to this server.
os:
description:
- The operating system.
- Required if the server does not yet exist and is not restoring from a snapshot.
snapshot:
version_added: "2.8"
description:
- Name of snapshot to restore server from.
firewall_group:
description:
- The firewall group to assign this server to.
plan:
description:
- Plan to use for the server.
- Required if the server does not yet exist.
force:
description:
- Force stop/start the server if required to apply changes
- Otherwise a running server will not be changed.
type: bool
notify_activate:
description:
- Whether to send an activation email when the server is ready or not.
- Only considered on creation.
type: bool
private_network_enabled:
description:
- Whether to enable private networking or not.
type: bool
auto_backup_enabled:
description:
- Whether to enable automatic backups or not.
type: bool
ipv6_enabled:
description:
- Whether to enable IPv6 or not.
type: bool
tag:
description:
- Tag for the server.
user_data:
description:
- User data to be passed to the server.
startup_script:
description:
- Name of the startup script to execute on boot.
- Only considered while creating the server.
ssh_keys:
description:
- List of SSH keys passed to the server on creation.
aliases: [ ssh_key ]
reserved_ip_v4:
description:
- IP address of the floating IP to use as the main IP of this server.
- Only considered on creation.
region:
description:
- Region the server is deployed into.
- Required if the server does not yet exist.
state:
description:
- State of the server.
default: present
choices: [ present, absent, restarted, reinstalled, started, stopped ]
extends_documentation_fragment: vultr
'''
EXAMPLES = '''
- name: create server
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
ssh_keys:
- my_key
- your_key
region: Amsterdam
state: present
- name: ensure a server is present and started
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
ssh_key: my_key
region: Amsterdam
state: started
- name: ensure a server is present and stopped
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
region: Amsterdam
state: stopped
- name: ensure an existing server is stopped
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
state: stopped
- name: ensure an existing server is started
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
state: started
- name: ensure a server is absent
local_action:
module: vultr_server
name: "{{ vultr_server_name }}"
state: absent
'''
RETURN = '''
---
vultr_api:
description: Response from Vultr API with a few additions/modifications
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_server:
description: Response from Vultr API with a few additions/modifications
returned: success
type: complex
contains:
id:
description: ID of the server
returned: success
type: str
sample: 10194376
name:
description: Name (label) of the server
returned: success
type: str
sample: "ansible-test-vm"
plan:
description: Plan used for the server
returned: success
type: str
sample: "1024 MB RAM,25 GB SSD,1.00 TB BW"
allowed_bandwidth_gb:
description: Allowed bandwidth to use in GB
returned: success
type: int
sample: 1000
auto_backup_enabled:
description: Whether automatic backups are enabled
returned: success
type: bool
sample: false
cost_per_month:
description: Cost per month for the server
returned: success
type: float
sample: 5.00
current_bandwidth_gb:
description: Current bandwidth used for the server
returned: success
type: int
sample: 0
date_created:
description: Date when the server was created
returned: success
type: str
sample: "2017-08-26 12:47:48"
default_password:
description: Password to login as root into the server
returned: success
type: str
sample: "!p3EWYJm$qDWYaFr"
disk:
description: Information about the disk
returned: success
type: str
sample: "Virtual 25 GB"
v4_gateway:
description: IPv4 gateway
returned: success
type: str
sample: "45.32.232.1"
internal_ip:
description: Internal IP
returned: success
type: str
sample: ""
kvm_url:
description: URL to the VNC
returned: success
type: str
sample: "https://my.vultr.com/subs/vps/novnc/api.php?data=xyz"
region:
description: Region the server was deployed into
returned: success
type: str
sample: "Amsterdam"
v4_main_ip:
description: Main IPv4
returned: success
type: str
sample: "45.32.233.154"
v4_netmask:
description: Netmask IPv4
returned: success
type: str
sample: "255.255.254.0"
os:
description: Operating system used for the server
returned: success
type: str
sample: "CentOS 6 x64"
firewall_group:
description: Firewall group the server is assigned to
returned: success and available
type: str
sample: "CentOS 6 x64"
pending_charges:
description: Pending charges
returned: success
type: float
sample: 0.01
power_status:
description: Power status of the server
returned: success
type: str
sample: "running"
ram:
description: Information about the RAM size
returned: success
type: str
sample: "1024 MB"
server_state:
description: State about the server
returned: success
type: str
sample: "ok"
status:
description: Status about the deployment of the server
returned: success
type: str
sample: "active"
tag:
description: TBD
returned: success
type: str
sample: ""
v6_main_ip:
description: Main IPv6
returned: success
type: str
sample: ""
v6_network:
description: Network IPv6
returned: success
type: str
sample: ""
v6_network_size:
description: Network size IPv6
returned: success
type: str
sample: ""
v6_networks:
description: Networks IPv6
returned: success
type: list
sample: []
vcpu_count:
description: Virtual CPU count
returned: success
type: int
sample: 1
'''
import time
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrServer(Vultr):
def __init__(self, module):
super(AnsibleVultrServer, self).__init__(module, "vultr_server")
self.server = None
self.returns = {
'SUBID': dict(key='id'),
'label': dict(key='name'),
'date_created': dict(),
'allowed_bandwidth_gb': dict(convert_to='int'),
'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'),
'current_bandwidth_gb': dict(),
'kvm_url': dict(),
'default_password': dict(),
'internal_ip': dict(),
'disk': dict(),
'cost_per_month': dict(convert_to='float'),
'location': dict(key='region'),
'main_ip': dict(key='v4_main_ip'),
'network_v4': dict(key='v4_network'),
'gateway_v4': dict(key='v4_gateway'),
'os': dict(),
'pending_charges': dict(convert_to='float'),
'power_status': dict(),
'ram': dict(),
'plan': dict(),
'server_state': dict(),
'status': dict(),
'firewall_group': dict(),
'tag': dict(),
'v6_main_ip': dict(),
'v6_network': dict(),
'v6_network_size': dict(),
'v6_networks': dict(),
'vcpu_count': dict(convert_to='int'),
}
self.server_power_state = None
def get_startup_script(self):
return self.query_resource_by_key(
key='name',
value=self.module.params.get('startup_script'),
resource='startupscript',
)
def get_os(self):
if self.module.params.get('snapshot'):
os_name = 'Snapshot'
else:
os_name = self.module.params.get('os')
return self.query_resource_by_key(
key='name',
value=os_name,
resource='os',
use_cache=True
)
def get_snapshot(self):
return self.query_resource_by_key(
key='description',
value=self.module.params.get('snapshot'),
resource='snapshot',
use_cache=True
)
def get_ssh_keys(self):
ssh_key_names = self.module.params.get('ssh_keys')
if not ssh_key_names:
return []
ssh_keys = []
for ssh_key_name in ssh_key_names:
ssh_key = self.query_resource_by_key(
key='name',
value=ssh_key_name,
resource='sshkey',
use_cache=True
)
if ssh_key:
ssh_keys.append(ssh_key)
return ssh_keys
def get_region(self):
return self.query_resource_by_key(
key='name',
value=self.module.params.get('region'),
resource='regions',
use_cache=True
)
def get_plan(self):
return self.query_resource_by_key(
key='name',
value=self.module.params.get('plan'),
resource='plans',
use_cache=True
)
def get_firewall_group(self):
return self.query_resource_by_key(
key='description',
value=self.module.params.get('firewall_group'),
resource='firewall',
query_by='group_list'
)
def get_user_data(self):
user_data = self.module.params.get('user_data')
if user_data is not None:
user_data = to_text(base64.b64encode(to_bytes(user_data)))
return user_data
def get_server_user_data(self, server):
if not server or not server.get('SUBID'):
return None
user_data = self.api_query(path="/v1/server/get_user_data?SUBID=%s" % server.get('SUBID'))
return user_data.get('userdata')
def get_server(self, refresh=False):
if self.server is None or refresh:
self.server = None
server_list = self.api_query(path="/v1/server/list")
if server_list:
for server_id, server_data in server_list.items():
if server_data.get('label') == self.module.params.get('name'):
self.server = server_data
plan = self.query_resource_by_key(
key='VPSPLANID',
value=server_data['VPSPLANID'],
resource='plans',
use_cache=True
)
self.server['plan'] = plan.get('name')
os = self.query_resource_by_key(
key='OSID',
value=int(server_data['OSID']),
resource='os',
use_cache=True
)
self.server['os'] = os.get('name')
fwg_id = server_data.get('FIREWALLGROUPID')
fw = self.query_resource_by_key(
key='FIREWALLGROUPID',
value=server_data.get('FIREWALLGROUPID') if fwg_id and fwg_id != "0" else None,
resource='firewall',
query_by='group_list',
use_cache=True
)
self.server['firewall_group'] = fw.get('description')
return self.server
def present_server(self, start_server=True):
server = self.get_server()
if not server:
server = self._create_server(server=server)
else:
server = self._update_server(server=server, start_server=start_server)
return server
def _create_server(self, server=None):
required_params = [
'os',
'plan',
'region',
]
snapshot_restore = self.module.params.get('snapshot') is not None
if snapshot_restore:
required_params.remove('os')
self.module.fail_on_missing_params(required_params=required_params)
self.result['changed'] = True
if not self.module.check_mode:
data = {
'DCID': self.get_region().get('DCID'),
'VPSPLANID': self.get_plan().get('VPSPLANID'),
'FIREWALLGROUPID': self.get_firewall_group().get('FIREWALLGROUPID'),
'OSID': self.get_os().get('OSID'),
'SNAPSHOTID': self.get_snapshot().get('SNAPSHOTID'),
'label': self.module.params.get('name'),
'hostname': self.module.params.get('hostname'),
'SSHKEYID': ','.join([ssh_key['SSHKEYID'] for ssh_key in self.get_ssh_keys()]),
'enable_ipv6': self.get_yes_or_no('ipv6_enabled'),
'enable_private_network': self.get_yes_or_no('private_network_enabled'),
'auto_backups': self.get_yes_or_no('auto_backup_enabled'),
'notify_activate': self.get_yes_or_no('notify_activate'),
'tag': self.module.params.get('tag'),
'reserved_ip_v4': self.module.params.get('reserved_ip_v4'),
'user_data': self.get_user_data(),
'SCRIPTID': self.get_startup_script().get('SCRIPTID'),
}
self.api_query(
path="/v1/server/create",
method="POST",
data=data
)
server = self._wait_for_state(key='status', state='active')
server = self._wait_for_state(state='running', timeout=3600 if snapshot_restore else 60)
return server
def _update_auto_backups_setting(self, server, start_server):
auto_backup_enabled_changed = self.switch_enable_disable(server, 'auto_backup_enabled', 'auto_backups')
if auto_backup_enabled_changed:
if auto_backup_enabled_changed == "enable" and server['auto_backups'] == 'disable':
self.module.warn("Backups are disabled. Once disabled, backups can only be enabled again by customer support")
else:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['auto_backup_enabled'] = server.get('auto_backups')
self.result['diff']['after']['auto_backup_enabled'] = self.get_yes_or_no('auto_backup_enabled')
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/backup_%s" % auto_backup_enabled_changed,
method="POST",
data=data
)
return server
def _update_ipv6_setting(self, server, start_server):
ipv6_enabled_changed = self.switch_enable_disable(server, 'ipv6_enabled', 'v6_main_ip')
if ipv6_enabled_changed:
if ipv6_enabled_changed == "disable":
self.module.warn("The Vultr API does not allow to disable IPv6")
else:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['ipv6_enabled'] = False
self.result['diff']['after']['ipv6_enabled'] = True
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/ipv6_%s" % ipv6_enabled_changed,
method="POST",
data=data
)
server = self._wait_for_state(key='v6_main_ip')
return server
def _update_private_network_setting(self, server, start_server):
private_network_enabled_changed = self.switch_enable_disable(server, 'private_network_enabled', 'internal_ip')
if private_network_enabled_changed:
if private_network_enabled_changed == "disable":
self.module.warn("The Vultr API does not allow to disable private network")
else:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['private_network_enabled'] = False
self.result['diff']['after']['private_network_enabled'] = True
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/private_network_%s" % private_network_enabled_changed,
method="POST",
data=data
)
return server
def _update_plan_setting(self, server, start_server):
plan = self.get_plan()
plan_changed = bool(plan and plan['VPSPLANID'] != server.get('VPSPLANID'))
if plan_changed:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['plan'] = server.get('plan')
self.result['diff']['after']['plan'] = plan['name']
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'VPSPLANID': plan['VPSPLANID'],
}
self.api_query(
path="/v1/server/upgrade_plan",
method="POST",
data=data
)
return server
def _handle_power_status_for_update(self, server, start_server):
# Remember the power state before we handle any action
if self.server_power_state is None:
self.server_power_state = server['power_status']
# A stopped server can be updated
if self.server_power_state == "stopped":
return server, False
# A running server must be forced to update unless the wanted state is stopped
elif self.module.params.get('force') or not start_server:
warned = False
if not self.module.check_mode:
# Some update APIs would restart the VM; we handle the restart manually
# by stopping the server and starting it again once the changes are applied
server = self.stop_server(skip_results=True)
# Warn the user that a running server won't get changed
else:
warned = True
self.module.warn("Some changes won't be applied to running instances. " +
"Use force=true to allow the instance %s to be stopped/started." % server['label'])
return server, warned
def _update_server(self, server=None, start_server=True):
# Wait for server to unlock if restoring
if server.get('os').strip() == 'Snapshot':
server = self._wait_for_state(key='server_state', state='ok', timeout=3600)
# Update auto backups settings, stops server
server = self._update_auto_backups_setting(server=server, start_server=start_server)
# Update IPv6 settings, stops server
server = self._update_ipv6_setting(server=server, start_server=start_server)
# Update private network settings, stops server
server = self._update_private_network_setting(server=server, start_server=start_server)
# Update plan settings, stops server
server = self._update_plan_setting(server=server, start_server=start_server)
# User data
user_data = self.get_user_data()
server_user_data = self.get_server_user_data(server=server)
if user_data is not None and user_data != server_user_data:
self.result['changed'] = True
self.result['diff']['before']['user_data'] = server_user_data
self.result['diff']['after']['user_data'] = user_data
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'userdata': user_data,
}
self.api_query(
path="/v1/server/set_user_data",
method="POST",
data=data
)
# Tags
tag = self.module.params.get('tag')
if tag is not None and tag != server.get('tag'):
self.result['changed'] = True
self.result['diff']['before']['tag'] = server.get('tag')
self.result['diff']['after']['tag'] = tag
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'tag': tag,
}
self.api_query(
path="/v1/server/tag_set",
method="POST",
data=data
)
# Firewall group
firewall_group = self.get_firewall_group()
if firewall_group and firewall_group.get('description') != server.get('firewall_group'):
self.result['changed'] = True
self.result['diff']['before']['firewall_group'] = server.get('firewall_group')
self.result['diff']['after']['firewall_group'] = firewall_group.get('description')
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'FIREWALLGROUPID': firewall_group.get('FIREWALLGROUPID'),
}
self.api_query(
path="/v1/server/firewall_group_set",
method="POST",
data=data
)
# Start server again if it was running before the changes
if not self.module.check_mode:
if self.server_power_state in ['starting', 'running'] and start_server:
server = self.start_server(skip_results=True)
server = self._wait_for_state(key='status', state='active')
return server
def absent_server(self):
server = self.get_server()
if server:
self.result['changed'] = True
self.result['diff']['before']['id'] = server['SUBID']
self.result['diff']['after']['id'] = ""
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/destroy",
method="POST",
data=data
)
for s in range(0, 60):
if server is None:
break
time.sleep(2)
server = self.get_server(refresh=True)
else:
self.fail_json(msg="Wait for server '%s' to get deleted timed out" % server['label'])
return server
def restart_server(self):
self.result['changed'] = True
server = self.get_server()
if server:
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/reboot",
method="POST",
data=data
)
server = self._wait_for_state(state='running')
return server
def reinstall_server(self):
self.result['changed'] = True
server = self.get_server()
if server:
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/reinstall",
method="POST",
data=data
)
server = self._wait_for_state(state='running')
return server
def _wait_for_state(self, key='power_status', state=None, timeout=60):
time.sleep(1)
server = self.get_server(refresh=True)
for s in range(0, timeout):
# Check for truthiness if wanted state is None
if state is None and server.get(key):
break
elif server.get(key) == state:
break
time.sleep(2)
server = self.get_server(refresh=True)
# Timed out
else:
if state is None:
msg = "Wait for '%s' timed out" % key
else:
msg = "Wait for '%s' to get into state '%s' timed out" % (key, state)
self.fail_json(msg=msg)
return server
def start_server(self, skip_results=False):
server = self.get_server()
if server:
if server['power_status'] == 'starting':
server = self._wait_for_state(state='running')
elif server['power_status'] != 'running':
if not skip_results:
self.result['changed'] = True
self.result['diff']['before']['power_status'] = server['power_status']
self.result['diff']['after']['power_status'] = "running"
if not self.module.check_mode:
data = {
'SUBID': server['SUBID']
}
self.api_query(
path="/v1/server/start",
method="POST",
data=data
)
server = self._wait_for_state(state='running')
return server
def stop_server(self, skip_results=False):
server = self.get_server()
if server and server['power_status'] != "stopped":
if not skip_results:
self.result['changed'] = True
self.result['diff']['before']['power_status'] = server['power_status']
self.result['diff']['after']['power_status'] = "stopped"
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
}
self.api_query(
path="/v1/server/halt",
method="POST",
data=data
)
server = self._wait_for_state(state='stopped')
return server
def main():
argument_spec = vultr_argument_spec()
argument_spec.update(dict(
name=dict(required=True, aliases=['label']),
hostname=dict(),
os=dict(),
snapshot=dict(),
plan=dict(),
force=dict(type='bool', default=False),
notify_activate=dict(type='bool', default=False),
private_network_enabled=dict(type='bool'),
auto_backup_enabled=dict(type='bool'),
ipv6_enabled=dict(type='bool'),
tag=dict(),
reserved_ip_v4=dict(),
firewall_group=dict(),
startup_script=dict(),
user_data=dict(),
ssh_keys=dict(type='list', aliases=['ssh_key']),
region=dict(),
state=dict(choices=['present', 'absent', 'restarted', 'reinstalled', 'started', 'stopped'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
vultr_server = AnsibleVultrServer(module)
if module.params.get('state') == "absent":
server = vultr_server.absent_server()
else:
if module.params.get('state') == "started":
server = vultr_server.present_server()
server = vultr_server.start_server()
elif module.params.get('state') == "stopped":
server = vultr_server.present_server(start_server=False)
server = vultr_server.stop_server()
elif module.params.get('state') == "restarted":
server = vultr_server.present_server()
server = vultr_server.restart_server()
elif module.params.get('state') == "reinstalled":
server = vultr_server.reinstall_server()
else:
server = vultr_server.present_server()
result = vultr_server.get_result(server)
module.exit_json(**result)
if __name__ == '__main__':
main()
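# Editor's note: hedged standalone sketch (not part of the original module) of
# the poll-until-state pattern that _wait_for_state() implements above: re-fetch
# a resource every 2 seconds until a key reaches the wanted value or we give up.
def _poll_until(fetch, key, wanted, timeout=60):
    resource = fetch()
    for _ in range(timeout):
        if resource and resource.get(key) == wanted:
            return resource
        time.sleep(2)
        resource = fetch()
    raise RuntimeError('waiting for %s=%s timed out' % (key, wanted))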
|
{
"content_hash": "5008b85a0a3140cd659635f30d786a08",
"timestamp": "",
"source": "github",
"line_count": 909,
"max_line_length": 126,
"avg_line_length": 34.31793179317932,
"alnum_prop": 0.5354704279531977,
"repo_name": "SergeyCherepanov/ansible",
"id": "9f6cc7a267d76c04f0433d17d67e9ad17d23ee6c",
"size": "31379",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/vultr/vultr_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import logging
import json
import sys
from django.urls import reverse
from nose.tools import assert_equal, assert_not_equal, assert_true, assert_false
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.connectors.models import Connector
from useradmin.models import User
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock, MagicMock
else:
from mock import patch, Mock, MagicMock
LOG = logging.getLogger(__name__)
class TestInstallExamples():
def setUp(self):
self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=True, is_admin=True)
self.user = User.objects.get(username="test")
def test_install_via_insert_mysql(self):
with patch('notebook.views.Connector.objects') as ConnectorObjects:
with patch('notebook.views.get_interpreter') as get_interpreter:
with patch('notebook.connectors.base.get_ordered_interpreters') as get_ordered_interpreters:
with patch('beeswax.management.commands.beeswax_install_examples.make_notebook') as make_notebook:
ConnectorObjects.get = Mock(
return_value=Connector(
id=10,
name='MySql',
dialect='mysql',
),
)
get_interpreter.return_value = {'type': 10, 'dialect': 'mysql'}
get_ordered_interpreters.return_value = [
{
'name': 'MySql',
'type': 10,
'dialect': 'mysql',
'interface': 'sqlalchemy',
}
]
resp = self.client.post(reverse('notebook:install_examples'), {'db_name': 'default', 'dialect': 'mysql'})
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_equal(
'Query Sample: Salary Analysis mysql installed. '
'Table default.employe_sample installed.',
data['message'],
data
)
assert_equal('', data['errorMessage'], data)
make_notebook.assert_called()
def test_install_via_load_hive(self):
with patch('notebook.views.Connector.objects') as ConnectorObjects:
with patch('notebook.views.get_interpreter') as get_interpreter:
with patch('notebook.connectors.base.get_ordered_interpreters') as get_ordered_interpreters:
with patch('beeswax.management.commands.beeswax_install_examples.make_notebook') as make_notebook:
with patch('beeswax.management.commands.beeswax_install_examples.has_concurrency_support') as has_concurrency_support:
with patch('beeswax.management.commands.beeswax_install_examples.cluster.get_hdfs') as get_hdfs:
ConnectorObjects.get = Mock(
return_value=Connector(
id=10,
name='MyHive',
dialect='hive',
),
)
get_interpreter.return_value = {'type': 10, 'dialect': 'hive'}
get_ordered_interpreters.return_value = [
{
'name': 'MyHive',
'type': 10,
'dialect': 'hive',
'interface': 'hiveserver',
}
]
has_concurrency_support.return_value = False
fs = Mock(copyFromLocal=Mock())
get_hdfs.return_value = fs
resp = self.client.post(reverse('notebook:install_examples'), {'db_name': 'default'})
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_equal(
'Query Sample: Top salary hive installed. '
'Query Sample: Salary growth hive installed. '
'Query Sample: Job loss hive installed. '
'Query Sample: Customers hive installed. '
'Table default.sample_07 installed. '
'Table default.sample_08 installed. '
'Table default.customers installed. '
'Table default.web_logs installed.',
data['message'],
data
)
assert_equal('', data['errorMessage'], data)
make_notebook.assert_called()
fs.do_as_user.assert_called()
def test_install_via_insert_hive(self):
with patch('notebook.views.Connector.objects') as ConnectorObjects:
with patch('notebook.views.get_interpreter') as get_interpreter:
with patch('notebook.connectors.base.get_ordered_interpreters') as get_ordered_interpreters:
with patch('beeswax.management.commands.beeswax_install_examples.make_notebook') as make_notebook:
with patch('beeswax.management.commands.beeswax_install_examples.has_concurrency_support') as has_concurrency_support:
ConnectorObjects.get = Mock(
return_value=Connector(
id=10,
name='MyHive',
dialect='hive',
),
)
get_interpreter.return_value = {'type': 10, 'dialect': 'hive'}
get_ordered_interpreters.return_value = [
{
'name': 'MyHive',
'type': 10,
'dialect': 'hive',
'interface': 'hiveserver',
}
]
has_concurrency_support.return_value = True
resp = self.client.post(reverse('notebook:install_examples'), {'db_name': 'default'})
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_equal(
'Query Sample: Top salary hive installed. '
'Query Sample: Salary growth hive installed. '
'Query Sample: Job loss hive installed. '
'Query Sample: Customers hive installed. ' # Ideally should not be installed as table not installed
'Table default.sample_07 installed. '
'Table default.sample_08 installed. '
# 'Table default.customers installed. ' # Not supported via INSERT yet
'Table default.web_logs installed.',
data['message'],
data
)
assert_equal('', data['errorMessage'], data)
make_notebook.assert_called()
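# Side note, a sketch rather than part of the original tests: the deeply
# nested `with patch(...)` blocks above can be flattened with
# contextlib.ExitStack, which enters every patcher at once and unwinds them
# in reverse order on exit. `patch_many` below is a hypothetical helper.
import contextlib

@contextlib.contextmanager
def patch_many(*targets):
    with contextlib.ExitStack() as stack:
        # enter_context() starts each mock and registers its cleanup
        yield [stack.enter_context(patch(t)) for t in targets]

# Example (hypothetical, mirroring the tests above):
#   with patch_many('notebook.views.Connector.objects',
#                   'notebook.views.get_interpreter') as (objects, get_interpreter):
#       get_interpreter.return_value = {'type': 10, 'dialect': 'mysql'}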
| [source: github kawamon/hue, desktop/libs/notebook/src/notebook/views_tests.py, apache-2.0]
from scrapy.item import Item, Field
class Car(Item):
title = Field()
year = Field()
price = Field()
miles = Field()
color = Field()
transmission = Field()
vin = Field()
url = Field()
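# Minimal usage sketch (assumed, not part of the original module): a spider
# fills a Car item like a dict; Field() only declares the allowed keys.
if __name__ == '__main__':
    car = Car(title='2014 Subaru Outback', price='$18,995')
    car['miles'] = '52,000'  # dict-style access is the scrapy.Item API
    print(dict(car))         # -> {'title': ..., 'price': ..., 'miles': ...}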
| [source: github JeffPaine/subaru_search, subaru/items.py, mit]
"""
Django settings for atcui project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'luj_jam&2nca+r$5sh_l7rm5tu3zl$u!d&h!f*0hji2x9av!zm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
ATC_API = {
'ATCD_HOST': 'localhost',
'ATCD_PORT': 9091,
'DEFAULT_TC_TIMEOUT': 24 * 60 * 60,
'PROXY_IPS': ['127.0.0.1'],
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Django debug
'django_pdb',
# Django ATC api
'rest_framework',
'atc_api',
# Django ATC Demo UI
'bootstrap_themes',
'django_static_jquery',
'atc_demo_ui',
# Django ATC Profile Storage
'atc_profile_storage'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'atcui.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'atcui.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
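# Usage sketch (assumed, not part of this settings module): application code
# reads the custom ATC_API block through Django's settings object, e.g.:
#
#     from django.conf import settings
#     atcd_addr = (settings.ATC_API['ATCD_HOST'], settings.ATC_API['ATCD_PORT'])
#     timeout = settings.ATC_API.get('DEFAULT_TC_TIMEOUT', 24 * 60 * 60)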
| [source: github duydb2/ZTC, atcui/atcui/settings.py, bsd-3-clause]
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class SnapshotsActionsV2Test(base.BaseVolumeAdminTest):
@classmethod
def skip_checks(cls):
super(SnapshotsActionsV2Test, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder snapshot feature disabled")
@classmethod
def setup_clients(cls):
super(SnapshotsActionsV2Test, cls).setup_clients()
cls.client = cls.snapshots_client
@classmethod
def resource_setup(cls):
super(SnapshotsActionsV2Test, cls).resource_setup()
# Create a test shared volume for tests
vol_name = data_utils.rand_name(cls.__name__ + '-Volume')
cls.name_field = cls.special_fields['name_field']
params = {cls.name_field: vol_name}
cls.volume = \
cls.volumes_client.create_volume(**params)['volume']
cls.volumes_client.wait_for_volume_status(cls.volume['id'],
'available')
# Create a test shared snapshot for tests
snap_name = data_utils.rand_name(cls.__name__ + '-Snapshot')
params = {cls.name_field: snap_name}
cls.snapshot = cls.client.create_snapshot(
volume_id=cls.volume['id'], **params)['snapshot']
cls.client.wait_for_snapshot_status(cls.snapshot['id'],
'available')
@classmethod
def resource_cleanup(cls):
# Delete the test snapshot
cls.client.delete_snapshot(cls.snapshot['id'])
cls.client.wait_for_resource_deletion(cls.snapshot['id'])
# Delete the test volume
cls.volumes_client.delete_volume(cls.volume['id'])
cls.volumes_client.wait_for_resource_deletion(cls.volume['id'])
super(SnapshotsActionsV2Test, cls).resource_cleanup()
def tearDown(self):
# Set snapshot's status to available after test
status = 'available'
snapshot_id = self.snapshot['id']
self.admin_snapshots_client.reset_snapshot_status(snapshot_id,
status)
super(SnapshotsActionsV2Test, self).tearDown()
def _create_reset_and_force_delete_temp_snapshot(self, status=None):
# Create snapshot, reset snapshot status,
# and force delete temp snapshot
temp_snapshot = self.create_snapshot(volume_id=self.volume['id'])
if status:
self.admin_snapshots_client.\
reset_snapshot_status(temp_snapshot['id'], status)
self.admin_snapshots_client.\
force_delete_snapshot(temp_snapshot['id'])
self.client.wait_for_resource_deletion(temp_snapshot['id'])
def _get_progress_alias(self):
return 'os-extended-snapshot-attributes:progress'
@test.idempotent_id('3e13ca2f-48ea-49f3-ae1a-488e9180d535')
def test_reset_snapshot_status(self):
# Reset snapshot status to creating
status = 'creating'
self.admin_snapshots_client.\
reset_snapshot_status(self.snapshot['id'], status)
snapshot_get = self.admin_snapshots_client.show_snapshot(
self.snapshot['id'])['snapshot']
self.assertEqual(status, snapshot_get['status'])
@test.idempotent_id('41288afd-d463-485e-8f6e-4eea159413eb')
def test_update_snapshot_status(self):
# Reset snapshot status to creating
status = 'creating'
self.admin_snapshots_client.\
reset_snapshot_status(self.snapshot['id'], status)
# Update snapshot status to error
progress = '80%'
status = 'error'
progress_alias = self._get_progress_alias()
self.client.update_snapshot_status(self.snapshot['id'],
status=status, progress=progress)
snapshot_get = self.admin_snapshots_client.show_snapshot(
self.snapshot['id'])['snapshot']
self.assertEqual(status, snapshot_get['status'])
self.assertEqual(progress, snapshot_get[progress_alias])
@test.idempotent_id('05f711b6-e629-4895-8103-7ca069f2073a')
def test_snapshot_force_delete_when_snapshot_is_creating(self):
# test force delete when status of snapshot is creating
self._create_reset_and_force_delete_temp_snapshot('creating')
@test.idempotent_id('92ce8597-b992-43a1-8868-6316b22a969e')
def test_snapshot_force_delete_when_snapshot_is_deleting(self):
# test force delete when status of snapshot is deleting
self._create_reset_and_force_delete_temp_snapshot('deleting')
@test.idempotent_id('645a4a67-a1eb-4e8e-a547-600abac1525d')
def test_snapshot_force_delete_when_snapshot_is_error(self):
# test force delete when status of snapshot is error
self._create_reset_and_force_delete_temp_snapshot('error')
@test.idempotent_id('bf89080f-8129-465e-9327-b2f922666ba5')
def test_snapshot_force_delete_when_snapshot_is_error_deleting(self):
# test force delete when status of snapshot is error_deleting
self._create_reset_and_force_delete_temp_snapshot('error_deleting')
class SnapshotsActionsV1Test(SnapshotsActionsV2Test):
_api_version = 1
| [source: github nuagenetworks/tempest, tempest/api/volume/admin/test_snapshots_actions.py, apache-2.0]
import warnings
import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from .builder import PRIOR_GENERATORS
@PRIOR_GENERATORS.register_module()
class AnchorGenerator:
"""Standard anchor generator for 2D anchor-based detectors.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels in order (w, h).
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
scales (list[int] | None): Anchor scales for anchors in a single level.
It cannot be set at the same time if `octave_base_scale` and
`scales_per_octave` are set.
base_sizes (list[int] | None): The basic sizes
of anchors in multiple levels.
If None is given, strides will be used as base_sizes.
(If strides are non square, the shortest stride is taken.)
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. By default it is True in V2.0
octave_base_scale (int): The base scale of octave.
scales_per_octave (int): Number of scales for each octave.
`octave_base_scale` and `scales_per_octave` are usually used in
retinanet and the `scales` should be None when they are set.
centers (list[tuple[float, float]] | None): The centers of the anchor
relative to the feature grid center in multiple feature levels.
By default it is set to be None and not used. If a list of tuple of
float is given, they will be used to shift the centers of anchors.
center_offset (float): The offset of center in proportion to anchors'
width and height. By default it is 0 in V2.0.
Examples:
>>> from mmdet.core import AnchorGenerator
>>> self = AnchorGenerator([16], [1.], [1.], [9])
>>> all_anchors = self.grid_priors([(2, 2)], device='cpu')
>>> print(all_anchors)
[tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
[11.5000, -4.5000, 20.5000, 4.5000],
[-4.5000, 11.5000, 4.5000, 20.5000],
[11.5000, 11.5000, 20.5000, 20.5000]])]
>>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
>>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu')
>>> print(all_anchors)
[tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
[11.5000, -4.5000, 20.5000, 4.5000],
[-4.5000, 11.5000, 4.5000, 20.5000],
[11.5000, 11.5000, 20.5000, 20.5000]]), \
tensor([[-9., -9., 9., 9.]])]
"""
def __init__(self,
strides,
ratios,
scales=None,
base_sizes=None,
scale_major=True,
octave_base_scale=None,
scales_per_octave=None,
centers=None,
center_offset=0.):
# check center and center_offset
if center_offset != 0:
assert centers is None, 'center cannot be set when center_offset' \
f'!=0, {centers} is given.'
if not (0 <= center_offset <= 1):
raise ValueError('center_offset should be in range [0, 1], '
f'{center_offset} is given.')
if centers is not None:
assert len(centers) == len(strides), \
'The number of strides should be the same as centers, got ' \
f'{strides} and {centers}'
# calculate base sizes of anchors
self.strides = [_pair(stride) for stride in strides]
self.base_sizes = [min(stride) for stride in self.strides
] if base_sizes is None else base_sizes
assert len(self.base_sizes) == len(self.strides), \
'The number of strides should be the same as base sizes, got ' \
f'{self.strides} and {self.base_sizes}'
# calculate scales of anchors
assert ((octave_base_scale is not None
and scales_per_octave is not None) ^ (scales is not None)), \
'scales and octave_base_scale with scales_per_octave cannot' \
' be set at the same time'
if scales is not None:
self.scales = torch.Tensor(scales)
elif octave_base_scale is not None and scales_per_octave is not None:
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
scales = octave_scales * octave_base_scale
self.scales = torch.Tensor(scales)
else:
raise ValueError('Either scales or octave_base_scale with '
'scales_per_octave should be set')
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.ratios = torch.Tensor(ratios)
self.scale_major = scale_major
self.centers = centers
self.center_offset = center_offset
self.base_anchors = self.gen_base_anchors()
@property
def num_base_anchors(self):
"""list[int]: total number of base anchors in a feature grid"""
return self.num_base_priors
@property
def num_base_priors(self):
"""list[int]: The number of priors (anchors) at a point
on the feature grid"""
return [base_anchors.size(0) for base_anchors in self.base_anchors]
@property
def num_levels(self):
"""int: number of feature levels that the generator will be applied"""
return len(self.strides)
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
center = None
if self.centers is not None:
center = self.centers[i]
multi_level_base_anchors.append(
self.gen_single_level_base_anchors(
base_size,
scales=self.scales,
ratios=self.ratios,
center=center))
return multi_level_base_anchors
def gen_single_level_base_anchors(self,
base_size,
scales,
ratios,
center=None):
"""Generate base anchors of a single level.
Args:
base_size (int | float): Basic size of an anchor.
scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height
                and width of anchors in a single level.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature maps.
"""
w = base_size
h = base_size
if center is None:
x_center = self.center_offset * w
y_center = self.center_offset * h
else:
x_center, y_center = center
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
else:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchors = [
x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
y_center + 0.5 * hs
]
base_anchors = torch.stack(base_anchors, dim=-1)
return base_anchors
def _meshgrid(self, x, y, row_major=True):
"""Generate mesh grid of x and y.
Args:
x (torch.Tensor): Grids of x dimension.
y (torch.Tensor): Grids of y dimension.
            row_major (bool, optional): Whether to return x grids first.
Defaults to True.
Returns:
tuple[torch.Tensor]: The mesh grids of x and y.
"""
# use shape instead of len to keep tracing while exporting to onnx
xx = x.repeat(y.shape[0])
yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'):
"""Generate grid anchors in multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels.
dtype (:obj:`torch.dtype`): Dtype of priors.
Default: torch.float32.
device (str): The device where the anchors will be put on.
Return:
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature level, \
num_base_anchors is the number of anchors for that level.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_anchors = []
for i in range(self.num_levels):
anchors = self.single_level_grid_priors(
featmap_sizes[i], level_idx=i, dtype=dtype, device=device)
multi_level_anchors.append(anchors)
return multi_level_anchors
def single_level_grid_priors(self,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda'):
"""Generate grid anchors of a single level.
Note:
This function is usually called by method ``self.grid_priors``.
Args:
featmap_size (tuple[int]): Size of the feature maps.
level_idx (int): The index of corresponding feature map level.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
device (str, optional): The device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
        # First create the range with the default dtype, then convert to
        # the target `dtype` for onnx exporting.
shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
return all_anchors
def sparse_priors(self,
prior_idxs,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda'):
"""Generate sparse anchors according to the ``prior_idxs``.
Args:
prior_idxs (Tensor): The index of corresponding anchors
in the feature map.
featmap_size (tuple[int]): feature map size arrange as (h, w).
level_idx (int): The level index of corresponding feature
map.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
            device (:obj:`torch.device`): The device where the points are
                located.
Returns:
Tensor: Anchor with shape (N, 4), N should be equal to
the length of ``prior_idxs``.
"""
height, width = featmap_size
num_base_anchors = self.num_base_anchors[level_idx]
base_anchor_id = prior_idxs % num_base_anchors
x = (prior_idxs //
num_base_anchors) % width * self.strides[level_idx][0]
y = (prior_idxs // width //
num_base_anchors) % height * self.strides[level_idx][1]
priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \
self.base_anchors[level_idx][base_anchor_id, :].to(device)
return priors
def grid_anchors(self, featmap_sizes, device='cuda'):
"""Generate grid anchors in multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels.
device (str): Device where the anchors will be put on.
Return:
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature level, \
num_base_anchors is the number of anchors for that level.
"""
warnings.warn('``grid_anchors`` would be deprecated soon. '
'Please use ``grid_priors`` ')
assert self.num_levels == len(featmap_sizes)
multi_level_anchors = []
for i in range(self.num_levels):
anchors = self.single_level_grid_anchors(
self.base_anchors[i].to(device),
featmap_sizes[i],
self.strides[i],
device=device)
multi_level_anchors.append(anchors)
return multi_level_anchors
def single_level_grid_anchors(self,
base_anchors,
featmap_size,
stride=(16, 16),
device='cuda'):
"""Generate grid anchors of a single level.
Note:
This function is usually called by method ``self.grid_anchors``.
Args:
base_anchors (torch.Tensor): The base anchors of a feature grid.
featmap_size (tuple[int]): Size of the feature maps.
stride (tuple[int], optional): Stride of the feature map in order
(w, h). Defaults to (16, 16).
device (str, optional): Device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
warnings.warn(
'``single_level_grid_anchors`` would be deprecated soon. '
'Please use ``single_level_grid_priors`` ')
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
feat_h, feat_w = featmap_size
shift_x = torch.arange(0, feat_w, device=device) * stride[0]
shift_y = torch.arange(0, feat_h, device=device) * stride[1]
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
shifts = shifts.type_as(base_anchors)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
return all_anchors
def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
"""Generate valid flags of anchors in multiple feature levels.
Args:
featmap_sizes (list(tuple)): List of feature map sizes in
multiple feature levels.
pad_shape (tuple): The padded shape of the image.
device (str): Device where the anchors will be put on.
Return:
list(torch.Tensor): Valid flags of anchors in multiple levels.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_flags = []
for i in range(self.num_levels):
anchor_stride = self.strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = pad_shape[:2]
valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
flags = self.single_level_valid_flags((feat_h, feat_w),
(valid_feat_h, valid_feat_w),
self.num_base_anchors[i],
device=device)
multi_level_flags.append(flags)
return multi_level_flags
def single_level_valid_flags(self,
featmap_size,
valid_size,
num_base_anchors,
device='cuda'):
"""Generate the valid flags of anchor in a single feature map.
Args:
featmap_size (tuple[int]): The size of feature maps, arrange
as (h, w).
valid_size (tuple[int]): The valid size of the feature maps.
num_base_anchors (int): The number of base anchors.
device (str, optional): Device where the flags will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: The valid flags of each anchor in a single level \
feature map.
"""
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
valid = valid[:, None].expand(valid.size(0),
num_base_anchors).contiguous().view(-1)
return valid
def __repr__(self):
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}octave_base_scale='
repr_str += f'{self.octave_base_scale},\n'
repr_str += f'{indent_str}scales_per_octave='
repr_str += f'{self.scales_per_octave},\n'
        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
repr_str += f'{indent_str}centers={self.centers},\n'
repr_str += f'{indent_str}center_offset={self.center_offset})'
return repr_str
@PRIOR_GENERATORS.register_module()
class SSDAnchorGenerator(AnchorGenerator):
"""Anchor generator for SSD.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
min_sizes (list[float]): The list of minimum anchor sizes on each
level.
max_sizes (list[float]): The list of maximum anchor sizes on each
level.
basesize_ratio_range (tuple(float)): Ratio range of anchors. Being
used when not setting min_sizes and max_sizes.
input_size (int): Size of feature map, 300 for SSD300, 512 for
SSD512. Being used when not setting min_sizes and max_sizes.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. It is always set to be False in SSD.
"""
def __init__(self,
strides,
ratios,
min_sizes=None,
max_sizes=None,
basesize_ratio_range=(0.15, 0.9),
input_size=300,
scale_major=True):
assert len(strides) == len(ratios)
assert not (min_sizes is None) ^ (max_sizes is None)
self.strides = [_pair(stride) for stride in strides]
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
if min_sizes is None and max_sizes is None:
# use hard code to generate SSD anchors
self.input_size = input_size
assert mmcv.is_tuple_of(basesize_ratio_range, float)
self.basesize_ratio_range = basesize_ratio_range
# calculate anchor ratios and sizes
min_ratio, max_ratio = basesize_ratio_range
min_ratio = int(min_ratio * 100)
max_ratio = int(max_ratio * 100)
step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
min_sizes = []
max_sizes = []
for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
min_sizes.append(int(self.input_size * ratio / 100))
max_sizes.append(int(self.input_size * (ratio + step) / 100))
if self.input_size == 300:
if basesize_ratio_range[0] == 0.15: # SSD300 COCO
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
min_sizes.insert(0, int(self.input_size * 10 / 100))
max_sizes.insert(0, int(self.input_size * 20 / 100))
else:
raise ValueError(
'basesize_ratio_range[0] should be either 0.15'
'or 0.2 when input_size is 300, got '
f'{basesize_ratio_range[0]}.')
elif self.input_size == 512:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
else:
raise ValueError(
'When not setting min_sizes and max_sizes,'
'basesize_ratio_range[0] should be either 0.1'
'or 0.15 when input_size is 512, got'
f' {basesize_ratio_range[0]}.')
else:
raise ValueError(
'Only support 300 or 512 in SSDAnchorGenerator when '
'not setting min_sizes and max_sizes, '
f'got {self.input_size}.')
assert len(min_sizes) == len(max_sizes) == len(strides)
anchor_ratios = []
anchor_scales = []
for k in range(len(self.strides)):
scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
anchor_ratio = [1.]
for r in ratios[k]:
anchor_ratio += [1 / r, r] # 4 or 6 ratio
anchor_ratios.append(torch.Tensor(anchor_ratio))
anchor_scales.append(torch.Tensor(scales))
self.base_sizes = min_sizes
self.scales = anchor_scales
self.ratios = anchor_ratios
self.scale_major = scale_major
self.center_offset = 0
self.base_anchors = self.gen_base_anchors()
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(
base_size,
scales=self.scales[i],
ratios=self.ratios[i],
center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0,
torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors
def __repr__(self):
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}input_size={self.input_size},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}num_levels={self.num_levels},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str
@PRIOR_GENERATORS.register_module()
class LegacyAnchorGenerator(AnchorGenerator):
"""Legacy anchor generator used in MMDetection V1.x.
Note:
Difference to the V2.0 anchor generator:
1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
        2. The width/height are reduced by 1 when calculating the anchors' \
centers and corners to meet the V1.x coordinate system.
3. The anchors' corners are quantized.
Args:
strides (list[int] | list[tuple[int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
scales (list[int] | None): Anchor scales for anchors in a single level.
It cannot be set at the same time if `octave_base_scale` and
`scales_per_octave` are set.
base_sizes (list[int]): The basic sizes of anchors in multiple levels.
If None is given, strides will be used to generate base_sizes.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. By default it is True in V2.0
octave_base_scale (int): The base scale of octave.
scales_per_octave (int): Number of scales for each octave.
`octave_base_scale` and `scales_per_octave` are usually used in
retinanet and the `scales` should be None when they are set.
centers (list[tuple[float, float]] | None): The centers of the anchor
relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of tuple of
            float is given, they will be used to shift the centers of anchors.
center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0 but it should be 0.5
            in v1.x models.
Examples:
>>> from mmdet.core import LegacyAnchorGenerator
>>> self = LegacyAnchorGenerator(
>>> [16], [1.], [1.], [9], center_offset=0.5)
>>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
>>> print(all_anchors)
[tensor([[ 0., 0., 8., 8.],
[16., 0., 24., 8.],
[ 0., 16., 8., 24.],
[16., 16., 24., 24.]])]
"""
def gen_single_level_base_anchors(self,
base_size,
scales,
ratios,
center=None):
"""Generate base anchors of a single level.
Note:
            The width/height of anchors are reduced by 1 when calculating \
the centers and corners to meet the V1.x coordinate system.
Args:
base_size (int | float): Basic size of an anchor.
scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height
                and width of anchors in a single level.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature map.
"""
w = base_size
h = base_size
if center is None:
x_center = self.center_offset * (w - 1)
y_center = self.center_offset * (h - 1)
else:
x_center, y_center = center
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
else:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchors = [
x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
]
base_anchors = torch.stack(base_anchors, dim=-1).round()
return base_anchors
@PRIOR_GENERATORS.register_module()
class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
"""Legacy anchor generator used in MMDetection V1.x.
The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
can be found in `LegacyAnchorGenerator`.
"""
def __init__(self,
strides,
ratios,
basesize_ratio_range,
input_size=300,
scale_major=True):
super(LegacySSDAnchorGenerator, self).__init__(
strides=strides,
ratios=ratios,
basesize_ratio_range=basesize_ratio_range,
input_size=input_size,
scale_major=scale_major)
self.centers = [((stride - 1) / 2., (stride - 1) / 2.)
for stride in strides]
self.base_anchors = self.gen_base_anchors()
@PRIOR_GENERATORS.register_module()
class YOLOAnchorGenerator(AnchorGenerator):
"""Anchor generator for YOLO.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
base_sizes (list[list[tuple[int, int]]]): The basic sizes
of anchors in multiple levels.
"""
def __init__(self, strides, base_sizes):
self.strides = [_pair(stride) for stride in strides]
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
self.base_sizes = []
num_anchor_per_level = len(base_sizes[0])
for base_sizes_per_level in base_sizes:
assert num_anchor_per_level == len(base_sizes_per_level)
self.base_sizes.append(
[_pair(base_size) for base_size in base_sizes_per_level])
self.base_anchors = self.gen_base_anchors()
@property
def num_levels(self):
"""int: number of feature levels that the generator will be applied"""
return len(self.base_sizes)
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_sizes_per_level in enumerate(self.base_sizes):
center = None
if self.centers is not None:
center = self.centers[i]
multi_level_base_anchors.append(
self.gen_single_level_base_anchors(base_sizes_per_level,
center))
return multi_level_base_anchors
def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
"""Generate base anchors of a single level.
Args:
base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
anchors.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature maps.
"""
x_center, y_center = center
base_anchors = []
for base_size in base_sizes_per_level:
w, h = base_size
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchor = torch.Tensor([
x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
y_center + 0.5 * h
])
base_anchors.append(base_anchor)
base_anchors = torch.stack(base_anchors, dim=0)
return base_anchors
def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
"""Generate responsible anchor flags of grid cells in multiple scales.
Args:
featmap_sizes (list(tuple)): List of feature map sizes in multiple
feature levels.
gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
device (str): Device where the anchors will be put on.
Return:
list(torch.Tensor): responsible flags of anchors in multiple level
"""
assert self.num_levels == len(featmap_sizes)
multi_level_responsible_flags = []
for i in range(self.num_levels):
anchor_stride = self.strides[i]
flags = self.single_level_responsible_flags(
featmap_sizes[i],
gt_bboxes,
anchor_stride,
self.num_base_anchors[i],
device=device)
multi_level_responsible_flags.append(flags)
return multi_level_responsible_flags
def single_level_responsible_flags(self,
featmap_size,
gt_bboxes,
stride,
num_base_anchors,
device='cuda'):
"""Generate the responsible flags of anchor in a single feature map.
Args:
featmap_size (tuple[int]): The size of feature maps.
gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
stride (tuple(int)): stride of current level
num_base_anchors (int): The number of base anchors.
device (str, optional): Device where the flags will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: The valid flags of each anchor in a single level \
feature map.
"""
feat_h, feat_w = featmap_size
gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long()
gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()
# row major indexing
gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x
responsible_grid = torch.zeros(
feat_h * feat_w, dtype=torch.uint8, device=device)
responsible_grid[gt_bboxes_grid_idx] = 1
responsible_grid = responsible_grid[:, None].expand(
responsible_grid.size(0), num_base_anchors).contiguous().view(-1)
return responsible_grid
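# Standalone sketch (not part of the original file) of the flat-index
# arithmetic behind AnchorGenerator.sparse_priors(): grid anchors are laid
# out cell-major with the A base anchors varying fastest, so a flat index i
# on a feature map of width W and height H decomposes as below. A, W and H
# are illustrative values.
if __name__ == '__main__':
    A, W, H = 3, 4, 2
    i = torch.arange(A * W * H)            # every flat prior index
    base_anchor_id = i % A                 # which base anchor in the cell
    grid_x = (i // A) % W                  # column of the grid cell
    grid_y = i // A // W                   # row of the grid cell
    # matches the form used in sparse_priors(): i // W // A gives the same row
    assert torch.equal(grid_y, (i // W // A) % H)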
| [source: github open-mmlab/mmdetection, mmdet/core/anchor/anchor_generator.py, apache-2.0]
import unittest
class TestImporters(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| [source: github rbrecheisen/pyminer, tests/test_importers.py, apache-2.0]
import pandas as pd
from shapely.geometry import MultiLineString, MultiPoint, MultiPolygon
from shapely.geometry.base import BaseGeometry
_multi_type_map = {
"Point": MultiPoint,
"LineString": MultiLineString,
"Polygon": MultiPolygon,
}
def collect(x, multi=False):
"""
Collect single part geometries into their Multi* counterpart
Parameters
----------
x : an iterable or Series of Shapely geometries, a GeoSeries, or
a single Shapely geometry
multi : boolean, default False
if True, force returned geometries to be Multi* even if they
only have one component.
"""
if isinstance(x, BaseGeometry):
x = [x]
elif isinstance(x, pd.Series):
x = list(x)
# We cannot create GeometryCollection here so all types
# must be the same. If there is more than one element,
# they cannot be Multi*, i.e., can't pass in combination of
# Point and MultiPoint... or even just MultiPoint
t = x[0].type
if not all(g.type == t for g in x):
raise ValueError("Geometry type must be homogeneous")
if len(x) > 1 and t.startswith("Multi"):
raise ValueError("Cannot collect {0}. Must have single geometries".format(t))
if len(x) == 1 and (t.startswith("Multi") or not multi):
# If there's only one single part geom and we're not forcing to
# multi, then just return it
return x[0]
return _multi_type_map[t](x)
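# Minimal usage sketch (assumed, not part of the original module):
if __name__ == "__main__":
    from shapely.geometry import Point

    pts = [Point(0, 0), Point(1, 1)]
    print(collect(pts).geom_type)                      # MultiPoint
    print(collect(Point(2, 2)).geom_type)              # Point (multi=False)
    print(collect(Point(2, 2), multi=True).geom_type)  # MultiPoint, one part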
| [source: github jorisvandenbossche/geopandas, geopandas/tools/util.py, bsd-3-clause]
from __future__ import unicode_literals
import asyncore
import email
from email.mime.text import MIMEText
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from smtplib import SMTPException
from ssl import SSLError
from django.core import mail
from django.core.mail import (EmailMessage, mail_admins, mail_managers,
EmailMultiAlternatives, send_mail, send_mass_mail)
from django.core.mail.backends import console, dummy, locmem, filebased, smtp
from django.core.mail.message import BadHeaderError
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.encoding import force_str, force_text
from django.utils.six import PY3, StringIO, string_types
from django.utils.translation import ugettext_lazy
class HeadersCheckMixin(object):
def assertMessageHasHeaders(self, message, headers):
"""
Check that :param message: has all :param headers: headers.
:param message: can be an instance of an email.Message subclass or a
        string with the contents of an email message.
:param headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, string_types):
just_headers = message.split('\n\n', 1)[0]
hlist = just_headers.split('\n')
pairs = [hl.split(':', 1) for hl in hlist]
msg_headers = {(n, v.lstrip()) for (n, v) in pairs}
else:
msg_headers = set(message.items())
self.assertTrue(headers.issubset(msg_headers), msg='Message is missing '
'the following headers: %s' % (headers - msg_headers),)
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def test_ascii(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], 'from@example.com')
self.assertEqual(message['To'], 'to@example.com')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], 'from@example.com')
self.assertEqual(message['To'], 'to@example.com, other@example.com')
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'cc@example.com'])
# Test multiple CC with multiple To
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'])
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com'])
# Testing with Bcc
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'], bcc=['bcc@example.com'])
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
def test_recipients_as_tuple(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ('to@example.com', 'other@example.com'), cc=('cc@example.com', 'cc.other@example.com'), bcc=('bcc@example.com',))
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
def test_header_injection(self):
email = EmailMessage('Subject\nInjection Test', 'Content', 'from@example.com', ['to@example.com'])
self.assertRaises(BadHeaderError, email.message)
email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', 'from@example.com', ['to@example.com'])
self.assertRaises(BadHeaderError, email.message)
def test_space_continuation(self):
"""
Test for space continuation character in long (ascii) subject headers (#7747)
"""
email = EmailMessage('Long subject lines that get wrapped should contain a space continuation character to get expected behavior in Outlook and Thunderbird', 'Content', 'from@example.com', ['to@example.com'])
message = email.message()
# Note that in Python 3, maximum line length has increased from 76 to 78
self.assertEqual(message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n character to get expected behavior in Outlook and Thunderbird')
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'], headers=headers)
self.assertMessageHasHeaders(email.message(), {
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', 'from@example.com'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', 'to@example.com'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
})
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', 'bounce@example.com',
['list-subscriber@example.com', 'list-subscriber2@example.com'],
headers={'To': 'mailing-list@example.com'})
message = email.message()
self.assertEqual(message['To'], 'mailing-list@example.com')
self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', 'bounce@example.com',
['list-subscriber@example.com', 'list-subscriber2@example.com'])
message = email.message()
self.assertEqual(message['To'], 'list-subscriber@example.com, list-subscriber2@example.com')
self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains unicode,
make sure the email addresses are parsed correctly (especially with
regards to commas)
"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Firstname Sürname" <to@example.com>', 'other@example.com'])
self.assertEqual(email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <to@example.com>, other@example.com')
email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Sürname, Firstname" <to@example.com>', 'other@example.com'])
self.assertEqual(email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <to@example.com>, other@example.com')
def test_unicode_headers(self):
email = EmailMessage("Gżegżółka", "Content", "from@example.com", ["to@example.com"],
headers={"Sender": '"Firstname Sürname" <sender@example.com>',
"Comments": 'My Sürname is non-ASCII'})
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <sender@example.com>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', '"Sürname, Firstname" <to@example.com>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <to@example.com>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with other encodings
than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', 'from@example.com', ['other@example.com'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'),
('Subject', 'Subject'),
('From', 'from@example.com'),
('To', 'other@example.com')})
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
# Make sure MIME attachments also works correctly with other encodings than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, 'from@example.com', ['to@example.com'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(payload0, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(payload0.as_string().endswith('\n\nFirstname S=FCrname is a great guy.'))
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(payload1, {
('MIME-Version', '1.0'),
('Content-Type', 'text/html; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(payload1.as_string().endswith('\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>'))
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_str = msg.message().as_string()
message = email.message_from_string(msg_str)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_str = msg.message().as_string()
message = email.message_from_string(msg_str)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_dummy_backend(self):
"""
        Make sure that the dummy backend returns the correct number of sent messages
"""
connection = dummy.EmailBackend()
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
        Make sure that get_connection() accepts arbitrary keyword arguments
        that might be used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend)
tmp_dir = tempfile.mkdtemp()
try:
self.assertIsInstance(mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend)
finally:
shutil.rmtree(tmp_dir)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', 'nobody@example.com')],
MANAGERS=[('nobody', 'nobody@example.com')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', 'from1@example.com', ['to1@example.com']),
('Subject2', 'Content2', 'from2@example.com', ['to2@example.com']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage('Subject', 'From the future', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertFalse('>From the future' in email.message().as_string())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage('Subject', 'UTF-8 encoded body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertFalse('Content-Transfer-Encoding: base64' in msg.message().as_string())
# Ticket #11212
        # Shouldn't use quoted-printable; should detect that the content can be represented with 7-bit data
msg = EmailMessage('Subject', 'Body with only ASCII characters.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_string()
self.assertFalse('Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue('Content-Transfer-Encoding: 7bit' in s)
        # Shouldn't use quoted-printable; should detect that the content can be represented with 8-bit data
msg = EmailMessage('Subject', 'Body with latin characters: àáä.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_string()
self.assertFalse(str('Content-Transfer-Encoding: quoted-printable') in s)
self.assertTrue(str('Content-Transfer-Encoding: 8bit') in s)
msg = EmailMessage('Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_string()
self.assertFalse(str('Content-Transfer-Encoding: quoted-printable') in s)
self.assertTrue(str('Content-Transfer-Encoding: 8bit') in s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage('Child Subject', 'Some body of child message', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertTrue(str('Child Subject') in parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertTrue(str('Child Subject') in parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
parent_msg.attach(content=child_msg, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertTrue(str('Child Subject') in parent_s)
class PythonGlobalState(SimpleTestCase):
"""
    Tests for #12422 -- Django's smarts (#2472/#11212) about the charset of
    utf-8 text parts shouldn't pollute the global charset registry of Python's
    email package when django.core.mail.message is imported.
"""
def test_utf8(self):
txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
self.assertTrue('Content-Transfer-Encoding: base64' in txt.as_string())
def test_7bit(self):
txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
self.assertTrue('Content-Transfer-Encoding: base64' in txt.as_string())
def test_8bit_latin(self):
txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
self.assertTrue(str('Content-Transfer-Encoding: base64') in txt.as_string())
def test_8bit_non_latin(self):
txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
self.assertTrue(str('Content-Transfer-Encoding: base64') in txt.as_string())
class BaseEmailBackendTests(HeadersCheckMixin, object):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError
def flush_mailbox(self):
raise NotImplementedError
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [
m.as_string() for m in mailbox]))
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "from@example.com")
self.assertEqual(message.get_all("to"), ["to@example.com"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(force_text(message.get_payload()), 'Je t\'aime très fort')
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', 'from@example.com', ['to@example.com'])
email2 = EmailMessage('Subject', 'Content2', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email1, email2])
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), "Content1")
self.assertEqual(messages[1].get_payload(), "Content2")
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <from@example.com>',
["to@example.com"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <from@example.com>")
def test_plaintext_send_mail(self):
"""
        Test send_mail without the html_message parameter;
        regression test for adding the html_message parameter to send_mail()
"""
send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'])
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message.get_content_type(), 'text/plain')
def test_html_send_mail(self):
"""Test html_message argument to send_mail"""
send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'], html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(MANAGERS=[('nobody', 'nobody@example.com')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', 'nobody@example.com')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', 'nobody+admin@example.com')],
MANAGERS=[('nobody', 'nobody+manager@example.com')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=(), MANAGERS=())
def test_empty_admins(self):
"""
        Test that mail_admins/mail_managers don't connect to the mail server
        if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', 'from@example.com'),
('To', 'to@example.com'),
('Cc', 'cc@example.com')})
self.assertIn('\nDate: ', message.as_string())
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com',
['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.assertEqual(message.get('cc'), 'cc@xn--4ca9at.com')
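    # Illustration (hedged, not part of the original tests): the xn--4ca9at
    # form asserted above is the IDNA/punycode encoding of the non-ASCII
    # domain label, e.g.:
    #
    #   >>> 'öäü'.encode('idna')
    #   b'xn--4ca9at'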
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_close_connection(self):
"""
        Test that connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
try:
conn.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super(LocmemBackendTests, self).tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
        Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', 'from@example.com', ['to@example.com'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super(FileBackendTests, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super(FileBackendTests, self).tearDown()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'r') as fp:
session = force_text(fp.read()).split('\n' + ('-' * 79) + '\n')
messages.extend(email.message_from_string(force_str(m)) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0])) as fp:
message = email.message_from_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@example.com')
self.assertEqual(message.get('to'), 'to@example.com')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super(ConsoleBackendTests, self).setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super(ConsoleBackendTests, self).tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = force_text(self.stream.getvalue()).split('\n' + ('-' * 79) + '\n')
return [email.message_from_string(force_str(m)) for m in messages if m]
def test_console_stream_kwarg(self):
"""
Test that the console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
self.assertMessageHasHeaders(s.getvalue(), {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', 'from@example.com'),
('To', 'to@example.com')})
self.assertIn('\nDate: ', s.getvalue())
class FakeSMTPChannel(smtpd.SMTPChannel):
def collect_incoming_data(self, data):
try:
super(FakeSMTPChannel, self).collect_incoming_data(data)
except UnicodeDecodeError:
# ignore decode error in SSL/TLS connection tests as we only care
# whether the connection attempt was made
pass
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
channel_class = FakeSMTPChannel
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, *args, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
def process_message(self, peer, mailfrom, rcpttos, data):
m = email.message_from_string(data)
if PY3:
maddr = email.utils.parseaddr(m.get('from'))[1]
else:
maddr = email.Utils.parseaddr(m.get('from'))[1]
if mailfrom != maddr:
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
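# Hedged usage sketch for FakeSMTPServer (mirrors SMTPBackendTests below):
# bind to an ephemeral port, start the worker thread, read back the chosen
# port from the socket, and stop the server when done.
#
#   server = FakeSMTPServer(('127.0.0.1', 0), None)
#   server.start()
#   port = server.socket.getsockname()[1]
#   # ...point EMAIL_HOST/EMAIL_PORT at 127.0.0.1:port and send mail...
#   server.stop()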
class SMTPBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
@classmethod
def setUpClass(cls):
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.server.start()
@classmethod
def tearDownClass(cls):
cls._settings_override.disable()
cls.server.stop()
def setUp(self):
super(SMTPBackendTests, self).setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super(SMTPBackendTests, self).tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_server_stopped(self):
"""
Test that closing the backend while the SMTP server is stopped doesn't
raise an exception.
"""
backend = smtp.EmailBackend(username='', password='')
backend.open()
self.server.stop()
try:
backend.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_override_settings(self):
backend = smtp.EmailBackend(use_tls=False)
self.assertFalse(backend.use_tls)
def test_email_tls_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_tls)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_override_settings(self):
backend = smtp.EmailBackend(use_ssl=False)
self.assertFalse(backend.use_ssl)
def test_email_ssl_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_ssl)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_attempts_starttls(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
self.assertRaisesMessage(SMTPException,
'STARTTLS extension not supported by server.', backend.open)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_attempts_ssl_connection(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
self.assertRaises(SSLError, backend.open)
|
{
"content_hash": "925604b696e22760e85473f525e30095",
"timestamp": "",
"source": "github",
"line_count": 932,
"max_line_length": 216,
"avg_line_length": 46.32081545064378,
"alnum_prop": 0.6340367376247944,
"repo_name": "adambrenecki/django",
"id": "bb57ca37ff5bf9b689d6508d4db60419f171dec7",
"size": "43261",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/mail/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50381"
},
{
"name": "JavaScript",
"bytes": "100819"
},
{
"name": "Python",
"bytes": "8829204"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
"""Module containing the views for GSoC documents page.
"""
from django.conf.urls.defaults import url as django_url
from soc.logic.exceptions import AccessViolation
from soc.logic.exceptions import NotFound
from soc.models.document import Document
from soc.views import document
from soc.views.base_templates import ProgramSelect
from soc.views.helper import url_patterns
from soc.views.helper.access_checker import isSet
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.forms import GSoCModelForm
from soc.modules.gsoc.views.helper.url_patterns import url
from soc.modules.gsoc.views.helper import url_patterns as gsoc_url_patterns
class GSoCDocumentForm(GSoCModelForm):
"""Django form for creating documents.
"""
class Meta:
model = Document
exclude = [
'scope', 'scope_path', 'author', 'modified_by', 'prefix', 'home_for',
'link_id', 'read_access', 'write_access', 'is_featured'
]
class EditDocumentPage(RequestHandler):
"""Encapsulate all the methods required to edit documents.
"""
def templatePath(self):
return 'v2/modules/gsoc/document/base.html'
def djangoURLPatterns(self):
return [
url(r'document/edit/%s$' % gsoc_url_patterns.DOCUMENT, self,
name='edit_gsoc_document'),
url(r'document/edit/%s$' % gsoc_url_patterns.ORG_DOCUMENT, self,
name='edit_gsoc_document'),
]
def checkAccess(self):
self.mutator.documentKeyNameFromKwargs()
assert isSet(self.data.key_name)
self.check.canEditDocument()
def context(self):
form = GSoCDocumentForm(self.data.POST or None, instance=self.data.document)
if self.data.document:
page_name = 'Edit %s' % self.data.document.title
else:
page_name = 'Create new Document'
return {
'page_name': page_name,
'document_form': form,
}
def post(self):
"""Handler for HTTP POST request.
"""
form = GSoCDocumentForm(self.data.POST or None, instance=self.data.document)
validated_document = document.validateForm(self.data, form)
if validated_document:
self.redirect.document(validated_document)
self.redirect.to('edit_gsoc_document')
else:
self.get()
class DocumentPage(RequestHandler):
"""Encapsulate all the methods required to show documents.
"""
def templatePath(self):
return 'v2/modules/gsoc/base.html'
def djangoURLPatterns(self):
return [
url(r'document/show/%s$' % gsoc_url_patterns.DOCUMENT, self,
name='show_gsoc_document'),
url(r'document/show/%s$' % gsoc_url_patterns.ORG_DOCUMENT, self,
name='show_gsoc_document'),
django_url(r'^document/show/%s$' % gsoc_url_patterns.DOCUMENT,
self),
django_url(r'^document/show/%s$' % gsoc_url_patterns.ORG_DOCUMENT,
self),
]
def checkAccess(self):
self.mutator.documentKeyNameFromKwargs()
if not self.data.document:
raise NotFound("No such document: '%s'" % self.data.key_name)
self.check.canViewDocument()
def context(self):
return {
'tmpl': document.Document(self.data, self.data.document),
'page_name': self.data.document.title,
}
class EventsPage(RequestHandler):
"""Encapsulates all the methods required to show the events page.
"""
def templatePath(self):
return 'v2/modules/gsoc/document/events.html'
def djangoURLPatterns(self):
return [
url(r'events/%s$' % url_patterns.PROGRAM, self,
name='gsoc_events')
]
def checkAccess(self):
self.data.document = self.data.program.events_page
self.check.canViewDocument()
def context(self):
return {
'document': self.data.program.events_page,
'frame_url': self.data.program.events_frame_url,
'page_name': 'Events and Timeline',
}
class DocumentList(document.DocumentList):
"""Template for list of documents.
"""
def __init__(self, request, data):
super(DocumentList, self).__init__(request, data, 'edit_gsoc_document')
def templatePath(self):
return 'v2/modules/gsoc/document/_document_list.html'
class DocumentListPage(RequestHandler):
"""View for the list documents page.
"""
def templatePath(self):
return 'v2/modules/gsoc/document/document_list.html'
def djangoURLPatterns(self):
return [
url(r'documents/%s$' % url_patterns.PROGRAM, self,
name='list_gsoc_documents'),
]
def checkAccess(self):
self.check.isHost()
def jsonContext(self):
list_content = DocumentList(self.request, self.data).getListData()
if not list_content:
raise AccessViolation(
'You do not have access to this data')
return list_content.content()
def context(self):
return {
'page_name': "Documents for %s" % self.data.program.name,
'document_list': DocumentList(self.request, self.data),
'program_select': ProgramSelect(self.data, 'list_gsoc_documents'),
}
|
{
"content_hash": "d67ad84bd6604a386250ef216331001f",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 80,
"avg_line_length": 27.988826815642458,
"alnum_prop": 0.6708582834331337,
"repo_name": "adviti/melange",
"id": "99be5b73f9a460d81a7d9b2b7b6a4e82a0acff06",
"size": "5620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/modules/gsoc/views/document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import os
import yaml
from flask import Config
class SkeletonConfig(Config):
def from_yaml(self, filename):
"""Reads the given yml file and imports all values"""
if not os.path.exists(filename):
raise Exception('Config file \"{}\" does not exist'.format(filename))
with open(filename) as f:
            ymlconf = yaml.safe_load(f)  # safe_load avoids executing arbitrary YAML tags
ymlconf = upper_keys(ymlconf)
for key in ymlconf:
self[key] = ymlconf[key]
def get(self, value_name, default=None):
"""Access config values using a dot notation.
This method takes the name of a config value in dot notation::
app.config.get('db.host', 'localhost')
        would return the value of app.config['DB']['HOST'] if it exists;
        otherwise it returns the string 'localhost'.
"""
data = self
        # need to uppercase the value name since config values are stored in
        # uppercase
for key in value_name.upper().split("."):
if key in data:
data = data[key]
else:
return default
return data
def upper_keys(x):
if isinstance(x, list):
return [upper_keys(v) for v in x]
if isinstance(x, dict):
return dict((k.upper(), upper_keys(v)) for k, v in x.iteritems())
return x
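if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): wire
    # SkeletonConfig in as Flask's config class and read a nested value via
    # the dotted-path get(). "settings.yml" and the db.host key are
    # hypothetical, for illustration only.
    from flask import Flask

    Flask.config_class = SkeletonConfig  # Flask builds app.config from this class
    app = Flask(__name__)
    app.config.from_yaml("settings.yml")
    print(app.config.get("db.host", "localhost"))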
|
{
"content_hash": "aba88cc3a089599dd46d5bdeab8fb8a0",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 28.377777777777776,
"alnum_prop": 0.6147220046985121,
"repo_name": "ryakad/flask-skeleton",
"id": "188569404e3deb5d63c9f97c0f3eb23693422011",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skeleton/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "770"
},
{
"name": "JavaScript",
"bytes": "200"
},
{
"name": "Python",
"bytes": "5742"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Methodmint
from testimonials.models import *
admin.site.register(Testimonial)
|
{
"content_hash": "9a2d75ee4e729eb2e43e88b0920dbf80",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 33,
"avg_line_length": 14.625,
"alnum_prop": 0.8034188034188035,
"repo_name": "mfitzp/django-golifescience",
"id": "ec02bcdf536c82ea56c42bb96730c5c58323f5a8",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/testimonials/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "201813"
}
],
"symlink_target": ""
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._type import TypeValidator
from ._title import TitleValidator
from ._tickvalssrc import TickvalssrcValidator
from ._tickvals import TickvalsValidator
from ._ticktextsrc import TicktextsrcValidator
from ._ticktext import TicktextValidator
from ._ticksuffix import TicksuffixValidator
from ._tickprefix import TickprefixValidator
from ._tickmode import TickmodeValidator
from ._tickformatstopdefaults import TickformatstopdefaultsValidator
from ._tickformatstops import TickformatstopsValidator
from ._tickformat import TickformatValidator
from ._tickfont import TickfontValidator
from ._tickangle import TickangleValidator
from ._tick0 import Tick0Validator
from ._startlinewidth import StartlinewidthValidator
from ._startlinecolor import StartlinecolorValidator
from ._startline import StartlineValidator
from ._smoothing import SmoothingValidator
from ._showticksuffix import ShowticksuffixValidator
from ._showtickprefix import ShowtickprefixValidator
from ._showticklabels import ShowticklabelsValidator
from ._showline import ShowlineValidator
from ._showgrid import ShowgridValidator
from ._showexponent import ShowexponentValidator
from ._separatethousands import SeparatethousandsValidator
from ._rangemode import RangemodeValidator
from ._range import RangeValidator
from ._nticks import NticksValidator
from ._minorgridwidth import MinorgridwidthValidator
from ._minorgriddash import MinorgriddashValidator
from ._minorgridcount import MinorgridcountValidator
from ._minorgridcolor import MinorgridcolorValidator
from ._minexponent import MinexponentValidator
from ._linewidth import LinewidthValidator
from ._linecolor import LinecolorValidator
from ._labelsuffix import LabelsuffixValidator
from ._labelprefix import LabelprefixValidator
from ._labelpadding import LabelpaddingValidator
from ._gridwidth import GridwidthValidator
from ._griddash import GriddashValidator
from ._gridcolor import GridcolorValidator
from ._fixedrange import FixedrangeValidator
from ._exponentformat import ExponentformatValidator
from ._endlinewidth import EndlinewidthValidator
from ._endlinecolor import EndlinecolorValidator
from ._endline import EndlineValidator
from ._dtick import DtickValidator
from ._color import ColorValidator
from ._cheatertype import CheatertypeValidator
from ._categoryorder import CategoryorderValidator
from ._categoryarraysrc import CategoryarraysrcValidator
from ._categoryarray import CategoryarrayValidator
from ._autotypenumbers import AutotypenumbersValidator
from ._autorange import AutorangeValidator
from ._arraytick0 import Arraytick0Validator
from ._arraydtick import ArraydtickValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._type.TypeValidator",
"._title.TitleValidator",
"._tickvalssrc.TickvalssrcValidator",
"._tickvals.TickvalsValidator",
"._ticktextsrc.TicktextsrcValidator",
"._ticktext.TicktextValidator",
"._ticksuffix.TicksuffixValidator",
"._tickprefix.TickprefixValidator",
"._tickmode.TickmodeValidator",
"._tickformatstopdefaults.TickformatstopdefaultsValidator",
"._tickformatstops.TickformatstopsValidator",
"._tickformat.TickformatValidator",
"._tickfont.TickfontValidator",
"._tickangle.TickangleValidator",
"._tick0.Tick0Validator",
"._startlinewidth.StartlinewidthValidator",
"._startlinecolor.StartlinecolorValidator",
"._startline.StartlineValidator",
"._smoothing.SmoothingValidator",
"._showticksuffix.ShowticksuffixValidator",
"._showtickprefix.ShowtickprefixValidator",
"._showticklabels.ShowticklabelsValidator",
"._showline.ShowlineValidator",
"._showgrid.ShowgridValidator",
"._showexponent.ShowexponentValidator",
"._separatethousands.SeparatethousandsValidator",
"._rangemode.RangemodeValidator",
"._range.RangeValidator",
"._nticks.NticksValidator",
"._minorgridwidth.MinorgridwidthValidator",
"._minorgriddash.MinorgriddashValidator",
"._minorgridcount.MinorgridcountValidator",
"._minorgridcolor.MinorgridcolorValidator",
"._minexponent.MinexponentValidator",
"._linewidth.LinewidthValidator",
"._linecolor.LinecolorValidator",
"._labelsuffix.LabelsuffixValidator",
"._labelprefix.LabelprefixValidator",
"._labelpadding.LabelpaddingValidator",
"._gridwidth.GridwidthValidator",
"._griddash.GriddashValidator",
"._gridcolor.GridcolorValidator",
"._fixedrange.FixedrangeValidator",
"._exponentformat.ExponentformatValidator",
"._endlinewidth.EndlinewidthValidator",
"._endlinecolor.EndlinecolorValidator",
"._endline.EndlineValidator",
"._dtick.DtickValidator",
"._color.ColorValidator",
"._cheatertype.CheatertypeValidator",
"._categoryorder.CategoryorderValidator",
"._categoryarraysrc.CategoryarraysrcValidator",
"._categoryarray.CategoryarrayValidator",
"._autotypenumbers.AutotypenumbersValidator",
"._autorange.AutorangeValidator",
"._arraytick0.Arraytick0Validator",
"._arraydtick.ArraydtickValidator",
],
)
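# Hedged note on the lazy branch above: on Python >= 3.7, relative_import
# returns a module-level __getattr__ (PEP 562), so a validator submodule is
# imported only on first attribute lookup, e.g.:
#
#   from plotly.validators.carpet.aaxis import TickmodeValidator
#
# triggers the import of ._tickmode at first access rather than at package
# import time.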
|
{
"content_hash": "8782033d60754f1104b6506f4e86b5b4",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 72,
"avg_line_length": 46.661417322834644,
"alnum_prop": 0.7013162335470806,
"repo_name": "plotly/plotly.py",
"id": "73da7a7073553d4e4abc35764ec6d5fdef5e4b92",
"size": "5926",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/carpet/aaxis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.watchers.github.team
:platform: Unix
:synopsis: Watcher for GitHub Organization Teams.
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
from security_monkey import app
from security_monkey.common.github.util import get_github_creds, iter_org, strip_url_fields
from security_monkey.datastore import Account
from security_monkey.decorators import record_exception
from security_monkey.exceptions import InvalidResponseCodeFromGitHubError
from security_monkey.watcher import Watcher, ChangeItem
import requests
GITHUB_URL = "https://api.github.com/"
class GitHubTeam(Watcher):
index = 'team'
i_am_singular = 'team'
i_am_plural = 'teams'
account_type = 'GitHub'
def __init__(self, accounts=None, debug=False):
super(GitHubTeam, self).__init__(accounts=accounts, debug=debug)
self.honor_ephemerals = True
self.ephemeral_paths = []
self.github_creds = get_github_creds(self.accounts)
def slurp(self):
@record_exception(source="{index}-watcher".format(index=self.index))
def fetch_org_teams(**kwargs):
account = Account.query.filter(Account.name == kwargs["account_name"]).first()
item_list = []
# Fetch teams:
app.logger.debug("Fetching organization teams for: {}".format(account.identifier))
teams = strip_url_fields(self.list_org_teams(account.identifier))
for team in teams:
item_list.append(GitHubTeamItem(
account=account.name,
name=team["name"],
arn="{}/team/{}".format(account.identifier, team["slug"]),
config=team,
source_watcher=self
))
return item_list, kwargs["exception_map"]
@iter_org(orgs=self.accounts)
def slurp_items(**kwargs):
# Are we skipping this org?
if self.check_ignore_list(kwargs["account_name"]):
app.logger.debug("Skipping ignored account: {}".format(kwargs["account_name"]))
return [], kwargs["exception_map"]
# Exception handling complexities...
results = fetch_org_teams(**kwargs)
if not results:
return [], kwargs["exception_map"]
return results
items, exc = slurp_items(index=self.index)
return items, exc
def list_org_teams(self, org):
headers = {
'Authorization': 'token {}'.format(self.github_creds[org])
}
params = {
"page": 1,
}
done = False
teams = []
while not done:
url = "{}orgs/{}/teams".format(GITHUB_URL, org)
result = requests.get(url, headers=headers, params=params)
if result.status_code != 200:
raise InvalidResponseCodeFromGitHubError(org, result.status_code)
if not result.links.get("last"):
done = True
else:
params["page"] += 1
result_json = result.json()
teams += result_json
return teams
class GitHubTeamItem(ChangeItem):
def __init__(self, account=None, name=None, arn=None, config=None, source_watcher=None):
super(GitHubTeamItem, self).__init__(index=GitHubTeam.index,
region="universal",
account=account,
name=name,
arn=arn,
new_config=config if config else {},
source_watcher=source_watcher)
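# Hedged sketch of the pagination check in list_org_teams() above: requests
# parses the RFC 5988 "Link" response header into response.links, so the
# presence of a "last" relation means more pages remain. The URLs below are
# hypothetical.
#
#   result.links == {
#       "next": {"url": "https://api.github.com/orgs/acme/teams?page=2", "rel": "next"},
#       "last": {"url": "https://api.github.com/orgs/acme/teams?page=5", "rel": "last"},
#   }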
|
{
"content_hash": "bbd37fba4f4a9f64ac9a598d725e7b32",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 95,
"avg_line_length": 33.830357142857146,
"alnum_prop": 0.555027711797308,
"repo_name": "Netflix/security_monkey",
"id": "1a5d0d468c5fb88ebb94ecf679924d8450603691",
"size": "4400",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "security_monkey/watchers/github/team.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22837"
},
{
"name": "Dart",
"bytes": "130852"
},
{
"name": "Dockerfile",
"bytes": "3841"
},
{
"name": "HTML",
"bytes": "120266"
},
{
"name": "JavaScript",
"bytes": "13728"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1578684"
},
{
"name": "Shell",
"bytes": "30939"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO
import time
import os
from daemon import runner
class FanControl():
# Return CPU temperature as float
def getCPUtemp(self):
cTemp = os.popen('vcgencmd measure_temp').readline()
return float(cTemp.replace("temp=","").replace("'C\n",""))
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/var/log/fancontrol.log'
self.stderr_path = '/var/log/fancontrol.log'
self.pidfile_path = '/var/run/fancontrol.pid'
self.pidfile_timeout = 5
    def run(self):
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(12, GPIO.OUT)
        GPIO.setwarnings(False)
        p = GPIO.PWM(12, 1000)
        PWM = 50
        p.start(PWM)  # start PWM once; adjust the duty cycle afterwards
        try:
            while True:
                CPU_temp = self.getCPUtemp()
                # Hysteresis band: speed up above 40.5'C, slow down below 39.5'C
                if CPU_temp > 40.5:
                    PWM = min(max(PWM + 1, 0), 100)
                    p.ChangeDutyCycle(PWM)
                elif CPU_temp < 39.5:
                    PWM = min(max(PWM - 1, 0), 100)
                    p.ChangeDutyCycle(PWM)
                time.sleep(5)
        finally:
            GPIO.cleanup()  # now reachable if the daemon is interrupted
fancontrol = FanControl()
daemon_runner = runner.DaemonRunner(fancontrol)
daemon_runner.do_action()
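# Illustrative check of the temperature parsing in getCPUtemp() (no GPIO or
# daemon required); the sample vcgencmd output below is hypothetical:
#
#   >>> float("temp=42.8'C\n".replace("temp=", "").replace("'C\n", ""))
#   42.8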
|
{
"content_hash": "9581d665d73c77a70702ac29c02988fa",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 66,
"avg_line_length": 29.894736842105264,
"alnum_prop": 0.5625,
"repo_name": "jpoirier/stratux-setup",
"id": "584ffc98469fad46ac3ef6a30ca8f29d8d29eb3c",
"size": "1327",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "files/fancontrol.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "36962"
}
],
"symlink_target": ""
}
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class segment_routing_algorithm(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-algorithm. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The algorithms supported for Segment Routing by the local system
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "segment-routing-algorithm"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"segment-routing-algorithm",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_algorithm/state (container)
YANG Description: State parameters of the Segment Routing algorithm advertised in
the RI LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_algorithm/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the Segment Routing algorithm advertised in
the RI LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
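# Hedged usage sketch for the generated binding above (nothing here is part
# of the pyangbind output):
#
#   sra = segment_routing_algorithm()
#   sra._path()   # -> the YANG path segments for this container
#   sra.state     # read-only "state" child (config: false, getter only)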
from . import state
class segment_routing_algorithm(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-algorithm. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The algorithms supported for Segment Routing by the local system
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "segment-routing-algorithm"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"segment-routing-algorithm",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_algorithm/state (container)
YANG Description: State parameters of the Segment Routing algorithm advertised in
the RI LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_algorithm/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of the Segment Routing algorithm advertised in
the RI LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
|
{
"content_hash": "c4f0ea3ca0a417f2623dcfc8ccc59e93",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 375,
"avg_line_length": 38.096969696969694,
"alnum_prop": 0.5777919185491569,
"repo_name": "napalm-automation/napalm-yang",
"id": "d5a84bbc8aca23d5a6a16b5cb3d761b3ec6b3b20",
"size": "12596",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_algorithm/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
}
|
import six
from kmip.core.attributes import CertificateType
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core import exceptions
from kmip.core.misc import CertificateValue
from kmip.core import objects
from kmip.core.objects import Attribute
from kmip.core.objects import KeyBlock
from kmip.core import primitives
from kmip.core.primitives import Struct
from kmip.core.primitives import Enumeration
from kmip.core.primitives import ByteString
from kmip.core import utils
from kmip.core.utils import BytearrayStream
# 2.2
# 2.2.1
class Certificate(Struct):
"""
A structure representing a DER-encoded X.509 public key certificate.
See Section 2.2.1 of the KMIP 1.1 specification for more information.
Attributes:
certificate_type: The type of the certificate.
certificate_value: The bytes of the certificate.
"""
def __init__(self,
certificate_type=None,
certificate_value=None):
"""
Construct a Certificate object.
Args:
certificate_type (CertificateType): The type of the
certificate. Optional, defaults to None.
certificate_value (bytes): The bytes of the certificate. Optional,
defaults to None.
"""
super(Certificate, self).__init__(Tags.CERTIFICATE)
if certificate_type is None:
self.certificate_type = CertificateType()
else:
self.certificate_type = CertificateType(certificate_type)
if certificate_value is None:
self.certificate_value = CertificateValue()
else:
self.certificate_value = CertificateValue(certificate_value)
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Certificate object and decode it into its
constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(Certificate, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.certificate_type = CertificateType()
self.certificate_value = CertificateValue()
self.certificate_type.read(tstream, kmip_version=kmip_version)
self.certificate_value.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
tstream = BytearrayStream()
self.certificate_type.write(tstream, kmip_version=kmip_version)
self.certificate_value.write(tstream, kmip_version=kmip_version)
self.length = tstream.length()
super(Certificate, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def __eq__(self, other):
if isinstance(other, Certificate):
if self.certificate_type != other.certificate_type:
return False
elif self.certificate_value != other.certificate_value:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, Certificate):
return not (self == other)
else:
return NotImplemented
def __repr__(self):
return "{0}(certificate_type={1}, certificate_value=b'{2}')".format(
type(self).__name__,
str(self.certificate_type),
str(self.certificate_value))
def __str__(self):
return "{0}".format(str(self.certificate_value))
# 2.2.2
class KeyBlockKey(Struct):
def __init__(self, key_block=None, tag=Tags.DEFAULT):
super(KeyBlockKey, self).__init__(tag)
self.key_block = key_block
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(KeyBlockKey, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.key_block = KeyBlock()
self.key_block.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.key_block.write(tstream, kmip_version=kmip_version)
        # Write the length and value of the key block
self.length = tstream.length()
super(KeyBlockKey, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class SymmetricKey(KeyBlockKey):
def __init__(self, key_block=None):
super(SymmetricKey, self).__init__(key_block, Tags.SYMMETRIC_KEY)
self.validate()
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.3
class PublicKey(KeyBlockKey):
def __init__(self, key_block=None):
super(PublicKey, self).__init__(key_block, Tags.PUBLIC_KEY)
self.validate()
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.4
class PrivateKey(KeyBlockKey):
def __init__(self, key_block=None):
super(PrivateKey, self).__init__(key_block, Tags.PRIVATE_KEY)
self.validate()
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class SplitKey(primitives.Struct):
"""
A split key cryptographic object.
This object represents a symmetric or private key that has been split into
multiple parts. The fields of this object specify how the key was split
and how it can be reassembled.
Attributes:
split_key_parts: The total number of parts of the split key.
key_part_identifier: The ID specifying the part of the key in the key
block.
split_key_threshold: The minimum number of parts needed to reconstruct
the key.
split_key_method: The method by which the key was split.
prime_field_size: The prime field size used for the Polynomial Sharing
Prime Field split key method.
key_block: The split key part held by this object.
"""
def __init__(self,
split_key_parts=None,
key_part_identifier=None,
split_key_threshold=None,
split_key_method=None,
prime_field_size=None,
key_block=None):
"""
Construct a SplitKey object.
Args:
split_key_parts (int): An integer specifying the total number of
parts of the split key. Optional, defaults to None. Required
for read/write.
key_part_identifier (int): An integer specifying which key part is
contained in the key block. Optional, defaults to None.
Required for read/write.
split_key_threshold (int): An integer specifying the minimum number
of key parts required to reconstruct the split key. Optional,
defaults to None. Required for read/write.
split_key_method (enum): A SplitKeyMethod enumeration specifying
the method by which the key was split. Optional, defaults to
None. Required for read/write.
prime_field_size (int): A big integer specifying the prime field
size used for the Polynomial Sharing Prime Field split key
method. Optional, defaults to None. Required for read/write
only if the split key method is Polynomial Sharing Prime Field.
key_block (struct): A KeyBlock structure containing the split key
part identified by the key part identifier. Optional, defaults
to None. Required for read/write.
"""
super(SplitKey, self).__init__(enums.Tags.SPLIT_KEY)
self._split_key_parts = None
self._key_part_identifier = None
self._split_key_threshold = None
self._split_key_method = None
self._prime_field_size = None
self._key_block = None
self.split_key_parts = split_key_parts
self.key_part_identifier = key_part_identifier
self.split_key_threshold = split_key_threshold
self.split_key_method = split_key_method
self.prime_field_size = prime_field_size
self.key_block = key_block
@property
def split_key_parts(self):
if self._split_key_parts is not None:
return self._split_key_parts.value
return None
@split_key_parts.setter
def split_key_parts(self, value):
if value is None:
self._split_key_parts = None
elif isinstance(value, six.integer_types):
self._split_key_parts = primitives.Integer(
value=value,
tag=enums.Tags.SPLIT_KEY_PARTS
)
else:
raise TypeError("The split key parts must be an integer.")
@property
def key_part_identifier(self):
if self._key_part_identifier is not None:
return self._key_part_identifier.value
return None
@key_part_identifier.setter
def key_part_identifier(self, value):
if value is None:
self._key_part_identifier = None
elif isinstance(value, six.integer_types):
self._key_part_identifier = primitives.Integer(
value=value,
tag=enums.Tags.KEY_PART_IDENTIFIER
)
else:
raise TypeError("The key part identifier must be an integer.")
@property
def split_key_threshold(self):
if self._split_key_threshold is not None:
return self._split_key_threshold.value
return None
@split_key_threshold.setter
def split_key_threshold(self, value):
if value is None:
self._split_key_threshold = None
elif isinstance(value, six.integer_types):
self._split_key_threshold = primitives.Integer(
value=value,
tag=enums.Tags.SPLIT_KEY_THRESHOLD
)
else:
raise TypeError("The split key threshold must be an integer.")
@property
def split_key_method(self):
if self._split_key_method is not None:
return self._split_key_method.value
return None
@split_key_method.setter
def split_key_method(self, value):
if value is None:
self._split_key_method = None
elif isinstance(value, enums.SplitKeyMethod):
self._split_key_method = primitives.Enumeration(
enums.SplitKeyMethod,
value=value,
tag=enums.Tags.SPLIT_KEY_METHOD
)
else:
raise TypeError(
"The split key method must be a SplitKeyMethod enumeration."
)
@property
def prime_field_size(self):
if self._prime_field_size is not None:
return self._prime_field_size.value
return None
@prime_field_size.setter
def prime_field_size(self, value):
if value is None:
self._prime_field_size = None
elif isinstance(value, six.integer_types):
self._prime_field_size = primitives.BigInteger(
value=value,
tag=enums.Tags.PRIME_FIELD_SIZE
)
else:
raise TypeError("The prime field size must be an integer.")
@property
def key_block(self):
if self._key_block is not None:
return self._key_block
return None
@key_block.setter
def key_block(self, value):
if value is None:
self._key_block = None
elif isinstance(value, objects.KeyBlock):
self._key_block = value
else:
raise TypeError("The key block must be a KeyBlock structure.")
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the SplitKey object and decode it.
Args:
input_buffer (stream): A data stream containing the encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(SplitKey, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.SPLIT_KEY_PARTS, local_buffer):
self._split_key_parts = primitives.Integer(
tag=enums.Tags.SPLIT_KEY_PARTS
)
self._split_key_parts.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the SplitKeyParts field."
)
if self.is_tag_next(enums.Tags.KEY_PART_IDENTIFIER, local_buffer):
self._key_part_identifier = primitives.Integer(
tag=enums.Tags.KEY_PART_IDENTIFIER
)
self._key_part_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the KeyPartIdentifier field."
)
if self.is_tag_next(enums.Tags.SPLIT_KEY_THRESHOLD, local_buffer):
self._split_key_threshold = primitives.Integer(
tag=enums.Tags.SPLIT_KEY_THRESHOLD
)
self._split_key_threshold.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the SplitKeyThreshold field."
)
if self.is_tag_next(enums.Tags.SPLIT_KEY_METHOD, local_buffer):
self._split_key_method = primitives.Enumeration(
enums.SplitKeyMethod,
tag=enums.Tags.SPLIT_KEY_METHOD
)
self._split_key_method.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the SplitKeyMethod field."
)
if self.is_tag_next(enums.Tags.PRIME_FIELD_SIZE, local_buffer):
self._prime_field_size = primitives.BigInteger(
tag=enums.Tags.PRIME_FIELD_SIZE
)
self._prime_field_size.read(
local_buffer,
kmip_version=kmip_version
)
else:
corner_case = enums.SplitKeyMethod.POLYNOMIAL_SHARING_PRIME_FIELD
if self.split_key_method == corner_case:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the PrimeFieldSize "
"field. This field is required when the SplitKeyMethod is "
"PolynomialSharingPrimeField."
)
if self.is_tag_next(enums.Tags.KEY_BLOCK, local_buffer):
self._key_block = objects.KeyBlock()
self._key_block.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the KeyBlock field."
)
self.is_oversized(local_buffer)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the SplitKey object to a buffer.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
local_buffer = utils.BytearrayStream()
if self._split_key_parts:
self._split_key_parts.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the SplitKeyParts field."
)
if self._key_part_identifier:
self._key_part_identifier.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the KeyPartIdentifier field."
)
if self._split_key_threshold:
self._split_key_threshold.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the SplitKeyThreshold field."
)
if self._split_key_method:
self._split_key_method.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the SplitKeyMethod field."
)
if self._prime_field_size:
self._prime_field_size.write(
local_buffer,
kmip_version=kmip_version
)
else:
corner_case = enums.SplitKeyMethod.POLYNOMIAL_SHARING_PRIME_FIELD
if self.split_key_method == corner_case:
raise exceptions.InvalidField(
"The SplitKey object is missing the PrimeFieldSize field. "
"This field is required when the SplitKeyMethod is "
"PolynomialSharingPrimeField."
)
if self._key_block:
self._key_block.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the KeyBlock field."
)
self.length = local_buffer.length()
super(SplitKey, self).write(output_buffer, kmip_version=kmip_version)
output_buffer.write(local_buffer.buffer)
def __repr__(self):
args = [
"split_key_parts={}".format(repr(self.split_key_parts)),
"key_part_identifier={}".format(repr(self.key_part_identifier)),
"split_key_threshold={}".format(repr(self.split_key_threshold)),
"split_key_method={}".format(self.split_key_method),
"prime_field_size={}".format(repr(self.prime_field_size)),
"key_block={}".format(repr(self.key_block))
]
return "SplitKey({})".format(", ".join(args))
def __str__(self):
# TODO (peter-hamilton) Replace str() call below with a dict() call.
value = ", ".join(
[
'"split_key_parts": {}'.format(self.split_key_parts),
'"key_part_identifier": {}'.format(self.key_part_identifier),
'"split_key_threshold": {}'.format(self.split_key_threshold),
'"split_key_method": {}'.format(self.split_key_method),
'"prime_field_size": {}'.format(self.prime_field_size),
'"key_block": {}'.format(str(self.key_block))
]
)
return "{" + value + "}"
def __eq__(self, other):
if isinstance(other, SplitKey):
if self.split_key_parts != other.split_key_parts:
return False
elif self.key_part_identifier != other.key_part_identifier:
return False
elif self.split_key_threshold != other.split_key_threshold:
return False
elif self.split_key_method != other.split_key_method:
return False
elif self.prime_field_size != other.prime_field_size:
return False
# elif self.key_block != other.key_block:
# return False
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SplitKey):
return not self.__eq__(other)
else:
return NotImplemented
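# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): exercising the SplitKey
# property setters and their type validation. enums.SplitKeyMethod.XOR is
# assumed to exist in this kmip release (it is defined by the KMIP spec).
def _example_split_key_properties():
    key = SplitKey(
        split_key_parts=3,
        key_part_identifier=1,
        split_key_threshold=2,
        split_key_method=enums.SplitKeyMethod.XOR,  # assumed enum member
    )
    assert key.split_key_parts == 3
    try:
        key.split_key_threshold = "two"  # non-integer values are rejected
    except TypeError:
        pass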
# 2.2.6
class Template(Struct):
def __init__(self, attributes=None):
super(Template, self).__init__(Tags.TEMPLATE)
self.attributes = attributes
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(Template, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.attributes = list()
attribute = Attribute()
attribute.read(tstream, kmip_version=kmip_version)
self.attributes.append(attribute)
while self.is_tag_next(Tags.ATTRIBUTE, tstream):
attribute = Attribute()
attribute.read(tstream, kmip_version=kmip_version)
self.attributes.append(attribute)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
for attribute in self.attributes:
attribute.write(tstream, kmip_version=kmip_version)
# Write the length and value of the template attribute
self.length = tstream.length()
super(Template, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.7
class SecretData(Struct):
class SecretDataType(Enumeration):
def __init__(self, value=None):
super(SecretData.SecretDataType, self).__init__(
enums.SecretDataType, value, Tags.SECRET_DATA_TYPE)
def __init__(self,
secret_data_type=None,
key_block=None):
super(SecretData, self).__init__(Tags.SECRET_DATA)
self.secret_data_type = secret_data_type
self.key_block = key_block
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(SecretData, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.secret_data_type = SecretData.SecretDataType()
self.key_block = KeyBlock()
self.secret_data_type.read(tstream, kmip_version=kmip_version)
self.key_block.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.secret_data_type.write(tstream, kmip_version=kmip_version)
self.key_block.write(tstream, kmip_version=kmip_version)
        # Write the length and value of the secret data
self.length = tstream.length()
super(SecretData, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.8
class OpaqueObject(Struct):
class OpaqueDataType(Enumeration):
def __init__(self, value=None):
super(OpaqueObject.OpaqueDataType, self).__init__(
enums.OpaqueDataType, value, Tags.OPAQUE_DATA_TYPE)
class OpaqueDataValue(ByteString):
def __init__(self, value=None):
super(OpaqueObject.OpaqueDataValue, self).__init__(
value, Tags.OPAQUE_DATA_VALUE)
def __init__(self,
opaque_data_type=None,
opaque_data_value=None):
super(OpaqueObject, self).__init__(Tags.OPAQUE_OBJECT)
self.opaque_data_type = opaque_data_type
self.opaque_data_value = opaque_data_value
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(OpaqueObject, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.opaque_data_type = OpaqueObject.OpaqueDataType()
self.opaque_data_value = OpaqueObject.OpaqueDataValue()
self.opaque_data_type.read(tstream, kmip_version=kmip_version)
self.opaque_data_value.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.opaque_data_type.write(tstream, kmip_version=kmip_version)
self.opaque_data_value.write(tstream, kmip_version=kmip_version)
        # Write the length and value of the opaque object
self.length = tstream.length()
super(OpaqueObject, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
|
{
"content_hash": "e01c94630f59ab932494fb0108dd839d",
"timestamp": "",
"source": "github",
"line_count": 754,
"max_line_length": 79,
"avg_line_length": 35.07161803713528,
"alnum_prop": 0.5997579791256996,
"repo_name": "OpenKMIP/PyKMIP",
"id": "1b0ceace0ffb76e5c1875070b0b73e0b28ba05c5",
"size": "27090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kmip/core/secrets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5552888"
},
{
"name": "Shell",
"bytes": "1214"
}
],
"symlink_target": ""
}
|
from openstack.tests.functional import base
class TestResource(base.BaseFunctionalTest):
def test_list(self):
ids = [o.resource_id for o in self.conn.telemetry.resources()]
self.assertNotEqual(0, len(ids))
|
{
"content_hash": "901a42d069ee778e87cb5e5ca4699da1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 70,
"avg_line_length": 28.625,
"alnum_prop": 0.7117903930131004,
"repo_name": "mtougeron/python-openstacksdk",
"id": "0fcaf865a0c0cf117602cb46b248925c267d0de6",
"size": "775",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack/tests/functional/telemetry/v2/test_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1096143"
},
{
"name": "Shell",
"bytes": "3436"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0028_remove_postmortem'),
]
operations = [
migrations.AddField(
model_name='event',
name='offer_help_email_sent',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='event',
name='submit_information_email_sent',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='event',
name='thank_you_email_sent',
field=models.DateTimeField(blank=True, null=True),
),
]
|
{
"content_hash": "685271469c52059a207e642e5273b163",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 62,
"avg_line_length": 27.107142857142858,
"alnum_prop": 0.5744400527009222,
"repo_name": "patjouk/djangogirls",
"id": "6b301ba680c4775820a916081a50ce8c1956bedd",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/migrations/0029_auto_20170107_1539.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32625"
},
{
"name": "HTML",
"bytes": "285328"
},
{
"name": "JavaScript",
"bytes": "12129"
},
{
"name": "Python",
"bytes": "396039"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
}
|
from pathlib import Path
from subprocess import Popen # nosec
from time import sleep
from typing import IO
def _dialer_is_connected(log_path: str) -> bool:
with open(log_path, 'rb') as fobj:
for line in fobj:
if line.startswith(b'--> secondary DNS address'):
return True
return False
def _start_dialer(config: Path, log_file: IO) -> Popen:
return Popen(['/usr/bin/wvdial', '--config', str(config.absolute())], stderr=log_file)
def dialup(config: Path, log: Path, max_retries: int, poll_seconds: int) -> Popen:
    with log.open(mode='w+b') as log_file:
        connection = _start_dialer(config, log_file)
        while not _dialer_is_connected(log_file.name):
if connection.poll() is not None:
connection.terminate()
raise ValueError('Invalid wvdial configuration')
if max_retries <= 0:
connection.terminate()
raise ValueError('Modem taking too long to connect')
sleep(poll_seconds)
max_retries -= 1
return connection
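# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The paths below are
# illustrative assumptions; a real deployment would supply its own wvdial
# config and log locations, and /usr/bin/wvdial must be installed.
if __name__ == '__main__':
    connection = dialup(
        config=Path('/etc/wvdial.conf'),  # assumed config location
        log=Path('/tmp/wvdial.log'),      # assumed log location
        max_retries=30,
        poll_seconds=2,
    )
    print('wvdial connected, pid:', connection.pid)
    connection.terminate()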
|
{
"content_hash": "6c58a6cd4f54485875ba20b80c78206d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 90,
"avg_line_length": 30.942857142857143,
"alnum_prop": 0.605724838411819,
"repo_name": "ascoderu/opwen-webapp",
"id": "182fa3a6de79a647d67d8f91bccdb4a727de30ed",
"size": "1083",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "opwen_email_client/domain/sim/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "623"
},
{
"name": "HTML",
"bytes": "31880"
},
{
"name": "JavaScript",
"bytes": "13480"
},
{
"name": "Makefile",
"bytes": "3078"
},
{
"name": "Python",
"bytes": "160924"
}
],
"symlink_target": ""
}
|
import pwnlib.tubes
def handle_pow(r):
print(r.recvuntil(b'python3 '))
print(r.recvuntil(b' solve '))
challenge = r.recvline().decode('ascii').strip()
p = pwnlib.tubes.process.process(['kctf_bypass_pow', challenge])
solution = p.readall().strip()
r.sendline(solution)
print(r.recvuntil(b'Correct'))
r = pwnlib.tubes.remote.remote('127.0.0.1', 1337)
print(r.recvuntil(b'== proof-of-work: '))
if r.recvline().startswith(b'enabled'):
handle_pow(r)
r.sendline(b"echo $'Hello \\x57orld'")
print(r.recvuntil(b'Hello World'))
exit(0)
|
{
"content_hash": "60ea463c05c771d496eeede34881240f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 28.1,
"alnum_prop": 0.6637010676156584,
"repo_name": "google/google-ctf",
"id": "e6014f2e63bd2102258ccec3bf00554538ede661",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vrp/level1/healthcheck/doit.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "508"
},
{
"name": "Assembly",
"bytes": "107617"
},
{
"name": "BASIC",
"bytes": "6068"
},
{
"name": "Batchfile",
"bytes": "1032"
},
{
"name": "Blade",
"bytes": "14530"
},
{
"name": "C",
"bytes": "1481904"
},
{
"name": "C++",
"bytes": "2139472"
},
{
"name": "CMake",
"bytes": "11595"
},
{
"name": "CSS",
"bytes": "172375"
},
{
"name": "Dart",
"bytes": "6282"
},
{
"name": "Dockerfile",
"bytes": "232352"
},
{
"name": "EJS",
"bytes": "92308"
},
{
"name": "Emacs Lisp",
"bytes": "2668"
},
{
"name": "GDB",
"bytes": "273"
},
{
"name": "GLSL",
"bytes": "33392"
},
{
"name": "Go",
"bytes": "3031142"
},
{
"name": "HTML",
"bytes": "467647"
},
{
"name": "Java",
"bytes": "174199"
},
{
"name": "JavaScript",
"bytes": "2643200"
},
{
"name": "Lua",
"bytes": "5944"
},
{
"name": "Makefile",
"bytes": "149152"
},
{
"name": "NSIS",
"bytes": "2800"
},
{
"name": "Nix",
"bytes": "139"
},
{
"name": "PHP",
"bytes": "311900"
},
{
"name": "Perl",
"bytes": "32742"
},
{
"name": "Pug",
"bytes": "8752"
},
{
"name": "Python",
"bytes": "1756592"
},
{
"name": "Red",
"bytes": "188"
},
{
"name": "Rust",
"bytes": "541267"
},
{
"name": "Sage",
"bytes": "39814"
},
{
"name": "Shell",
"bytes": "382149"
},
{
"name": "Smali",
"bytes": "2316656"
},
{
"name": "Starlark",
"bytes": "8216"
},
{
"name": "SystemVerilog",
"bytes": "16466"
},
{
"name": "VCL",
"bytes": "895"
},
{
"name": "Verilog",
"bytes": "7230"
},
{
"name": "Vim Script",
"bytes": "890"
},
{
"name": "Vue",
"bytes": "10248"
}
],
"symlink_target": ""
}
|
"""
Training various Deep Learning Algorithms using Keras with Tensorboard backend
This module takes paths to the location of images to be used for Image
classification. As of now it is used for cognitive radio spectrum awareness
based on the DySpan2017 Challenge spectrum setup, but it can be used for any
multiclass image recognition.
Example:
To run this script just type:
$ python2 cnn_k_1_2.py
The name stands for:
- CNN - Convolutional Neural Network
- k_1_2 - Keras-v1.2 which had to be used because of hardware
restrictions
**is written for python2**
TODO:
Urgent:
* Revise regularization
TODO: so far this regularization is applied to all convolution layers.
Better to determine if there is an optimum way or layer for this to be
applied to
Optional:
* API update - Keras and tensorflow
* py3k
"""
from argparse import ArgumentParser
import math
import os.path
import sys
from keras.callbacks import (LearningRateScheduler, TensorBoard, EarlyStopping,
CSVLogger, ModelCheckpoint)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Flatten, Dense
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2, activity_l2
##################################################
# Parser setup
##################################################
parser = ArgumentParser()
parser.add_argument("-n", # Alias
"--name",
help="Set the basename for the generated files",
default=None)
args = parser.parse_args()
##################################################
# Constants and Variables
##################################################
BASEPATH = '../trained_models/keras'
if args.name is None:
print("[ERROR]: Please provide a basename for the generated files.")
sys.exit()
basename = args.name
# Set the filenames based on the basename
names = {'model': basename + "_model.h5",
'weights': basename + "_weights.h5",
'checkpoint': basename + '_ckpt.h5',
'csv': basename + '.csv',
}
# dimensions of our images.
img_width, img_height = 64, 64
# This is my relative location, please change accordingly
train_data_dir = '../../data/pic_set/train'
test_data_dir = '../../data/pic_set/test'
nb_train_samples = 76300
nb_test_samples = 8400
# Currently each iteration takes 7 seconds on this machine, so EPOCHS is
# capped at 3000 purely because of time constraints. Based on my results,
# more iterations will *not* give better results anyway.
EPOCHS = 3000
BATCH_SIZE = 50
MOMENTUM = 0.9
# Apply L2 regularization
L2 = 0.0005
##################################################
# Procedural Start
##################################################
# Check if files already exist to avoid an accidental
# overwrite
for folder, name in names.items():
my_file = os.path.join(BASEPATH, folder, name)
if os.path.isfile(my_file):
print("[ERROR]: " + my_file + " already exists.")
print("Please choose another basename")
sys.exit()
##################################################
# Model definition
##################################################
# Define CNN based on
# http://ieeexplore.ieee.org/document/8017499/
model = Sequential()
# The last dimension is 1 because of gray-scale
model.add(Conv2D(48, 2, 2,
input_shape=(img_width, img_height, 1)
)
)
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(192, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(192, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1024,
W_regularizer=l2(L2),
activity_regularizer=activity_l2(L2)
)
)
model.add(Dense(1024,
W_regularizer=l2(L2),
activity_regularizer=activity_l2(L2)
)
)
model.add(Dense(10))
model.add(Activation('softmax'))
# Print the model summary to terminal
model.summary()
# Compile the model
# Need to be categorical because this is not a binary classification problem
# Compile stochastic gradient descent model with step decreasing learning rate
sgd = SGD(lr=0.001, momentum=MOMENTUM, decay=0.0, nesterov=False)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
# Add a learning rate schedule
# References
# ---------
# https://goo.gl/4vQhdj
# https://goo.gl/VrrciJ
def step_decay(epoch):
"""
Define a step decay, which takes the form
lr = lr0 * drop^floor(epoch/epocs_drop)
"""
initial_lrate = 0.001
gamma = 0.1
epochs_drop = EPOCHS / 4
lrate = initial_lrate * math.pow(gamma, math.floor((1+epoch) / epochs_drop))
return lrate
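# Worked example of the schedule above, using the constants in this script:
# with initial_lrate=0.001, gamma=0.1 and EPOCHS=3000, epochs_drop=750, so
#   epochs    0-748  -> 0.001 * 0.1**0 = 1e-3
#   epochs  749-1498 -> 0.001 * 0.1**1 = 1e-4   (floor((1+749)/750) == 1)
#   epochs 1499-2248 -> 0.001 * 0.1**2 = 1e-5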
# learning schedule callback
lschedule = LearningRateScheduler(step_decay)
# Set the callback for Tensorboard (visualization purposes)
tbCallBack = TensorBoard(log_dir=os.path.join(BASEPATH,
'tensorboard'),
histogram_freq=0,
write_graph=True, write_images=False)
# EarlyStopping callback (defined here but not included in callbacks_list below)
early_stop = EarlyStopping(monitor='val_acc',
min_delta=0,
patience=60,
verbose=2,
mode='auto')
# CSV logger callback
csv = CSVLogger(os.path.join(BASEPATH,
'csv',
names['csv']),
separator=',', append=False)
# Checkpointer callback: Saves the model weights after each epoch if
# the validation loss decreased
checkpointer = ModelCheckpoint(filepath=os.path.join(BASEPATH,
'checkpoint',
names['checkpoint']),
verbose=1,
save_best_only=True)
# Set the callback list
# callbacks_list = [lschedule, tbCallBack]
callbacks_list = [tbCallBack, csv, checkpointer]
# Create Image generators
# Not using rescaling or any other data augmentation technique
train_datagen = ImageDataGenerator()
test_datagen = ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=BATCH_SIZE,
color_mode="grayscale",
shuffle=True,
class_mode='categorical')
test_generator = test_datagen.flow_from_directory(
test_data_dir,
target_size=(img_width, img_height),
batch_size=BATCH_SIZE,
color_mode="grayscale",
shuffle=True,
class_mode='categorical')
# Fit the model
model.fit_generator(
generator=train_generator,
samples_per_epoch=1550,
nb_epoch=EPOCHS,
callbacks=callbacks_list,
validation_data=test_generator,
nb_val_samples=nb_test_samples // BATCH_SIZE)
# Save the models
model.save_weights(os.path.join(BASEPATH, 'weights', names['weights']))
model.save(os.path.join(BASEPATH, 'model', names['model']))
|
{
"content_hash": "e1a5656bebd4804c072fee6bdd10a622",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 80,
"avg_line_length": 31.518828451882847,
"alnum_prop": 0.6110447364927651,
"repo_name": "primercuervo/cognitive_radio_ml",
"id": "8d8c7a16c0cd37b3ab06c8e3c7bceba4491b9c83",
"size": "7579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/cnn_k_1_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "950273"
},
{
"name": "Python",
"bytes": "68169"
},
{
"name": "TeX",
"bytes": "335108"
}
],
"symlink_target": ""
}
|
"""
Given a rectangular matrix of characters, add a border of asterisks(*) to it.
"""
def addBorder(picture):
    # Pad each row with an asterisk on both sides, then add full-width
    # border rows on top and bottom. Assumes a rectangular matrix, so the
    # first padded row gives the border width.
    picture = [''.join(('*', row, '*')) for row in picture]
    border = '*' * len(picture[0])
    return [border] + picture + [border]
if __name__ == '__main__':
picture = ["a"]
print addBorder(picture)
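    # Expected output for the example above:
    # ['***', '*a*', '***']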
|
{
"content_hash": "64c3e57a68cd467eb33979a4a43dc996",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 21.789473684210527,
"alnum_prop": 0.572463768115942,
"repo_name": "mayababuji/MyCodefights",
"id": "a07f101cbe39a152841953694eaab3e1a4e05f04",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "addBorder_REMOTE_4896.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21846"
}
],
"symlink_target": ""
}
|
import configparser
import copy
import datetime
from functools import cmp_to_key
import getpass
import logging
from io import StringIO
import os
import re
import sys
import traceback
import types
import textwrap
from optparse import OptionGroup
from optparse import OptionParser
from packstack.installer import basedefs
from packstack.installer import validators
from . import utils
from packstack.installer import processors
from packstack.installer import output_messages
from .exceptions import FlagValidationError
from .exceptions import ParamValidationError
from packstack.modules.common import filtered_hosts
from packstack.version import version_info
from packstack.installer.setup_controller import Controller
controller = Controller()
commandLineValues = {}
# List to hold all values to be masked in logging (i.e. passwords and sensitive data)
# TODO: read default values from conf_param?
masked_value_set = set()
tmpfiles = []
def initLogging(debug):
try:
logFile = os.path.join(basedefs.DIR_LOG, basedefs.FILE_LOG)
        # Create the log file with specific permissions, puppet has a habit of putting
# passwords in logs
os.close(os.open(logFile, os.O_CREAT | os.O_EXCL, 0o600))
hdlr = logging.FileHandler(filename=logFile, mode='w')
if (debug):
level = logging.DEBUG
else:
level = logging.INFO
fmts = '%(asctime)s::%(levelname)s::%(module)s::%(lineno)d::%(name)s:: %(message)s'
dfmt = '%Y-%m-%d %H:%M:%S'
fmt = logging.Formatter(fmts, dfmt)
hdlr.setFormatter(fmt)
logging.root.handlers = []
logging.root.addHandler(hdlr)
logging.root.setLevel(level)
except Exception:
logging.error(traceback.format_exc())
raise Exception(output_messages.ERR_EXP_FAILED_INIT_LOGGER)
return logFile
def _getInputFromUser(param):
"""
this private func reads the data from the user
for the given param
"""
loop = True
userInput = None
try:
if param.USE_DEFAULT:
logging.debug("setting default value (%s) for key (%s)" % (mask(param.DEFAULT_VALUE), param.CONF_NAME))
controller.CONF[param.CONF_NAME] = param.DEFAULT_VALUE
else:
while loop:
# If the value was not supplied by the command line flags
if param.CONF_NAME not in commandLineValues:
message = StringIO()
message.write(param.PROMPT)
val_list = param.VALIDATORS or []
if(validators.validate_regexp not in val_list
and param.OPTION_LIST):
message.write(" [%s]" % "|".join(param.OPTION_LIST))
if param.DEFAULT_VALUE:
message.write(" [%s] " % (str(param.DEFAULT_VALUE)))
message.write(": ")
message.seek(0)
# mask password or hidden fields
if (param.MASK_INPUT):
userInput = getpass.getpass("%s :" % (param.PROMPT))
else:
                        userInput = input(message.read())
else:
userInput = commandLineValues[param.CONF_NAME]
# If DEFAULT_VALUE is set and user did not input anything
if userInput == "" and len(str(param.DEFAULT_VALUE)) > 0:
userInput = param.DEFAULT_VALUE
# Param processing
userInput = process_param_value(param, userInput)
# If param requires validation
try:
validate_param_value(param, userInput)
controller.CONF[param.CONF_NAME] = userInput
loop = False
except ParamValidationError:
if param.LOOSE_VALIDATION:
# If validation failed but LOOSE_VALIDATION is true, ask user
answer = _askYesNo("User input failed validation, "
"do you still wish to use it")
loop = not answer
if answer:
controller.CONF[param.CONF_NAME] = userInput
continue
else:
if param.CONF_NAME in commandLineValues:
del commandLineValues[param.CONF_NAME]
else:
# Delete value from commandLineValues so that we will prompt the user for input
if param.CONF_NAME in commandLineValues:
del commandLineValues[param.CONF_NAME]
loop = True
except KeyboardInterrupt:
        # add the new line so messages won't be displayed on the same line as the question
print("")
raise
except Exception:
logging.error(traceback.format_exc())
raise Exception(output_messages.ERR_EXP_READ_INPUT_PARAM % (param.CONF_NAME))
def input_param(param):
"""
this func will read input from user
and ask confirmation if needed
"""
# We need to check if a param needs confirmation, (i.e. ask user twice)
# Do not validate if it was given from the command line
if param.NEED_CONFIRM and param.CONF_NAME not in commandLineValues:
# create a copy of the param so we can call it twice
confirmedParam = copy.deepcopy(param)
confirmedParamName = param.CONF_NAME + "_CONFIRMED"
confirmedParam.CONF_NAME = confirmedParamName
confirmedParam.PROMPT = output_messages.INFO_CONF_PARAMS_PASSWD_CONFIRM_PROMPT
# Now get both values from user (with existing validations)
while True:
_getInputFromUser(param)
_getInputFromUser(confirmedParam)
if controller.CONF[param.CONF_NAME] == controller.CONF[confirmedParamName]:
logging.debug("Param confirmation passed, value for both questions is identical")
break
else:
print(output_messages.INFO_VAL_PASSWORD_DONT_MATCH)
else:
_getInputFromUser(param)
return param
def _askYesNo(question=None):
message = StringIO()
while True:
askString = "\r%s? (yes|no): " % (question)
logging.debug("asking user: %s" % askString)
message.write(askString)
message.seek(0)
        raw = input(message.read())
if not len(raw):
continue
answer = raw[0].lower()
logging.debug("user answered read: %s" % (answer))
if answer not in 'yn':
continue
return answer == 'y'
def _addDefaultsToMaskedValueSet():
"""
For every param in conf_params
that has MASK_INPUT enabled keep the default value
in the 'masked_value_set'
"""
global masked_value_set
for group in controller.getAllGroups():
        for param in group.parameters.values():
# Keep default password values masked, but ignore default empty values
if ((param.MASK_INPUT is True) and param.DEFAULT_VALUE != ""):
masked_value_set.add(param.DEFAULT_VALUE)
def _updateMaskedValueSet():
"""
For every param in conf
has MASK_INPUT enabled keep the user input
in the 'masked_value_set'
"""
global masked_value_set
for confName in controller.CONF:
# Add all needed values to masked_value_set
if (controller.getParamKeyValue(confName, "MASK_INPUT") is True):
masked_value_set.add(controller.CONF[confName])
def mask(input):
"""
Gets a dict/list/str and search maksked values in them.
The list of masked values in is masked_value_set and is updated
via the user input
If it finds, it replaces them with '********'
"""
output = copy.deepcopy(input)
if isinstance(input, dict):
for key in input:
if isinstance(input[key], str):
output[key] = utils.mask_string(input[key],
masked_value_set)
if isinstance(input, list):
for item in input:
org = item
orgIndex = input.index(org)
if isinstance(item, str):
item = utils.mask_string(item, masked_value_set)
if item != org:
output.remove(org)
output.insert(orgIndex, item)
if isinstance(input, str):
output = utils.mask_string(input, masked_value_set)
return output
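# Hedged illustration (not part of the original module): once a value has been
# recorded in masked_value_set, mask() replaces it wherever it appears. The
# parameter name below is hypothetical:
#   masked_value_set.add('s3cr3t')
#   mask({'CONFIG_DB_PW': 's3cr3t'})  ->  {'CONFIG_DB_PW': '********'}
#   mask(['s3cr3t', 'public'])        ->  ['********', 'public']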
def removeMaskString(maskedString):
"""
remove an element from masked_value_set
we need to itterate over the set since
calling set.remove() on an string that does not exit
will raise an exception
"""
global masked_value_set
# Since we cannot remove an item from a set during itteration over
# the said set, we only mark a flag and if the flag is set to True
# we remove the string from the set.
found = False
for item in masked_value_set:
if item == maskedString:
found = True
if found:
masked_value_set.remove(maskedString)
def validate_param_value(param, value):
cname = param.CONF_NAME
logging.debug("Validating parameter %s." % cname)
val_list = param.VALIDATORS or []
opt_list = param.OPTION_LIST
for val_func in val_list:
try:
val_func(value, opt_list)
except ParamValidationError as ex:
print('Parameter %s failed validation: %s' % (cname, ex))
raise
def process_param_value(param, value):
_value = value
proclist = param.PROCESSORS or []
for proc_func in proclist:
is_silent = getattr(proc_func, 'silent', False)
logging.debug("Processing value of parameter "
"%s." % param.CONF_NAME)
try:
new_value = proc_func(_value, param.CONF_NAME, controller.CONF)
if new_value != _value:
if param.MASK_INPUT is False and not is_silent:
msg = output_messages.INFO_CHANGED_VALUE
print(msg % (_value, new_value))
_value = new_value
else:
logging.debug("Processor returned the original "
"value: %s" % _value)
except processors.ParamProcessingError as ex:
print("Value processing of parameter %s "
"failed.\n%s" % (param.CONF_NAME, ex))
raise
return _value
def _handleGroupCondition(config, conditionName, conditionValue):
"""
handle params group pre/post condition
checks if a group has a pre/post condition
and validates the params related to the group
"""
# If the post condition is a function
if callable(conditionName):
# Call the function conditionName with conf as the arg
conditionValue = conditionName(controller.CONF)
# If the condition is a string - just read it to global conf
# We assume that if we get a string as a member it is the name of a member of conf_params
elif isinstance(conditionName, str):
conditionValue = _loadParamFromFile(config, "general", conditionName)
else:
# Any other type is invalid
raise TypeError("%s type (%s) is not supported" % (conditionName, type(conditionName)))
return conditionValue
def _loadParamFromFile(config, section, param_name):
"""
read param from file
validate it
and load to to global conf dict
"""
param = controller.getParamByName(param_name)
# Get value from answer file
try:
value = config.get(section, param_name)
except configparser.NoOptionError:
value = None
# Check for deprecated parameters
deprecated = param.DEPRECATES if param.DEPRECATES is not None else []
for old_name in deprecated:
try:
val = config.get(section, old_name)
except configparser.NoOptionError:
continue
if not val:
# value is empty string
continue
if value is None:
value = val
if value != val:
raise ValueError('Parameter %(param_name)s deprecates '
'following parameters:\n%(deprecated)s.\n'
'Please either use parameter %(param_name)s '
'or use same value for all deprecated '
'parameters.' % locals())
if deprecated and value is not None:
controller.MESSAGES.append('Deprecated parameter has been used '
'in answer file. Please use parameter '
'%(param_name)s next time. This '
'parameter deprecates following '
'parameters: %(deprecated)s.'
% locals())
if value is None:
# Let's use default value if we have one
value = getattr(param, 'DEFAULT_VALUE', None)
if value is None:
raise KeyError('Parser cannot find option %s in answer file.'
% param_name)
# Validate param value using its validation func
value = process_param_value(param, value)
validate_param_value(param, value)
# Keep param value in our never ending global conf
controller.CONF[param.CONF_NAME] = value
# Add message to controller.MESSAGES if defined in parameter
if param.MESSAGE:
_handleParamMessage(param, value)
return value
def _handleAnswerFileParams(answerFile):
"""
handle loading and validating
params from answer file
supports reading single or group params
"""
try:
logging.debug("Starting to handle config file")
# Read answer file
fconf = configparser.RawConfigParser()
fconf.read(answerFile)
# Iterate all the groups and check the pre/post conditions
for group in controller.getAllGroups():
# Get all params per group
# Handle pre conditions for group
preConditionValue = True
if group.PRE_CONDITION:
preConditionValue = _handleGroupCondition(fconf, group.PRE_CONDITION, preConditionValue)
# Handle pre condition match with case insensitive values
if preConditionValue == group.PRE_CONDITION_MATCH:
                for param in group.parameters.values():
_loadParamFromFile(fconf, "general", param.CONF_NAME)
# Handle post conditions for group only if pre condition passed
postConditionValue = True
if group.POST_CONDITION:
postConditionValue = _handleGroupCondition(fconf, group.POST_CONDITION, postConditionValue)
# Handle post condition match for group
if postConditionValue != group.POST_CONDITION_MATCH:
logging.error("The group condition (%s) returned: %s, which differs from the excpeted output: %s" %
(group.GROUP_NAME, postConditionValue, group.POST_CONDITION_MATCH))
raise ValueError(output_messages.ERR_EXP_GROUP_VALIDATION_ANS_FILE %
(group.GROUP_NAME, postConditionValue, group.POST_CONDITION_MATCH))
else:
logging.debug("condition (%s) passed" % group.POST_CONDITION)
else:
logging.debug("no post condition check for group %s" % group.GROUP_NAME)
else:
logging.debug("skipping params group %s since value of group validation is %s" % (group.GROUP_NAME, preConditionValue))
except Exception as e:
logging.error(traceback.format_exc())
raise Exception(output_messages.ERR_EXP_HANDLE_ANSWER_FILE % (e))
def _getanswerfilepath():
path = None
msg = "Could not find a suitable path on which to create the answerfile"
ts = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
p = os.path.expanduser("~/")
if os.access(p, os.W_OK):
path = os.path.abspath(os.path.join(p, "packstack-answers-%s.txt" % ts))
msg = "A new answerfile was created in: %s" % path
controller.MESSAGES.append(msg)
return path
def _gettmpanswerfilepath():
path = None
msg = "Could not find a suitable path on which to create the temporary answerfile"
ts = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
p = os.path.expanduser("~/")
if os.access(p, os.W_OK):
path = os.path.abspath(os.path.join(p, "tmp-packstack-answers-%s.txt" % ts))
tmpfiles.append(path)
return path
def _handleInteractiveParams():
try:
logging.debug("Groups: %s" % ', '.join([x.GROUP_NAME for x in controller.getAllGroups()]))
for group in controller.getAllGroups():
preConditionValue = True
logging.debug("going over group %s" % group.GROUP_NAME)
# If pre_condition is set, get Value
if group.PRE_CONDITION:
preConditionValue = _getConditionValue(group.PRE_CONDITION)
inputLoop = True
# If we have a match, i.e. condition returned True, go over all params in the group
if preConditionValue == group.PRE_CONDITION_MATCH:
while inputLoop:
                    for param in group.parameters.values():
if not param.CONDITION:
input_param(param)
# update password list, so we know to mask them
_updateMaskedValueSet()
postConditionValue = True
# If group has a post condition, we check it after we get the input from
# all the params in the group. if the condition returns False, we loop over the group again
if group.POST_CONDITION:
postConditionValue = _getConditionValue(group.POST_CONDITION)
if postConditionValue == group.POST_CONDITION_MATCH:
inputLoop = False
else:
# we clear the value of all params in the group
# in order to re-input them by the user
                            for param in group.parameters.values():
if param.CONF_NAME in controller.CONF:
del controller.CONF[param.CONF_NAME]
if param.CONF_NAME in commandLineValues:
del commandLineValues[param.CONF_NAME]
else:
inputLoop = False
else:
logging.debug("no post condition check for group %s" % group.GROUP_NAME)
_displaySummary()
except KeyboardInterrupt:
logging.error("keyboard interrupt caught")
raise Exception(output_messages.ERR_EXP_KEYBOARD_INTERRUPT)
    except Exception:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_HANDLE_PARAMS)
def _handleParams(configFile):
_addDefaultsToMaskedValueSet()
if configFile:
_handleAnswerFileParams(configFile)
else:
_handleInteractiveParams()
def _getConditionValue(matchMember):
returnValue = False
if isinstance(matchMember, types.FunctionType):
returnValue = matchMember(controller.CONF)
elif isinstance(matchMember, str):
# we assume that if we get a string as a member it is the name
# of a member of conf_params
if matchMember not in controller.CONF:
param = controller.getParamByName(matchMember)
input_param(param)
returnValue = controller.CONF[matchMember]
else:
raise TypeError("%s type (%s) is not supported" % (matchMember, type(matchMember)))
return returnValue
def _displaySummary():
print(output_messages.INFO_DSPLY_PARAMS)
print("=" * (len(output_messages.INFO_DSPLY_PARAMS) - 1))
logging.info("*** User input summary ***")
for group in controller.getAllGroups():
        for param in group.parameters.values():
if not param.USE_DEFAULT and param.CONF_NAME in controller.CONF:
cmdOption = param.CMD_OPTION
length = 30 - len(cmdOption)
maskParam = param.MASK_INPUT
# Only call mask on a value if the param has MASK_INPUT set to True
if maskParam:
logging.info("%s: %s" % (cmdOption, mask(controller.CONF[param.CONF_NAME])))
print("%s:" % (cmdOption) + " " * length + mask(controller.CONF[param.CONF_NAME]))
else:
# Otherwise, log & display it as it is
logging.info("%s: %s" % (cmdOption, str(controller.CONF[param.CONF_NAME])))
print("%s:" % (cmdOption) + " " * length + str(controller.CONF[param.CONF_NAME]))
logging.info("*** User input summary ***")
answer = _askYesNo(output_messages.INFO_USE_PARAMS)
if not answer:
logging.debug("user chose to re-enter the user parameters")
for group in controller.getAllGroups():
            for param in group.parameters.values():
if param.CONF_NAME in controller.CONF:
if not param.MASK_INPUT:
param.DEFAULT_VALUE = controller.CONF[param.CONF_NAME]
                    # Remove the string from masked_value_set in order
                    # to remove values that might be overwritten.
removeMaskString(controller.CONF[param.CONF_NAME])
del controller.CONF[param.CONF_NAME]
if param.CONF_NAME in commandLineValues:
del commandLineValues[param.CONF_NAME]
print("")
logging.debug("calling handleParams in interactive mode")
return _handleParams(None)
else:
logging.debug("user chose to accept user parameters")
def _printAdditionalMessages():
if len(controller.MESSAGES) > 0:
print(output_messages.INFO_ADDTIONAL_MSG)
for msg in controller.MESSAGES:
print(output_messages.INFO_ADDTIONAL_MSG_BULLET % (msg))
def _addFinalInfoMsg(logFile):
"""
add info msg to the user finalizing the
successfull install of rhemv
"""
controller.MESSAGES.append(output_messages.INFO_LOG_FILE_PATH % (logFile))
controller.MESSAGES.append(
output_messages.INFO_MANIFEST_PATH % (basedefs.PUPPET_MANIFEST_DIR))
def _summaryParamsToLog():
if len(controller.CONF) > 0:
logging.debug("*** The following params were used as user input:")
for group in controller.getAllGroups():
            for param in group.parameters.values():
if param.CONF_NAME in controller.CONF:
maskedValue = mask(controller.CONF[param.CONF_NAME])
logging.debug("%s: %s" % (param.CMD_OPTION, maskedValue))
def _handleParamMessage(param, value):
"""
add message to the information displayed at the end of the execution
for parameters with MESSAGE option. if parameter has MESSAGE_VALUES
option, message will be only displayed if the provided value is in
MESSAGE_VALUES.
"""
message_values = param.MESSAGE_VALUES if param.MESSAGE_VALUES is not None else None
if not message_values or value in message_values:
message = utils.color_text('Parameter %s: %s'
% (param.CONF_NAME, param.MESSAGE), 'red')
if message not in controller.MESSAGES:
controller.MESSAGES.append(message)
def runSequences():
controller.runAllSequences()
def _main(options, configFile=None, logFile=None):
print(output_messages.INFO_HEADER)
print("\n" + output_messages.INFO_LOG_FILE_PATH % logFile)
# Get parameters
_handleParams(configFile)
# Generate answer file, only if no answer file was provided
if not options.answer_file:
path = _getanswerfilepath()
if path:
generateAnswerFile(path)
# If an answer file was provided, some options may have been overridden
# Overwrite answer file with updated options
else:
generateAnswerFile(options.answer_file)
# Update masked_value_list with user input values
_updateMaskedValueSet()
# Print masked conf
logging.debug(mask(controller.CONF))
# Start configuration stage
print("\n" + output_messages.INFO_INSTALL)
# Initialize Sequences
initPluginsSequences()
# Run main setup logic
runSequences()
# Lock rhevm version
# _lockRpmVersion()
# Print info
_addFinalInfoMsg(logFile)
print(output_messages.INFO_INSTALL_SUCCESS)
def remove_remote_var_dirs(options, config, messages):
"""
Removes the temp directories on remote hosts,
doesn't remove data on localhost
"""
for host in filtered_hosts(config):
try:
host_dir = config['HOST_DETAILS'][host]['tmpdir']
except KeyError:
# Nothing was added to this host yet, so we have nothing to delete
continue
if options.debug:
# we keep temporary directories on hosts in debug mode
messages.append(
'Note temporary directory {host_dir} on host {host} was '
'not deleted for debugging purposes.'.format(**locals())
)
continue
logging.debug(output_messages.INFO_REMOVE_REMOTE_VAR % (host_dir, host))
server = utils.ScriptRunner(host)
server.append('rm -rf %s' % host_dir)
try:
server.execute()
except Exception as e:
msg = output_messages.ERR_REMOVE_REMOTE_VAR % (host_dir, host)
logging.error(msg)
logging.exception(e)
messages.append(utils.color_text(msg, 'red'))
def remove_temp_files():
"""
Removes any temporary files generated during
configuration
"""
for myfile in tmpfiles:
try:
os.unlink(myfile)
except Exception as e:
msg = output_messages.ERR_REMOVE_TMP_FILE % (myfile)
logging.error(msg)
logging.exception(e)
controller.MESSAGES.append(utils.color_text(msg, 'red'))
def generateAnswerFile(outputFile, overrides={}):
sep = os.linesep
fmt = ("%(comment)s%(separator)s%(conf_name)s=%(default_value)s"
"%(separator)s")
outputFile = os.path.expanduser(outputFile)
# Remove the answer file so it can be recreated as the current user with
# the mode -rw-------
if os.path.exists(outputFile):
os.remove(outputFile)
fd = os.open(outputFile, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600)
with os.fdopen(fd, "w") as ans_file:
ans_file.write("[general]%s" % os.linesep)
for group in controller.getAllGroups():
            for param in group.parameters.values():
comm = param.USAGE or ''
comm = textwrap.fill(comm,
initial_indent='%s# ' % sep,
subsequent_indent='# ',
break_long_words=False)
value = controller.CONF.get(param.CONF_NAME,
param.DEFAULT_VALUE)
args = {'comment': comm,
'separator': sep,
'default_value': overrides.get(param.CONF_NAME, value),
'conf_name': param.CONF_NAME}
ans_file.write(fmt % args)
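# Illustrative stanza produced by the format string above (the usage text is
# hypothetical; CONFIG_COMPUTE_HOSTS is a parameter referenced elsewhere in
# this module):
#
#   # Comma separated list of compute hosts
#   CONFIG_COMPUTE_HOSTS=192.168.0.2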
def validate_answer_file_options(answerfile_path):
if not os.path.exists(answerfile_path):
raise Exception(
output_messages.ERR_NO_ANSWER_FILE % answerfile_path)
answerfile = configparser.RawConfigParser()
answerfile.read(answerfile_path)
sections = answerfile._sections
general_sections = sections.get('general', None)
if len(sections) != 1:
raise Exception('Expected single section')
if not general_sections:
raise Exception('Expected section [general]')
    general_sections.pop('__name__', None)
answerfile_options = set([key.upper() for key in general_sections])
possible_options = set()
for group in controller.getAllGroups():
possible_options.update([key.upper() for key in group.parameters])
difference = answerfile_options - possible_options
if difference:
raise Exception(
'Found unexpected answerfile options {}'.format(list(difference)))
print('Provided answerfile does not contain any unexpected options.')
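# Hedged usage sketch (the path is an assumption for illustration; it follows
# the packstack-answers-<timestamp>.txt naming used elsewhere in this module):
#   validate_answer_file_options('/root/packstack-answers-20170101-120000.txt')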
def single_step_aio_install(options, logFile):
"""Installs an All in One host on this host."""
options.install_hosts = utils.get_localhost_ip()
single_step_install(options, logFile)
def single_step_install(options, logFile):
answerfilepath = _gettmpanswerfilepath()
if not answerfilepath:
_printAdditionalMessages()
return
    # We're going to generate the answer file and run Packstack in a single
    # step. To do this we generate the answer file and pass in some override
    # variables to override the default hosts.
overrides = {}
hosts = options.install_hosts
hosts = [host.strip() for host in hosts.split(',')]
for group in controller.getAllGroups():
        for param in group.parameters.values():
            # Directives that contain _HOST are set to the controller node
            if param.CONF_NAME.find("_HOST") != -1:
                overrides[param.CONF_NAME] = hosts[0]
    # If there is more than one host, all but the first are compute nodes
if len(hosts) > 1:
overrides["CONFIG_COMPUTE_HOSTS"] = ','.join(hosts[1:])
# We can also override defaults with command line options
_set_command_line_values(options)
for key, value in commandLineValues.items():
overrides[key] = value
generateAnswerFile(answerfilepath, overrides)
_main(options, answerfilepath, logFile)
def initCmdLineParser():
"""
Initiate the optparse object, add all the groups and general command line flags
and returns the optparse object
"""
# Init parser and all general flags
usage = "usage: %prog [options] [--help]"
parser = OptionParser(usage=usage, version="%prog {0}".format(version_info.version_string()))
parser.add_option("--gen-answer-file", help="Generate a template of an answer file.")
parser.add_option("--validate-answer-file", help="Check if answerfile contains unexpected options.")
parser.add_option("--answer-file", help="Runs the configuration in non-interactive mode, extracting all information from the"
"configuration file. using this option excludes all other options")
parser.add_option("--install-hosts", help="Install on a set of hosts in a single step. The format should be a comma separated list "
"of hosts, the first is setup as a controller, and the others are setup as compute nodes."
"if only a single host is supplied then it is setup as an all in one installation. An answerfile "
"will also be generated and should be used if Packstack needs to be run a second time ")
parser.add_option("--allinone", action="store_true", help="Shorthand for --install-hosts=<local ipaddr> --novanetwork-pubif=<dev> "
"--novacompute-privif=lo --novanetwork-privif=lo --os-swift-install=y"
", this option can be used to install an all in one OpenStack on this host")
parser.add_option("-t", "--timeout", default=300, help="The timeout for puppet Exec calls")
parser.add_option("-o", "--options", action="store_true", dest="options", help="Print details on options available in answer file(rst format)")
parser.add_option("-d", "--debug", action="store_true", default=False, help="Enable debug in logging")
parser.add_option("-y", "--dry-run", action="store_true", default=False, help="Don't execute, just generate manifests")
# For each group, create a group option
for group in controller.getAllGroups():
groupParser = OptionGroup(parser, group.DESCRIPTION)
        for param in group.parameters.values():
cmdOption = param.CMD_OPTION
paramUsage = param.USAGE
optionsList = param.OPTION_LIST
useDefault = param.USE_DEFAULT
if not useDefault:
groupParser.add_option("--%s" % cmdOption, help=paramUsage)
# Add group parser to main parser
parser.add_option_group(groupParser)
return parser
def printOptions():
"""
print and document the available options to the answer file (rst format)
"""
# For each group, create a group option
for group in controller.getAllGroups():
print("%s" % group.DESCRIPTION)
print("-" * len(group.DESCRIPTION) + "\n")
        for param in group.parameters.values():
cmdOption = param.CONF_NAME
paramUsage = param.USAGE
optionsList = param.OPTION_LIST or ""
print("%s" % (("**%s**" % str(cmdOption)).ljust(30)))
print(" %s" % paramUsage + "\n")
def plugin_compare(x, y):
"""
Used to sort the plugin file list
according to the number at the end of the plugin module
"""
x_match = re.search(r'.+\_(\d\d\d)', x)
x_cmp = x_match.group(1)
y_match = re.search(r'.+\_(\d\d\d)', y)
y_cmp = y_match.group(1)
return int(x_cmp) - int(y_cmp)
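# Sketch of the resulting ordering (file names below are hypothetical):
#   sorted(['prescript_007.py', 'osclient_010.py', 'sshkeys_003.py'],
#          key=cmp_to_key(plugin_compare))
#   -> ['sshkeys_003.py', 'prescript_007.py', 'osclient_010.py']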
def loadPlugins():
"""
Load All plugins from ./plugins
"""
sys.path.append(basedefs.DIR_PLUGINS)
sys.path.append(basedefs.DIR_MODULES)
fileList = [f for f in os.listdir(basedefs.DIR_PLUGINS) if f[0] != "_"]
fileList = sorted(fileList, key=cmp_to_key(plugin_compare))
for item in fileList:
# Looking for files that end with ###.py, example: a_plugin_100.py
match = re.search(r'^(.+\_\d\d\d)\.py$', item)
if match:
try:
moduleToLoad = match.group(1)
logging.debug("importing module %s, from file %s", moduleToLoad, item)
moduleobj = __import__(moduleToLoad)
moduleobj.__file__ = os.path.join(basedefs.DIR_PLUGINS, item)
globals()[moduleToLoad] = moduleobj
checkPlugin(moduleobj)
controller.addPlugin(moduleobj)
except Exception:
logging.error("Failed to load plugin from file %s", item)
logging.error(traceback.format_exc())
raise Exception("Failed to load plugin from file %s" % item)
def checkPlugin(plugin):
for funcName in ['initConfig', 'initSequences']:
if not hasattr(plugin, funcName):
raise ImportError("Plugin %s does not contain the %s function" % (plugin.__class__, funcName))
def countCmdLineFlags(options, flag):
"""
counts all command line flags that were supplied, excluding the supplied flag name
"""
counter = 0
# make sure only flag was supplied
    for key, value in options.__dict__.items():
        if key in (flag, 'debug', 'timeout', 'dry_run', 'default_password'):
            continue
# If anything but flag was called, increment
elif value:
counter += 1
return counter
def validateSingleFlag(options, flag):
counter = countCmdLineFlags(options, flag)
if counter > 0:
flag = flag.replace("_", "-")
msg = output_messages.ERR_ONLY_1_FLAG % ("--%s" % flag)
raise FlagValidationError(msg)
def initPluginsConfig():
for plugin in controller.getAllPlugins():
plugin.initConfig(controller)
def initPluginsSequences():
for plugin in controller.getAllPlugins():
plugin.initSequences(controller)
def _set_command_line_values(options):
for key, value in options.__dict__.items():
        # Replace _ with - in the key, since optparse converts - to _ in dest names
for group in controller.getAllGroups():
param = group.search("CMD_OPTION", key.replace("_", "-"))
if len(param) > 0 and value:
commandLineValues[param[0].CONF_NAME] = value
def main():
options = ""
try:
# Load Plugins
loadPlugins()
initPluginsConfig()
optParser = initCmdLineParser()
# Do the actual command line parsing
        # Try/Except are here to catch the sys.exit(0) raised when --help is used
(options, args) = optParser.parse_args()
if options.options:
printOptions()
raise SystemExit
# Initialize logging
logFile = initLogging(options.debug)
# Parse parameters
runConfiguration = True
confFile = None
controller.CONF['DEFAULT_EXEC_TIMEOUT'] = options.timeout
controller.CONF['DRY_RUN'] = options.dry_run
controller.CONF['DIR_LOG'] = basedefs.DIR_LOG
if options.validate_answer_file:
answerfilepath = options.validate_answer_file
validate_answer_file_options(answerfilepath)
elif options.gen_answer_file:
answerfilepath = _gettmpanswerfilepath()
if not answerfilepath:
_printAdditionalMessages()
return
# We can also override defaults with command line options
overrides = {}
_set_command_line_values(options)
for key, value in commandLineValues.items():
overrides[key] = value
generateAnswerFile(answerfilepath, overrides)
_handleParams(answerfilepath)
generateAnswerFile(options.gen_answer_file)
# Are we installing an all in one
elif options.allinone:
if getattr(options, 'answer_file', None):
msg = ('Please use either --allinone or --answer-file, '
'but not both.')
raise FlagValidationError(msg)
single_step_aio_install(options, logFile)
# Are we installing in a single step
elif options.install_hosts:
single_step_install(options, logFile)
        # Otherwise, run the normal (interactive or answer-file) flow
else:
# Make sure only --answer-file was supplied
if options.answer_file:
validateSingleFlag(options, "answer_file")
# If using an answer file, setting a default password
# does not really make sense
if getattr(options, 'default_password', None):
msg = ('Please do not set --default-password '
'when specifying an answer file.')
raise FlagValidationError(msg)
confFile = os.path.expanduser(options.answer_file)
if not os.path.exists(confFile):
raise Exception(output_messages.ERR_NO_ANSWER_FILE % confFile)
else:
_set_command_line_values(options)
_main(options, confFile, logFile)
except FlagValidationError as ex:
optParser.error(str(ex))
except Exception as e:
logging.error(traceback.format_exc())
print("\n" + utils.color_text("ERROR : " + str(e), 'red'))
try:
print(output_messages.ERR_CHECK_LOG_FILE_FOR_MORE_INFO % (logFile))
except NameError:
pass
sys.exit(1)
finally:
remove_remote_var_dirs(options, controller.CONF, controller.MESSAGES)
remove_temp_files()
# Always print user params to log
_printAdditionalMessages()
_summaryParamsToLog()
if __name__ == "__main__":
main()
|
{
"content_hash": "9082112529d9f88cb8b266a2cec73f69",
"timestamp": "",
"source": "github",
"line_count": 1065,
"max_line_length": 147,
"avg_line_length": 37.95399061032864,
"alnum_prop": 0.5974369758293956,
"repo_name": "fr34k8/packstack",
"id": "dbb421034e3c3fa355d677c92bbc3906cf4de8a9",
"size": "40967",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packstack/installer/run_setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "169748"
},
{
"name": "Python",
"bytes": "511156"
},
{
"name": "Ruby",
"bytes": "11197"
},
{
"name": "Shell",
"bytes": "27842"
}
],
"symlink_target": ""
}
|
import unittest
class TestEventProducerLastUpdate(unittest.TestCase):
pass
|
{
"content_hash": "693458f32f86494d96c9a625736d4d96",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 15.6,
"alnum_prop": 0.8461538461538461,
"repo_name": "yashodhank/frappe",
"id": "ccdea6c694b86877e791c8c34bbc7923007f5b40",
"size": "181",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/event_streaming/doctype/event_producer_last_update/test_event_producer_last_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "320627"
},
{
"name": "GCC Machine Description",
"bytes": "2474"
},
{
"name": "HTML",
"bytes": "179539"
},
{
"name": "JavaScript",
"bytes": "1099003"
},
{
"name": "Python",
"bytes": "1430023"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
def extractDjurasicoBlogspotCom(item):
'''
Parser for 'djurasico.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Common Sense of a Dukes daughter', 'Common Sense of a Duke\'s Daughter', 'translated'),
('Common Sense of a Duke\'s Daughter', 'Common Sense of a Duke\'s Daughter', 'translated'),
('Koushaku Reijou no Tashinami', 'Common Sense of a Duke\'s Daughter', 'translated'),
('Koushaku Reijou no Tashinami novel', 'Common Sense of a Duke\'s Daughter', 'translated'),
('The adventurer who received undesired immortality', 'Unwilling Undead Adventurer', 'translated'),
('Garudeina Oukoku Koukoku Ki', 'Garudeina Oukoku Koukoku Ki', 'translated'),
('Maidens grand summoning', 'Maidens grand summoning', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "bac166e0d925145da5900cf72b2462fc",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 135,
"avg_line_length": 55.74074074074074,
"alnum_prop": 0.5295681063122923,
"repo_name": "fake-name/ReadableWebProxy",
"id": "4a036fcc7580adc8169e67d3bc7d84632d8ceb59",
"size": "1505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractDjurasicoBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
"""
MLine (:mod:`skrf.media.MLine`)
========================================
.. autosummary::
:toctree: generated/
MLine
"""
import numpy as npy
from numpy import log, log10, tanh, sqrt, exp, real, imag, cosh, \
ones, zeros, arctan
from scipy.constants import epsilon_0, mu_0, c, pi
from .media import Media
from ..tlineFunctions import skin_depth, surface_resistivity
from ..constants import NumberLike
from typing import Union, TYPE_CHECKING
import warnings
if TYPE_CHECKING:
    from ..frequency import Frequency
class MLine(Media):
r"""
A microstripline transmission line defined in terms of width, thickness
and height on a given relative permittivity substrate. The line has a
conductor resistivity and a tangential loss factor.
    This class is highly inspired by the technical documentation [#]_
    and sources provided by the qucs project [#]_.
    In addition, the Djordjevic [#]_ /Svensson [#]_ wideband Debye
    dielectric model is considered to provide more realistic modelling of
    broadband microstrip with a causal time domain response.
    A compatibility mode is provided to mimic the behaviour of QUCS or of
    Keysight ADS. There are known differences in the output of these
    simulators.
    The quasi-static models of characteristic impedance and effective
    permittivity give the value at zero frequency. The dispersion models
    compute frequency-dependent values of these variables.
* Quasi-static characteristic impedance and effective permittivity models:
+ Hammerstad and Jensen (default)
+ Schneider
+ Wheeler
* Frequency dispersion of impedance and effective permittivity models:
+ Hammerstad and Jensen
+ Kirschning and Jansen (default)
+ Kobayashi
+ Schneider
+ Yamashita
+ (No dispersion)
* Strip thickness correction model:
        + all quasi-static models add a certain amount to W to account for
          non-zero thickness of the strip. Computation with zero thickness is
          possible.
Parameters
----------
frequency : :class:`~skrf.frequency.Frequency` object
frequency band of the media
z0 : number, array-like, or None
the port impedance for media. Only needed if different from the
characteristic impedance Z0 of the transmission line. In ohm
w : number, or array-like
width of conductor, in m
h : number, or array-like
height of substrate between ground plane and conductor, in m
t : number, or array-like or None, optional
        conductor thickness, in m. Default is None (no width correction
        to account for strip thickness).
ep_r : number, or array-like
relative permittivity of dielectric at frequency f_epr_tand, no unit
mu_r : number, array-like
        relative permeability of dielectric, no unit
model : str
microstripline quasi-static impedance and dielectric model in:
* 'hammerstadjensen' (default)
* 'schneider'
* 'wheeler'
disp : str
microstripline impedance and dielectric frequency dispersion model in:
* 'hammerstadjensen'
* 'kirschningjansen' (default)
* 'kobayashi'
* 'schneider'
* 'yamashita'
* 'none'
diel : str
dielectric frequency dispersion model in:
* 'djordjevicsvensson' (default)
* 'frequencyinvariant'
rho: number, or array-like, optional
resistivity of conductor, ohm / m
tand : number, or array-like
dielectric loss factor at frequency f_epr_tand
rough : number, or array-like
RMS roughness of conductor in m
f_low : number, or array-like
lower frequency for wideband Debye Djordjevic/Svensson dielectric
model, in Hz
f_high : number, or array-like
higher frequency for wideband Debye Djordjevic/Svensson dielectric
model, in Hz
f_epr_tand : number, or array-like
measurement frequency for ep_r and tand of dielectric, in Hz
compatibility_mode: str or None (default)
        If set to 'qucs', the following behaviour applies:
        * Characteristic impedance will be real (no imaginary part due to tand)
        * Quasi-static relative permittivity and impedance will be used for
          loss computation instead of frequency-dispersed values
        * Kobayashi and Yamashita models will disperse permittivity but keep
          quasi-static impedance values
    \*args, \*\*kwargs : arguments, keyword arguments
        passed to :class:`~skrf.media.media.Media`'s constructor
        (:func:`~skrf.media.media.Media.__init__`)
Note
----
    In the case the dispersion model only includes effective permittivity,
    no dispersion is applied to the impedance in QUCS mode, while Kirschning
    and Jansen is used for the impedance otherwise (including the default,
    no-compatibility mode).
    When the thickness of the strip is smaller than 3 skin depths, the loss
    model gives over-optimistic results and the media will issue a warning.
    At DC, the computed loss of the line could be smaller than its conductor
    resistance, which is not physical.
References
----------
.. [#] http://qucs.sourceforge.net/docs/technical.pdf
.. [#] https://github.com/Qucs/qucsator/blob/develop/src/components/microstrip/msline.cpp
.. [#] E. Hammerstad and Ø. Jensen,
"Accurate Models for Microstrip Computer-Aided Design",
Symposium on Microwave Theory and Techniques, pp. 407-409, June 1980.
.. [#] M. Kirschning and R. H. Jansen,
"Accurate Model for Effective Dielectric Constant of Microstrip with
Validity up to Millimeter-Wave Frequencies", Electronics Letters,
vol. 8, no. 6, pp. 272-273, Mar. 1982.
.. [#] R. H. Jansen and M. Kirschning,
"Arguments and an accurate Model for the Power-Current Formulation of
Microstrip Characteristic Impedance",
Archiv für Elektronik und Übertragungstechnik (AEÜ), vol. 37,
pp. 108-112, 1983.
.. [#] M. Kobayashi,
"A Dispersion Formula Satisfying Recent Requirements in Microstrip
CAD", IEEE Trans. on Microwave Theory and Techniques, vol. 36, no. 8,
pp. 1246-1250, Aug. 1988.
.. [#] M. V. Schneider,
"Microstrip Lines for Microwave Integrated Circuits",
The Bell System Technical Journal, vol. 48, pp. 1421-1444, May 1969.
.. [#] M. V. Schneider, "Microstrip Dispersion", Proceedings of the IEEE,
Letters, vol. 60, Jan. 1972, pp. 144-146.
.. [#] H. A. Wheeler,
"Transmission-Line Properties of a Strip on a Dielectric Sheet on a
Plane, IEEE Trans. on Microwave Theory and Techniques, vol. 25, no. 8,
pp. 631-647, Aug. 1977.
.. [#] H. A. Wheeler, "Formulas for the Skin Effect,"
Proceedings of the IRE, vol. 30, no. 9, pp. 412-424, Sept. 1942.
.. [#] E. Yamashita, K. Atsuki, and T. Ueda,
"An Approximate Dispersion Formula of Microstrip Lines for Computer
Aided Design of Microwave Integrated Circuits", IEEE Trans. on
Microwave Theory and Techniques, vol. 27, pp. 1036-1038, Dec. 1979.
.. [#] C. Svensson, G.E. Dermer,
Time domain modeling of lossy interconnects,
IEEE Trans. on Advanced Packaging, May 2001, N2, Vol. 24, pp.191-196.
.. [#] Djordjevic, R.M. Biljic, V.D. Likar-Smiljanic, T.K. Sarkar,
Wideband frequency-domain characterization of FR-4 and time-domain
causality,
IEEE Trans. on EMC, vol. 43, N4, 2001, p. 662-667.
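    Examples
    --------
    A minimal usage sketch; the substrate and line dimensions below are
    illustrative values, not taken from the references:
    >>> from skrf import Frequency
    >>> from skrf.media import MLine
    >>> freq = Frequency(1, 10, 101, 'ghz')
    >>> ms = MLine(frequency=freq, w=3e-3, h=1.6e-3, t=35e-6,
    ...            ep_r=4.5, rho=1.68e-8, tand=0.02, rough=0.15e-6)
    >>> line = ms.line(10e-3, unit='m')  # 10 mm long line as a Network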
"""
def __init__(self, frequency: Union['Frequency', None] = None,
z0: Union[NumberLike, None] = None,
w: NumberLike = 3, h: NumberLike = 1.6,
t: Union[NumberLike, None] = None,
ep_r: NumberLike = 4.5,
mu_r: NumberLike = 1.0,
model: str = 'hammerstadjensen',
disp: str = 'kirschningjansen',
diel: str = 'djordjevicsvensson',
rho: NumberLike = 1.68e-8, tand: NumberLike = 0,
rough: NumberLike = 0.15e-6,
f_low: NumberLike = 1e3, f_high: NumberLike = 1e12,
f_epr_tand: NumberLike = 1e9,
compatibility_mode: Union[str, None] = None,
*args, **kwargs):
Media.__init__(self, frequency = frequency, z0 = z0)
self.w, self.h, self.t = w, h, t
self.ep_r, self.mu_r = ep_r, mu_r
self.model, self.disp, self.diel = model, disp, diel
self.rho, self.tand, self.rough, self.disp = rho, tand, rough, disp
self.f_low, self.f_high, self.f_epr_tand = f_low, f_high, f_epr_tand
self.compatibility_mode = compatibility_mode
        # variation of effective permittivity with frequency
        # Not implemented in QUCS but implemented in ADS.
        # 'frequencyinvariant' will give a constant complex value with a real
        # part compatible with qucs and an imaginary part due to tand
self.ep_r_f, self.tand_f = self.analyse_dielectric(
self.ep_r, self.tand,
self.f_low, self.f_high, self.f_epr_tand, self.frequency.f,
self.diel)
# quasi-static effective permittivity of substrate + line and
# the impedance of the microstrip line
        # qucs uses real-valued ep_r, giving real-valued impedance
if compatibility_mode == 'qucs':
self.zl_eff, self.ep_reff, self.w_eff = self.analyse_quasi_static(
real(self.ep_r_f), self.w, self.h, self.t, self.model)
        # ads uses complex permittivity, giving complex impedance and
# effective permittivity
else:
self.zl_eff, self.ep_reff, self.w_eff = self.analyse_quasi_static(
self.ep_r_f, self.w, self.h, self.t, self.model)
        # analyse dispersion of impedance and relative permittivity
        # qucs uses w here, but w_eff seems better
if compatibility_mode == 'qucs':
self._z_characteristic, self.ep_reff_f = self.analyse_dispersion(
self.zl_eff, self.ep_reff, real(self.ep_r_f),
self.w, self.w_eff, self.h, self.t,
self.frequency.f, self.disp)
else:
self._z_characteristic, self.ep_reff_f = self.analyse_dispersion(
self.zl_eff, self.ep_reff, self.ep_r_f,
self.w_eff, self.w_eff, self.h, self.t,
self.frequency.f, self.disp)
# analyse losses of line
        # qucs uses quasi-static values here, leading to a difference
        # from ads
if compatibility_mode == 'qucs':
self.alpha_conductor, self.alpha_dielectric = self.analyse_loss(
real(self.ep_r_f), real(self.ep_reff), self.tand_f,
self.rho, self.mu_r,
real(self.zl_eff), real(self.zl_eff),
self.frequency.f, self.w, self.t, self.rough)
else:
self.alpha_conductor, self.alpha_dielectric = self.analyse_loss(
real(self.ep_r_f), real(self.ep_reff_f), self.tand_f,
self.rho, self.mu_r,
real(self._z_characteristic), real(self._z_characteristic),
self.frequency.f, self.w, self.t, self.rough)
def __str__(self) -> str:
f=self.frequency
output = \
'Microstripline Media. %i-%i %s. %i points'%\
(f.f_scaled[0],f.f_scaled[-1],f.unit, f.npoints) + \
'\n W= %.2em, H= %.2em'% \
(self.w,self.h)
return output
def __repr__(self) -> str:
return self.__str__()
@property
def gamma(self):
"""
Propagation constant.
Returns
-------
gamma : :class:`numpy.ndarray`
"""
ep_reff, f = real(self.ep_reff_f), self.frequency.f
alpha = self.alpha_dielectric.copy()
if self.rho is not None:
alpha += self.alpha_conductor
        beta = 2 * pi * f * sqrt(ep_reff) / c
return alpha + 1j*beta
@property
def Z0(self) -> npy.ndarray:
"""
Characteristic Impedance.
Note
----
Beware confusion with z0, the port impedance of media
Returns
-------
Z0 : :class:`numpy.ndarray`
"""
return self._z_characteristic
@property
def Z0_f(self) -> npy.ndarray:
"""
        Alias for Characteristic Impedance, kept for backward compatibility.
Deprecated, do not use.
Note
----
Beware confusion with z0, the port impedance of media
Returns
-------
Z0 : :class:`numpy.ndarray`
"""
warnings.warn(
"`Z0_f` is deprecated, use `Z0` instead",
DeprecationWarning, stacklevel = 2
)
return self._z_characteristic
def analyse_dielectric(self, ep_r: NumberLike, tand: NumberLike,
f_low: NumberLike, f_high: NumberLike,
f_epr_tand: NumberLike, f: NumberLike,
diel: str):
"""
        This function calculates the frequency dependent relative
        permittivity of the dielectric and the tangential loss factor.
References
----------
.. [#] C. Svensson, G.E. Dermer,
Time domain modeling of lossy interconnects,
IEEE Trans. on Advanced Packaging, May 2001, N2, Vol. 24, pp.191-196.
.. [#] Djordjevic, R.M. Biljic, V.D. Likar-Smiljanic, T.K. Sarkar,
Wideband frequency-domain characterization of FR-4 and time-domain
causality,
IEEE Trans. on EMC, vol. 43, N4, 2001, p. 662-667.
Returns
-------
ep_r_f : :class:`numpy.ndarray`
tand_f : :class:`numpy.ndarray`
"""
if diel == 'djordjevicsvensson':
# compute the slope for a log frequency scale, tanD dependent.
k = log((f_high + 1j * f_epr_tand) / (f_low + 1j * f_epr_tand))
fd = log((f_high + 1j * f) / (f_low + 1j * f))
ep_d = -tand * ep_r / imag(k)
# value for frequency above f_high
ep_inf = ep_r * (1. + tand * real(k) / imag(k))
            # compute complex permittivity
ep_r_f = ep_inf + ep_d * fd
# get tand
tand_f = -imag(ep_r_f) / real(ep_r_f)
elif diel == 'frequencyinvariant':
ep_r_f = ep_r - 1j * ep_r * tand
tand_f = tand
else:
raise ValueError('Unknown dielectric dispersion model')
return ep_r_f, tand_f
def analyse_quasi_static(self, ep_r: NumberLike,
w: NumberLike, h: NumberLike, t: NumberLike,
model: str):
"""
This function calculates the quasi-static impedance of a microstrip
line, the value of the effective permittivity as per filling factor
and the effective width due to the finite conductor thickness for the
given microstrip line and substrate properties.
References
----------
.. [#] E. Hammerstad and Ø. Jensen,
"Accurate Models for Microstrip Computer-Aided Design", Symposium
on Microwave Theory and Techniques, pp. 407-409, June 1980.
.. [#] H. A. Wheeler,
"Transmission-Line Properties of a Strip on a Dielectric Sheet on a
Plane, IEEE Trans. on Microwave Theory and Techniques, vol. 25,
no. 8, pp. 631-647, Aug. 1977.
.. [#] M. V. Schneider,
"Microstrip Lines for Microwave Integrated Circuits",
The Bell System Technical Journal, vol. 48, pp. 1421-1444, May 1969.
Returns
-------
zl_eff : :class:`numpy.ndarray`
ep_reff : :class:`numpy.ndarray`
"""
Z0 = sqrt(mu_0 / epsilon_0)
zl_eff = Z0
ep_reff = ep_r
w_eff = w
if model == 'wheeler':
# compute strip thickness effect
dw1 = 0
if t is not None and t > 0:
                dw1 = t / pi * log(4. * exp(1.) / sqrt((t / h)**2 + \
                    (1. / pi / (w / t + 1.1))**2))
dwr = (1. + 1. / ep_r) / 2. * dw1
wr = w + dwr
w_eff = wr
# compute characteristic impedance
if (w / h) < 3.3:
cp = log(4. * h / wr + sqrt((4 * h / wr)**2 + 2))
b = (ep_r - 1.) / (ep_r + 1.) / \
2 * (log(pi / 2.) + log(4. / pi) / ep_r)
zl_eff = (cp - b) * Z0 / pi / sqrt(2 * (ep_r + 1.))
else:
cp = 1 + log(pi / 2.) + log(wr / h / 2. + 0.94)
d = 1. / pi / 2. * (1. + log(pi**2 / 16.)) * (ep_r - 1.) \
/ ep_r**2
x = 2. * log(2.) / pi + wr / h / 2. + (ep_r + 1.) / 2 / pi / \
ep_r * cp + d
zl_eff = Z0 / 2 / x / sqrt(ep_r)
# compute effective dielectric constant
if (w / h) < 1.3:
a = log(8 * h / wr) + (wr / h)**2 / 32
b = (ep_r - 1.) / (ep_r + 1.) / \
2 * (log(pi / 2.) + log(4. / pi) / ep_r)
ep_reff = (ep_r + 1.) / 2. * (a / (a - b))**2
else:
# qucsator is 4.0137 but doc 0.94 * 2 = 1.88
d = (ep_r - 1.) / 2. / pi / ep_r * \
(log(2.1349 * wr / h + 4.0137) - 0.5169 / ep_r)
e = wr / h / 2 + 1. / pi * log(8.5397 * wr / h + 16.0547)
ep_reff = ep_r * ((e - d) / e)**2
elif model == 'schneider':
u = w / h
dw = 0
# consider strip thickness equations
if t is not None and t > 0:
if t < (w / 2):
if u < (1. / pi / 2):
arg = 2 * pi * w / t
else:
arg = h / t
dw = t / pi * (1. + log(2 * arg))
if (t / dw) >= 0.75:
dw = 0
w_eff = w + dw
u = w_eff / h
# effective dielectric constant
            ep_reff = (ep_r + 1.) / 2. + (ep_r - 1.) / 2. / sqrt(1. + 10. / u)
# characteristic impedance
if u < 1.:
z = 1. / pi / 2. * log(8. / u + u / 4)
else:
z = 1. / (u + 2.42 - 0.44 / u + (1. - 1. / u)**6)
zl_eff = Z0 * z / sqrt(ep_reff)
elif model == 'hammerstadjensen':
u = w / h
if t is not None:
t = t/h
du1 = 0.
# compute strip thickness effect
if t is not None and t > 0:
# Qucs formula 11.22 is wrong, normalized w has to be used instead (see Hammerstad and Jensen Article)
# Normalized w is named u and is actually used in qucsator source code
# coth(alpha) = 1/tanh(alpha)
du1 = t / pi * log(1. + 4. * exp(1.) / t * tanh(sqrt(6.517 * u))**2)
# sech(alpha) = 1/cosh(alpha)
dur = du1 * (1. + 1. / cosh(sqrt(ep_r - 1.))) / 2.
u1 = u + du1
ur = u + dur
w_eff = ur * h
# compute impedances for homogeneous medium
zr = hammerstad_zl(ur)
z1 = hammerstad_zl(u1)
# compute effective dielectric constant
a, b = hammerstad_ab(ur, ep_r)
e = hammerstad_er(ur, ep_r, a, b)
# compute final characteristic impedance and dielectric constant
#including strip thickness effects
zl_eff = zr / sqrt(e)
ep_reff = e * (z1 / zr)**2
else:
raise ValueError('Unknown microstripline quasi-static model')
return zl_eff, ep_reff, w_eff
def analyse_dispersion(self, zl_eff: NumberLike, ep_reff: NumberLike,
ep_r: NumberLike, wr: NumberLike, w_eff: NumberLike,
h: NumberLike, t: NumberLike, f: NumberLike,
disp: str):
"""
        This function computes the frequency dependent characteristic
        impedance and effective permittivity, accounting for microstripline
        frequency dispersion.
References
----------
.. [#] M. Kobayashi,
"A Dispersion Formula Satisfying Recent Requirements in Microstrip
CAD", IEEE Trans. on Microwave Theory and Techniques, vol. 36,
no. 8, pp. 1246-1250, Aug. 1988.
.. [#] M. V. Schneider, "Microstrip Dispersion", Proceedings of the
IEEE, Letters, vol. 60, Jan. 1972, pp. 144-146.
.. [#] M. Kirschning and R. H. Jansen,
"Accurate Model for Effective Dielectric Constant of Microstrip
with Validity up to Millimeter-Wave Frequencies", Electronics
Letters, vol. 8, no. 6, pp. 272-273, Mar. 1982.
.. [#] R. H. Jansen and M. Kirschning,
"Arguments and an accurate Model for the Power-Current Formulation of
Microstrip Characteristic Impedance",
Archiv für Elektronik und Übertragungstechnik (AEÜ), vol. 37,
pp. 108-112, 1983.
.. [#] E. Yamashita, K. Atsuki, and T. Ueda,
"An Approximate Dispersion Formula of Microstrip Lines for
Computer Aided Design of Microwave Integrated Circuits",
IEEE Trans. on Microwave Theory and Techniques, vol. 27,
pp. 1036-1038, Dec. 1979.
Returns
-------
z : :class:`numpy.ndarray`
e : :class:`numpy.ndarray`
"""
u = wr/h
if disp == 'schneider':
k = sqrt(ep_reff / ep_r)
fn = 4. * h * f / c * sqrt(ep_r - 1.)
fn2 = fn**2
e = ep_reff * ((1. + fn2) / (1. + k * fn2))**2
z = zl_eff * sqrt(ep_reff / e)
elif disp == 'hammerstadjensen':
Z0 = sqrt(mu_0 / epsilon_0)
g = pi**2 / 12 * (ep_r - 1) / ep_reff * sqrt(2 * pi * zl_eff / Z0)
fp = (2 * mu_0 * h * f) / zl_eff
e = ep_r - (ep_r - ep_reff) / (1 + g * fp**2)
z = zl_eff * sqrt(ep_reff / e) * (e - 1) / (ep_reff - 1)
elif disp == 'kirschningjansen':
fn = f * h * 1e-6
e = kirsching_er(u, fn, ep_r, ep_reff)
z, _ = kirsching_zl(u, fn, ep_r, ep_reff, e, zl_eff)
elif disp == 'yamashita':
k = sqrt(ep_r / ep_reff)
fp = 4 * h * f / c * sqrt(ep_r - 1) * \
(0.5 + (1 + 2 * log10(1 + u))**2)
e = ep_reff * ((1 + k * fp**1.5 / 4) / (1 + fp**1.5 / 4))**2
# qucs keep quasi-static impedance here
if self.compatibility_mode == 'qucs':
z = npy.ones(f.shape) * zl_eff
# use Kirschning Jansen for impedance dispersion by default
else:
fn = f * h * 1e-6
z, _ = kirsching_zl(wr / h, fn, ep_r, ep_reff, e, zl_eff)
elif disp == 'kobayashi':
fk = c * arctan(ep_r * sqrt((ep_reff - 1) / (ep_r - ep_reff)))/ \
(2 * pi * h * sqrt(ep_r - ep_reff))
fh = fk / (0.75 + (0.75 - 0.332 / (ep_r**1.73)) * u)
no = 1 + 1 / (1 + sqrt(u)) + 0.32 * (1 / (1 + sqrt(u)))**3
nc = npy.where(u < 0.7,
1 + 1.4 / (1 + u) * (0.15 - 0.235 * exp(-0.45 * f / fh)),
1)
n = npy.where(no * nc < 2.32, no * nc, 2.32)
e = ep_r - (ep_r - ep_reff) / (1 + (f / fh)**n)
# qucs keep quasi-static impedance here
if self.compatibility_mode == 'qucs':
z = npy.ones(f.shape) * zl_eff
# use Kirschning Jansen for impedance dispersion by default
else:
fn = f * h * 1e-6
z, _ = kirsching_zl(wr / h, fn, ep_r, ep_reff, e, zl_eff)
elif disp == 'none':
e = ones(f.shape) * ep_reff
z = ones(f.shape) * zl_eff
else:
raise ValueError('Unknown microstripline dispersion model')
return z, e
def analyse_loss(self, ep_r: NumberLike, ep_reff: NumberLike,
tand: NumberLike, rho: NumberLike, mu_r: NumberLike,
zl_eff_f1: NumberLike, zl_eff_f2: NumberLike,
f: NumberLike, w: NumberLike, t: NumberLike,
D: NumberLike):
"""
The function calculates the conductor and dielectric losses of a
        single microstrip line using Wheeler's incremental inductance rule.
References
----------
.. [#] H. A. Wheeler, "Formulas for the Skin Effect,"
Proceedings of the IRE, vol. 30, no. 9, pp. 412-424, Sept. 1942.
Returns
-------
a_conductor : :class:`numpy.ndarray`
a_dielectric : :class:`numpy.ndarray`
"""
# limited to only Hammerstad and Jensen model
Z0 = npy.sqrt(mu_0/epsilon_0)
# conductor losses
        if t is not None and t > 0:
            if rho is None:
                raise AttributeError('must provide resistivity rho. '
                                     'see initializer help')
            else:
                Rs = surface_resistivity(f=f, rho=rho, mu_r=1)
                ds = skin_depth(f, rho, mu_r)
                if npy.any(t < 3 * ds):
                    warnings.warn(
                        'Conductor loss calculation invalid for strip '
                        'thickness t ({}) < 3 * skin depth ({})'.format(t, ds[0]),
                        RuntimeWarning
                    )
# current distribution factor
Ki = exp(-1.2 * ((zl_eff_f1 + zl_eff_f2) / 2 / Z0)**0.7)
# D is RMS surface roughness
Kr = 1 + 2 / pi * arctan(1.4 * (D/ds)**2)
a_conductor = Rs / (zl_eff_f1 * w) * Ki * Kr
else:
a_conductor = zeros(f.shape)
# dielectric losses
l0 = c / f
a_dielectric = pi * ep_r / (ep_r - 1) * (ep_reff - 1) / \
sqrt(ep_reff) * tand / l0
return a_conductor, a_dielectric
def hammerstad_ab(u: NumberLike, ep_r: NumberLike) -> NumberLike:
"""
    Hammerstad parameters for the quasi-static effective permittivity.
"""
a = 1. + log((u**4 + (u / 52.)**2) / (u**4 + 0.432)) / 49. \
+ log(1 + (u / 18.1)**3) / 18.7
b = 0.564 * ((ep_r - 0.9) / (ep_r + 3.))**0.053
return a, b
def hammerstad_zl(u: NumberLike) -> NumberLike:
"""
Hammerstad quasi-static impedance.
"""
fu = 6 + (2 * pi - 6) * exp(-(30.666 / u)**0.7528)
Z0 = sqrt(mu_0/epsilon_0)
return Z0 / 2. / pi * log(fu / u + sqrt(1. + (2. / u)**2))
def hammerstad_er(u: NumberLike, ep_r: NumberLike, a: NumberLike,
b: NumberLike) -> NumberLike:
"""
Hammerstad quasi-static relative permittivity.
"""
return (ep_r + 1) / 2 + (ep_r - 1) / 2 * (1. + 10. / u)**(-a * b)
def kirsching_zl(u: NumberLike, fn: NumberLike,
ep_r: NumberLike, ep_reff: NumberLike, ep_reff_f: NumberLike,
zl_eff: NumberLike):
"""
Kirschning Jansen impedance dispersion.
"""
#fn = f * h * 1e-6 # GHz-mm
R1 = npy.minimum(0.03891 * ep_r**1.4, 20.)
R2 = npy.minimum(0.2671 * u**7, 20.)
R3 = 4.766 * exp(-3.228 * u**0.641)
R4 = 0.016 + (0.0514 * ep_r)**4.524
R5 = (fn / 28.843)**12
R6 = npy.minimum(22.20 * u **1.92, 20.)
R7 = 1.206 - 0.3144 * exp(-R1) * (1 - exp(-R2))
R8 = 1 + 1.275 * (1 - exp(-0.004625 * R3 * ep_r**1.674 \
* (fn / 18.365)**2.745))
R9 = 5.086 * R4 * R5/(0.3838 + 0.386 * R4) \
* exp(-R6) / (1 + 1.2992 * R5) \
* (ep_r - 1)**6 / (1 + 10 * (ep_r - 1)**6)
R10 = 0.00044 * ep_r**2.136 + 0.0184
R11 = (fn / 19.47)**6 / (1 + 0.0962 * (fn / 19.47)**6)
R12 = 1 / (1 + 0.00245 * u**2)
R13 = 0.9408 * ep_reff_f**R8 - 0.9603
R14 = (0.9408 - R9) * ep_reff**R8 - 0.9603
R15 = 0.707 * R10 * (fn / 12.3)**1.097
R16 = 1 + 0.0503 * ep_r**2 * R11 * (1 - exp(-(u / 15)**6))
    R17 = R7 * (1 - 1.1241 * R12 / R16 \
        * exp(-0.026 * fn**1.15656 - R15))
return zl_eff * (R13 / R14)**R17, R17
def kirsching_er(u: NumberLike, fn: NumberLike,
ep_r: NumberLike, ep_reff: NumberLike):
"""
Kirschning Jansen relative permittivity dispersion.
"""
    # in the paper fn is in GHz-cm while in Qucs it is GHz-mm, hence a factor
    # of 10 in all constants that multiply or divide fn
    P1 = 0.27488 + (0.6315 + 0.525 / (1 + 0.0157 * fn)**20) * u \
        - 0.065683 * exp(-8.7513 * u)
    P2 = 0.33622 * (1 - exp(-0.03442 * ep_r))
P3 = 0.0363 * exp(-4.6 * u) * (1 - exp(-(fn / 38.7)**4.97))
P4 = 1 + 2.751 * (1 - exp(-(ep_r / 15.916)**8))
Pf = P1 * P2 * ((0.1844 + P3 * P4) * fn)**1.5763
return ep_r - (ep_r - ep_reff) / (1 + Pf)
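# Sketch of how the two Kirschning and Jansen helpers compose, mirroring
# their use in analyse_dispersion above (inputs are illustrative):
#   fn = f * h * 1e-6                        # normalized frequency in GHz-mm
#   e = kirsching_er(u, fn, ep_r, ep_reff)   # dispersed permittivity
#   z, _ = kirsching_zl(u, fn, ep_r, ep_reff, e, zl_eff)  # dispersed impedance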
|
{
"content_hash": "24eb18f0b6b6408bdcd796e68481153d",
"timestamp": "",
"source": "github",
"line_count": 719,
"max_line_length": 118,
"avg_line_length": 41.21418636995828,
"alnum_prop": 0.5188809772888334,
"repo_name": "scikit-rf/scikit-rf",
"id": "8f567103fd19d1225cff0c61e29bfc5bf7747c99",
"size": "29641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skrf/media/mline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6908"
},
{
"name": "Python",
"bytes": "1998676"
},
{
"name": "TypeScript",
"bytes": "1286336"
}
],
"symlink_target": ""
}
|