text stringlengths 4 1.02M | meta dict |
|---|---|
from test_models import *
from test_utils import *
from test_periods import *
from test_templatetags import *
from test_views import *
from test_rule import *
| {
"content_hash": "7ab64b7a9ac7f73d1fe661722b8bc2a4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 31,
"avg_line_length": 22.857142857142858,
"alnum_prop": 0.76875,
"repo_name": "tscholze/py-hasi-home-analytical-system-interface",
"id": "3b3a11aa86c39a7e936ed4dd0fa1541f6b858bdc",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hasi/schedule/tests/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45942"
},
{
"name": "HTML",
"bytes": "53572"
},
{
"name": "JavaScript",
"bytes": "37679"
},
{
"name": "Python",
"bytes": "159855"
}
],
"symlink_target": ""
} |
import requests
import time
import random
from pyquery import PyQuery as pq
from mongodb import db
from settings import ALADDIN_BASE_URLS
def get_aladdin_pages():
    """
    For each configured category base URL, discover how many result pages
    the category has and store every page's breadcrumb-search URL in MongoDB.
    (Original docstring, translated: "get each category's paginated urls and
    store them into mongodb.")
    :return: None
    """
    breadcrumb_search_url = 'http://www.aladdin-e.com/breadcrumbSearch/1/{amount}/20/{page}/CS/{catalog}'
    for key, url in ALADDIN_BASE_URLS.items():
        res = requests.get(url)
        if res.status_code == 200:
            content = res.content
            p = pq(content)
            # Parse the total item count out of the navigation-bar text; the
            # slice [2:-10] strips the surrounding label characters.
            # NOTE(review): fragile — assumes the site's exact label format;
            # a layout change makes int() raise ValueError. TODO confirm.
            amount = int(p('#itemListNavigationTopUtil .ml5.fl').text()[2: -10])
            catalog = url.split('/')[-1]
            # Ceiling division: the site paginates 20 items per page.
            pages = amount // 20
            if amount % 20:
                pages += 1
            for i in range(pages):
                url = breadcrumb_search_url.format(**{
                    'amount': amount,
                    'page': i+1,
                    'catalog': catalog
                })
                # One document per keyword; $addToSet avoids duplicate urls
                # on re-runs. NOTE(review): pymongo's update() is deprecated
                # in favour of update_one() — confirm driver version.
                db.aladdin_base_url.update({'keyword': key}, {'$addToSet': {'urls': url}}, upsert=True)
        # Random pause between categories to avoid hammering the server.
        time.sleep(random.randint(0,5))
# Script entry point: crawl every configured category's page urls.
if __name__ == '__main__':
    get_aladdin_pages()
| {
"content_hash": "9ee78160a1033ee3775fa7baa8ddc0b9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 105,
"avg_line_length": 29.763157894736842,
"alnum_prop": 0.53315649867374,
"repo_name": "mutoulbj/chem_spider",
"id": "ffd51538f4df1b66ab02174e006605cd89fbc5fb",
"size": "1200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chem_spider/aladdin_base_pages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "535670"
},
{
"name": "Python",
"bytes": "57579"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
import os
import sys
from optparse import OptionParser
from urllib import urlopen
from ua_mapper.wurfl2python import WurflPythonWriter, DeviceSerializer
OUTPUT_PATH = os.path.abspath(os.path.dirname(__file__))
WURFL_ARCHIVE_PATH = os.path.join(OUTPUT_PATH, "wurfl.zip")
WURFL_XML_PATH = os.path.join(OUTPUT_PATH, "wurfl.xml")
WURFL_PY_PATH = os.path.join(OUTPUT_PATH, "wurfl.py")
WURFL_DOWNLOAD_URL = 'http://downloads.sourceforge.net/project/wurfl/WURFL/latest/wurfl-latest.zip'
class Updater(object):
help = 'Updates Wurfl devices database.'
def write_archive(self, filename, data):
f = open(WURFL_ARCHIVE_PATH, "w")
f.write(data)
f.close()
def fetch_latest_wurfl(self):
print "Downloading Wurfl..."
data = urlopen(WURFL_DOWNLOAD_URL).read()
self.write_archive(WURFL_ARCHIVE_PATH, data)
os.system("unzip -o %s -d %s" % (WURFL_ARCHIVE_PATH, OUTPUT_PATH))
return True
def wurfl_to_python(self):
print "Compiling device list..."
# Setup options.
op = OptionParser()
op.add_option("-l", "--logfile", dest="logfile", default=sys.stderr,
help="where to write log messages")
# Cleanup args for converter to play nicely.
if '-f' in sys.argv:
sys.argv.remove('-f')
if '--force' in sys.argv:
sys.argv.remove('--force')
options, args = op.parse_args()
options = options.__dict__
options.update({"outfile": WURFL_PY_PATH})
# Perform conversion.
wurfl = WurflPythonWriter(WURFL_XML_PATH, device_handler=DeviceSerializer, options=options)
wurfl.process()
def handle(self, *args, **options):
self.fetch_latest_wurfl()
self.wurfl_to_python()
from ua_mapper.wurfl import devices
print "Done."
Updater().handle()
| {
"content_hash": "bf7dc45f0179b0fe97daf5d943135405",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 99,
"avg_line_length": 32.758620689655174,
"alnum_prop": 0.6236842105263158,
"repo_name": "praekelt/wsgi-ua-mapper",
"id": "69875b6dccbf6cc613d30a85d01e61dfe3989140",
"size": "1900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ua_mapper/updatewurfl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9311236"
}
],
"symlink_target": ""
} |
"""
The lexicon is constructed by calling
``lexicon.fromstring(<lexicon string>)``.
In order to construct a parser, you also need a rule set.
The standard English rules are provided in chart as
``chart.DefaultRuleSet``.
The parser can then be constructed by calling, for example:
``parser = chart.CCGChartParser(<lexicon>, <ruleset>)``
Parsing is then performed by running
``parser.parse(<sentence>.split())``.
While this returns a list of trees, the default representation
of the produced trees is not very enlightening, particularly
given that it uses the same tree class as the CFG parsers.
It is probably better to call:
``chart.printCCGDerivation(<parse tree extracted from list>)``
which should print a nice representation of the derivation.
This entire process is shown far more clearly in the demonstration:
python chart.py
"""
from __future__ import print_function, division, unicode_literals
import itertools
from nltk.parse import ParserI
from nltk.parse.chart import AbstractChartRule, EdgeI, Chart
from nltk.tree import Tree
from nltk.ccg.lexicon import fromstring
from nltk.ccg.combinator import (ForwardT, BackwardT, ForwardApplication,
BackwardApplication, ForwardComposition,
BackwardComposition, ForwardSubstitution,
BackwardBx, BackwardSx)
from nltk.compat import python_2_unicode_compatible, string_types
# Based on the EdgeI class from NLTK.
# A number of the properties of the EdgeI interface don't
# transfer well to CCGs, however.
class CCGEdge(EdgeI):
    """Complete chart edge spanning a (start, end) token range, carrying a
    CCG category and the combinator rule that produced it."""
    def __init__(self, span, categ, rule):
        self._span = span
        self._categ = categ
        self._rule = rule
        # Key used by EdgeI for equality/hashing.
        self._comparison_key = (span, categ, rule)

    # Accessors
    def lhs(self): return self._categ
    def span(self): return self._span
    def start(self): return self._span[0]
    def end(self): return self._span[1]
    def length(self):
        # Bug fix: the original read ``self.span[0]``, which indexes the
        # bound method object and raises TypeError; use the tuple directly.
        return self._span[1] - self._span[0]
    def rhs(self): return ()
    def dot(self): return 0
    def is_complete(self): return True
    def is_incomplete(self): return False
    def nextsym(self): return None
    def categ(self): return self._categ
    def rule(self): return self._rule
class CCGLeafEdge(EdgeI):
    '''
    Class representing leaf edges in a CCG derivation.
    '''
    def __init__(self, pos, categ, leaf):
        self._pos = pos
        self._categ = categ
        self._leaf = leaf
        self._comparison_key = (pos, categ, leaf)

    # Accessors: a leaf edge always spans exactly one token and is complete.
    def lhs(self):
        return self._categ

    def span(self):
        return (self._pos, self._pos + 1)

    def start(self):
        return self._pos

    def end(self):
        return self._pos + 1

    def length(self):
        return 1

    def rhs(self):
        return self._leaf

    def dot(self):
        return 0

    def is_complete(self):
        return True

    def is_incomplete(self):
        return False

    def nextsym(self):
        return None

    def categ(self):
        return self._categ

    def leaf(self):
        return self._leaf
@python_2_unicode_compatible
class BinaryCombinatorRule(AbstractChartRule):
    '''
    Applies a directed binary combinator to pairs of adjacent chart edges.
    Takes the combinator to apply at construction time.
    '''
    NUMEDGES = 2

    def __init__(self, combinator):
        self._combinator = combinator

    def apply(self, chart, grammar, left_edge, right_edge):
        """Yield each new edge the combinator produces from this pair."""
        # Only adjacent edges may combine.
        if left_edge.end() != right_edge.start():
            return
        left_cat = left_edge.categ()
        right_cat = right_edge.categ()
        # Bail out unless the two categories are permitted to combine.
        if not self._combinator.can_combine(left_cat, right_cat):
            return
        combined_span = (left_edge.start(), right_edge.end())
        for result in self._combinator.combine(left_cat, right_cat):
            edge = CCGEdge(span=combined_span, categ=result, rule=self._combinator)
            if chart.insert(edge, (left_edge, right_edge)):
                yield edge

    # The representation of the combinator (for printing derivations)
    def __str__(self):
        return "%s" % self._combinator
# Type-raising must be handled slightly differently to the other rules, as the
# resulting rules only span a single edge, rather than both edges.
@python_2_unicode_compatible
class ForwardTypeRaiseRule(AbstractChartRule):
    '''
    Class for applying forward type raising
    '''
    NUMEDGES = 2

    def __init__(self):
        self._combinator = ForwardT

    def apply(self, chart, grammar, left_edge, right_edge):
        # The pair must be adjacent, but the raised edge covers only the
        # left edge's span.
        if left_edge.end() != right_edge.start():
            return
        for raised in self._combinator.combine(left_edge.categ(), right_edge.categ()):
            edge = CCGEdge(span=left_edge.span(), categ=raised, rule=self._combinator)
            if chart.insert(edge, (left_edge,)):
                yield edge

    def __str__(self):
        return "%s" % self._combinator
@python_2_unicode_compatible
class BackwardTypeRaiseRule(AbstractChartRule):
    '''
    Class for applying backward type raising.
    '''
    NUMEDGES = 2

    def __init__(self):
        self._combinator = BackwardT

    def apply(self, chart, grammar, left_edge, right_edge):
        # The pair must be adjacent, but the raised edge covers only the
        # right edge's span.
        if left_edge.end() != right_edge.start():
            return
        for raised in self._combinator.combine(left_edge.categ(), right_edge.categ()):
            edge = CCGEdge(span=right_edge.span(), categ=raised, rule=self._combinator)
            if chart.insert(edge, (right_edge,)):
                yield edge

    def __str__(self):
        return "%s" % self._combinator
# Common sets of combinators used for English derivations.
ApplicationRuleSet = [BinaryCombinatorRule(ForwardApplication),
                      BinaryCombinatorRule(BackwardApplication)]
# Composition, including the crossed backward variant (Bx).
CompositionRuleSet = [BinaryCombinatorRule(ForwardComposition),
                      BinaryCombinatorRule(BackwardComposition),
                      BinaryCombinatorRule(BackwardBx)]
# Substitution, including the crossed backward variant (Sx).
SubstitutionRuleSet = [BinaryCombinatorRule(ForwardSubstitution),
                       BinaryCombinatorRule(BackwardSx)]
TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()]

# The standard English rule set.
DefaultRuleSet = ApplicationRuleSet + CompositionRuleSet + \
                 SubstitutionRuleSet + TypeRaiseRuleSet
class CCGChartParser(ParserI):
    '''
    Chart parser for CCGs.
    Based largely on the ChartParser class from NLTK.
    '''
    def __init__(self, lexicon, rules, trace=0):
        self._lexicon = lexicon
        self._rules = rules
        self._trace = trace

    def lexicon(self):
        return self._lexicon

    # Implements the CYK algorithm
    def parse(self, tokens):
        """Parse the token sequence and return all complete parses."""
        tokens = list(tokens)
        chart = CCGChart(list(tokens))
        lex = self._lexicon
        n = chart.num_leaves()

        # Seed the chart: one leaf edge per lexical category of each token.
        for position in range(n):
            token = chart.leaf(position)
            for category in lex.categories(token):
                chart.insert(CCGLeafEdge(position, category, token), ())

        # Consider spans of increasing width, CYK-style.
        for width in range(2, n + 1):
            for begin in range(0, n - width + 1):
                end = begin + width
                # Split the span at every interior position and try to
                # combine the two halves with every rule.
                for split in range(begin + 1, end):
                    for left in chart.select(span=(begin, split)):
                        for right in chart.select(span=(split, end)):
                            for rule in self._rules:
                                added_here = 0
                                for _new_edge in rule.apply(chart, lex, left, right):
                                    added_here += 1

        # Output the resulting parses
        return chart.parses(lex.start())
class CCGChart(Chart):
    """Chart specialised for CCG; only tree construction differs from Chart."""
    def __init__(self, tokens):
        Chart.__init__(self, tokens)

    # Constructs the trees for a given parse. Unfortunately, the parse trees need to be
    # constructed slightly differently to those in the default Chart class, so it has to
    # be reimplemented
    def _trees(self, edge, complete, memo, tree_class):
        # Build every tree rooted at `edge`; results are memoised per edge.
        assert complete, "CCGChart cannot build incomplete trees"
        if edge in memo:
            return memo[edge]
        if isinstance(edge,CCGLeafEdge):
            # Leaf: wrap the token in a (category, "Leaf")-labelled node.
            word = tree_class(edge.lhs(), [self._tokens[edge.start()]])
            leaf = tree_class((edge.lhs(), "Leaf"), [word])
            memo[edge] = [leaf]
            return [leaf]
        # Seed the memo with an empty list first so a cyclic child-pointer
        # reference terminates instead of recursing forever.
        memo[edge] = []
        trees = []
        lhs = (edge.lhs(), "%s" % edge.rule())
        for cpl in self.child_pointer_lists(edge):
            child_choices = [self._trees(cp, complete, memo, tree_class)
                             for cp in cpl]
            # Cartesian product over each child's alternative subtrees.
            for children in itertools.product(*child_choices):
                trees.append(tree_class(lhs, children))
        memo[edge] = trees
        return trees
#--------
# Displaying derivations
#--------
def printCCGDerivation(tree):
    """Print a derivation: the words, their lexical categories, then each step."""
    # Build two aligned strings: one of leaf words, one of categories.
    leafstr = ''
    catstr = ''
    for leaf, cat in tree.pos():
        str_cat = "%s" % cat
        # Column width: the wider of word/category plus two spaces padding.
        nextlen = 2 + max(len(leaf), len(str_cat))
        # Centre the category within the column.
        lcatlen = (nextlen - len(str_cat)) // 2
        rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
        catstr += ' ' * lcatlen + str_cat + ' ' * rcatlen
        # Centre the word within the same column.
        lleaflen = (nextlen - len(leaf)) // 2
        rleaflen = lleaflen + (nextlen - len(leaf)) % 2
        leafstr += ' ' * lleaflen + leaf + ' ' * rleaflen
    print(leafstr)
    print(catstr)
    # Walk the tree, printing each derivation step.
    printCCGTree(0, tree)
# Prints the sequence of derivation steps.
def printCCGTree(lwidth, tree):
    """Recursively print derivation steps; return the right-edge column."""
    # Leaf (word): print nothing, just account for the space it occupies.
    if not isinstance(tree, Tree):
        return 2 + lwidth + len(tree)

    # The width of this step is the max right edge over all children.
    rwidth = lwidth
    for child in tree:
        rwidth = max(rwidth, printCCGTree(rwidth, child))

    label = tree.label()
    # Leaf wrapper node: nothing to print, but account for its width.
    if not isinstance(label, tuple):
        return max(rwidth,
                   2 + lwidth + len("%s" % label),
                   2 + lwidth + len(tree[0]))

    res, op = label
    # Pad to the left with spaces, followed by a sequence of '-'
    # and the derivation rule.
    print(lwidth * ' ' + (rwidth - lwidth) * '-' + "%s" % op)
    # The resulting category goes centred on the following line.
    str_res = "%s" % res
    respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth
    print(respadlen * ' ' + str_res)
    return rwidth
### Demonstration code
# Construct the lexicon
lex = fromstring('''
:- S, NP, N, VP # Primitive categories, S is the target primitive
Det :: NP/N # Family of words
Pro :: NP
TV :: VP/NP
Modal :: (S\\NP)/VP # Backslashes need to be escaped
I => Pro # Word -> Category mapping
you => Pro
the => Det
# Variables have the special keyword 'var'
# '.' prevents permutation
# ',' prevents composition
and => var\\.,var/.,var
which => (N\\N)/(S/NP)
will => Modal # Categories can be either explicit, or families.
might => Modal
cook => TV
eat => TV
mushrooms => N
parsnips => N
bacon => N
''')
def demo():
    """Parse an example sentence and print every derivation found."""
    ccg_parser = CCGChartParser(lex, DefaultRuleSet)
    sentence = "I might cook and eat the bacon".split()
    for parse in ccg_parser.parse(sentence):
        printCCGDerivation(parse)

if __name__ == '__main__':
    demo()
| {
"content_hash": "7294755a22a69ee060e44684bf33f3ca",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 110,
"avg_line_length": 33.72598870056497,
"alnum_prop": 0.6151268950498366,
"repo_name": "nelango/ViralityAnalysis",
"id": "9b89e8ddbf94792c957ec9e183056afa4631c268",
"size": "12161",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "model/lib/nltk/ccg/chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1382177"
},
{
"name": "Java",
"bytes": "18805"
},
{
"name": "JavaScript",
"bytes": "10958"
},
{
"name": "Python",
"bytes": "17814735"
}
],
"symlink_target": ""
} |
from helper import norm, unitize
from collections import defaultdict
from math import pow
import scipy as sp
import pprint
from logger import logger
base_logger = logger.getChild('links')
base_logger.info('Inside links.py')
########################################################
### Link Stuff ########################################
########################################################
XSIZE=20
MAXSTRETCH = 1.2
###########
# custom allclose
###########
from scipy import allclose
def allclose2(a, b, rtol=1e-05, atol=1e-08):
    """Return True when the 2-vectors *a* and *b* are element-wise close,
    using the same ``atol + rtol * |b|`` tolerance rule as scipy.allclose."""
    ax, ay = a
    bx, by = b
    x_close = abs(ax - bx) <= atol + rtol * abs(bx)
    y_close = abs(ay - by) <= atol + rtol * abs(by)
    return x_close and y_close
class Link(object):
""" A link object keeps a link between two cells.
Initialization:
* one: cell one
* two: cell two
* L: resting length of link_disp
* C_10: first coefficient of stress-strain relationship
* C_11: second coefficient of stress-strain relationship
* xsize: size of box (to handle periodic boundary conditions)
Properties:
* disp - displacement of spring
* stress - stress of spring
* extension - compute the current link extension
* energy - compute the current link energy
* force - force of spring
"""
logger = base_logger.getChild('Link')
def __init__(self,one,two,L=None,C_10=None,C_11=None,xsize=XSIZE,maxstretch=None):
self.one = one
self.two = two
self.xsize=xsize
self.offset = sp.array([self.xsize,0])
if L is None:
self.L = 0.5 * (self.one.type.L + self.two.type.L)
else:
self.L = L
if C_10 is None:
self.C_10 = 0.5 * (self.one.type.C_10 + self.two.type.C_10)
else:
self.C_10 = C_10
if C_11 is None:
self.C_11 = 0.5 * (self.one.type.C_11 + self.two.type.C_11)
else:
self.C_11 = C_11
if maxstretch is None:
self.maxstretch = 0.5 * (self.one.type.maxstretch + self.two.type.maxstretch)
self.broken = False
self._cached_disp = sp.array([0.,0.])
self._cached_force = sp.array([0.,0.])
logger.debug("""Created a Link with:
{info}""".format(info=pprint.pformat(self.__dict__)))
def __repr__(self):
return "<Link: C_10={0.C_10}, C_11={0.C_11}, L={0.L}, betwixt:{0.one},{0.two}>".format(self)
@property
def calculation_necessary(self):
if allclose2( self.disp, self._cached_disp ):
return False
return True
@property
def disp(self):
""" Displacement from one to two """
disp = self.one.pos - self.two.pos
if norm(disp) > self.xsize/2:
disp = self.one.pos + self.offset - self.two.pos
return disp
@property
def disp_old(self):
""" Displacement from one to two """
direct = self.one.pos - self.two.pos
around = self.one.pos + self.offset -self.two.pos
if norm(direct) <= norm(around):
return direct
else:
return around
def extension_without_breaking(self):
""" Get the extension of the current link without breaking """
length = norm(self.disp)
return length
def stress_without_breaking(self):
""" Stress from one to two """
stretch = self.extension_without_breaking()/self.L
if stretch > 1.0:
stress = 2*self.C_10*(stretch-1.0/(stretch*stretch))+6*self.C_11*(stretch*stretch-stretch-1+1.0/(stretch*stretch)+1.0/(stretch*stretch*stretch)-1.0/(stretch*stretch*stretch*stretch))
else :
stress = 2*self.C_10*(stretch-1)
#stress = 0.0
return stress
@property
def extension(self):
""" Get the current extension of the link """
#abs_stress = self.stress_without_breaking()
length = self.extension_without_breaking()
if not self.broken and length > self.maxstretch * self.L and self.C_10 > 0 and self.C_11 > 0:
logger.warning('One of our links is breaking!')
print '\n'
print self.one.type.name, 'pos=', self.one.pos, self.two.type.name, 'pos=', self.two.pos, 'extension=',length, 'zero extension=',self.L, 'stretch=', length/self.L, 'max stretch=', self.maxstretch
print '\n'
self.C_10 = 0
self.C_11 = 0
self.broken = True
return length
@property
def stress(self):
""" Stress from one to two """
stretch = self.extension/self.L
#if stretch > 1.0:
stress = 2*self.C_10*(stretch-1.0/(stretch*stretch))+6*self.C_11*(stretch*stretch-stretch-1+1.0/(stretch*stretch)+1.0/(stretch*stretch*stretch)-1.0/(stretch*stretch*stretch*stretch))
#else :
# stress = 2*self.C_10*(stretch-1)
return stress
@property
def energy(self):
""" Get the energy stored in a link """
stretch = self.extension/self.L
return self.C_10*(stretch*stretch+2.0/stretch-3)+self.C_11*(2*stretch*stretch*stretch-3*stretch*stretch-6*stretch-6.0/stretch-3.0/(stretch*stretch)+2.0/(stretch*stretch*stretch*stretch)+14)
@property
def force(self):
""" Get the force the link enacts """
average_cell_radius = self.L/2.0
if self.broken:
return 0
if self.calculation_necessary:
stress = self.stress
disp = self.disp
self._cached_disp = disp
force = - stress * 3.141592 * average_cell_radius * average_cell_radius * unitize(disp)
self._cached_force = force
return force
else:
return self._cached_force
class Links(object):
    """ Container for Links
    Main Attributes:
        * data : holds links, a dictionary where for a pair of cells holds a pointer to the Link for those two cells
        * neighbors : holds neighbor information, a dictionary that for each cell stores a set of its neighbors.
    This class basically a wrapper for the builtin dictionary, such that
    accessing its arguments is independent of order.
    """
    logger = base_logger.getChild('Links')

    def __init__(self):
        # Pair-key -> Link mapping; adjacency stored as sets of neighbors.
        self.data = {}
        self.neighbors = defaultdict(set)
        logger.debug('Links collection created.')

    def __repr__(self):
        return "<Links: has {} links betwixt {} cells>".format(len(self.data), len(self.neighbors))

    def ord(self, one, two):
        # Canonical, order-independent dictionary key for a cell pair.
        return tuple(sorted((one, two)))

    def add_link(self, one, two, *args, **kwargs):
        """ Add a link between cells one and two """
        key = self.ord(one, two)
        self.data[key] = Link(one, two, *args, **kwargs)
        self.neighbors[one].add(two)
        self.neighbors[two].add(one)
        logger.debug('Created a link between {one} and {two}'.format(one=one,two=two))

    def remove_link(self, one, two):
        """ Remove a link between cells one and two """
        del self.data[self.ord(one, two)]
        self.neighbors[one].remove(two)
        self.neighbors[two].remove(one)
        logger.debug('Removed a link between {one} and {two}'.format(one=one,two=two))

    def remove_cell(self, cell):
        """ Remove all references to a cell, both links and neighbors """
        for link in self.get_links(cell):
            self.remove_link(link.one, link.two)
        del self.neighbors[cell]
        logger.debug('Removed the cell {cell}'.format(cell=cell))

    def get_link(self, one, two):
        """ Get the link between cells one and two, order independent """
        return self.data[self.ord(one, two)]

    def get_neighbors(self, cell):
        """ Get the neighbors of cell cell """
        return self.neighbors[cell]

    def get_links(self, cell):
        """ Get all of the links that involve cell """
        return [self.get_link(cell, other) for other in self.get_neighbors(cell)]

    def iteritems(self):
        return self.data.iteritems()

    def __iter__(self):
        return iter(self.data.values())

    def __getitem__(self, elem):
        # A pair of cells indexes a Link; a single cell its neighbor set.
        if hasattr(elem, '__iter__'):
            one, two = elem
            return self.get_link(one, two)
        if elem in self.neighbors:
            return self.get_neighbors(elem)
        raise KeyError

    def __delitem__(self, elem):
        # A pair removes one link; a single cell removes the whole cell.
        if hasattr(elem, '__iter__'):
            one, two = elem
            self.remove_link(one, two)
            return
        if elem in self.neighbors:
            self.remove_cell(elem)
            return
        raise KeyError
#try to do __getitem__, __setitem__, __delitem__
| {
"content_hash": "9c7558d96cbc1d3e9df2b76df1da56e1",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 209,
"avg_line_length": 31.94326241134752,
"alnum_prop": 0.5576154529307282,
"repo_name": "alexalemi/cancersim",
"id": "8cf644c098487718024d1daf51f863d8b958b2d4",
"size": "9009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "491644"
},
{
"name": "CSS",
"bytes": "16242"
},
{
"name": "JavaScript",
"bytes": "26014"
},
{
"name": "Python",
"bytes": "302636"
},
{
"name": "Shell",
"bytes": "3249"
},
{
"name": "TeX",
"bytes": "60152"
}
],
"symlink_target": ""
} |
"""
OpentTMI module for Priority
"""
from opentmi_client.utils.Base import BaseApi
from opentmi_client.utils.decorators import setter_rules
class Priority(BaseApi):
    """
    Priority class: a syslog-style severity (`level`) / source (`facility`)
    pair stored on the underlying BaseApi document.
    """
    @property
    def level(self):
        """
        Getter for level
        :return: String (one of the syslog severity keywords)
        """
        return self.get("level")

    @level.setter
    @setter_rules(
        enum=['emerg', 'alert', 'crit', 'err',
              'warning', 'notice', 'info', 'debug'])
    def level(self, value):
        """
        Setter for level; setter_rules rejects values outside the enum.
        :param value: string, one of the syslog severity keywords
        :return: Priority
        """
        return self.set("level", value)

    @property
    def facility(self):
        """
        Getter for facility
        :return: String (one of the allowed facility keywords)
        """
        return self.get("facility")

    @facility.setter
    @setter_rules(
        enum=['auth', 'cron', 'mail', 'news', 'syslog',
              'user', 'result', 'resource', 'testcase'])
    def facility(self, value):
        """
        Setter for facility; setter_rules rejects values outside the enum.
        :param value: string, one of the allowed facility keywords
        :return: Priority
        """
        return self.set("facility", value)
| {
"content_hash": "8aa07a64df533e5f0d41c03948fbb7ba",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 56,
"avg_line_length": 22.352941176470587,
"alnum_prop": 0.5280701754385965,
"repo_name": "OpenTMI/opentmi-client-python",
"id": "81ce17faec81fb7eb8f288e28af3d761d1a5fa9b",
"size": "1140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opentmi_client/api/event/Priority.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46281"
}
],
"symlink_target": ""
} |
import sys
from operator import itemgetter
def adder(tagN, tagW): # Adder Function which maintains Top N list
    """Insert (tagN, tagW) into the global topNTags list, keeping at most N
    entries sorted by descending weight.

    Bug fix: the original ignored its tagN/tagW parameters and read the
    globals oldTag/totalWeight instead (harmless only because every caller
    happened to pass exactly those globals).  The dead
    ``len(topNTags) == None`` check (never true) was removed.
    """
    l = len(topNTags)
    if l < N or topNTags[N-1][1] < tagW:
        if l == N:
            topNTags.pop()  # evict the lightest entry to make room
        topNTags.append([tagN, tagW])
        topNTags.sort(key=itemgetter(1), reverse=True)

N = 10 # Change This to get Top N List
oldTag = None
totalWeight = 0
topNTags = []
# Hadoop-streaming reducer loop (Python 2): input is tab-separated
# "tag\tweight" lines, sorted by tag, one group per distinct tag.
for line in sys.stdin:
    data = line.strip().split("\t")
    if len(data) != 2: # Something has gone wrong. Skip this line.
        continue
    thisTag, thisWeight = data
    # Tag boundary: flush the accumulated weight for the previous tag.
    if oldTag and oldTag != thisTag:
        adder(oldTag, totalWeight)
        totalWeight = 0
    oldTag = thisTag
    totalWeight += int(thisWeight) # Adds Particular Weight of Tag
# Flush the final tag group.
adder(oldTag, totalWeight)
for i in range(len(topNTags)):
    print topNTags[i][0],"\t",topNTags[i][1]
| {
"content_hash": "0c895682fa7b5c50c284a7c2d0771a48",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 83,
"avg_line_length": 28.11764705882353,
"alnum_prop": 0.6108786610878661,
"repo_name": "np1810/Hadoop_and_MapReduce",
"id": "0eae1d4220aea50189ac41a63b477a1c48e2a0b3",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "L5_FinalProject/popular_tags_extra/popular_tags_extra_reducer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28113"
}
],
"symlink_target": ""
} |
"""Operations for feeding input data using TensorFlow placeholders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def prepare_feed_dict(model, features, labels=None, is_training=None):
  """Prepares a feed_dict for sess.run() given a batch of features and labels.

  Args:
    model: An instance of AstroModel.
    features: Dictionary containing "time_series_features" and "aux_features".
      Each is a dictionary of named numpy arrays of shape
      [batch_size, length].
    labels: (Optional). Numpy array of shape [batch_size].
    is_training: (Optional). Python boolean to feed to the model.is_training
      Tensor (if None, no value is fed).

  Returns:
    feed_dict: A dictionary of input Tensor to numpy array.
  """
  feed_dict = {}
  # Fix: use items() rather than the Python-2-only iteritems(), so the
  # module runs under Python 3 as well (consistent with the __future__
  # imports at the top of this file).
  for feature, tensor in model.time_series_features.items():
    feed_dict[tensor] = features["time_series_features"][feature]
  for feature, tensor in model.aux_features.items():
    feed_dict[tensor] = features["aux_features"][feature]
  if labels is not None:
    feed_dict[model.labels] = labels
  if is_training is not None:
    feed_dict[model.is_training] = is_training
  return feed_dict
def build_feature_placeholders(config):
  """Builds tf.Placeholder ops for feeding model features and labels.

  Args:
    config: ConfigDict containing the feature configurations.

  Returns:
    features: A dictionary containing "time_series_features" and "aux_features",
      each of which is a dictionary of tf.Placeholders of features from the
      input configuration. All features have dtype float32 and shape
      [batch_size, length].
  """
  batch_size = None  # Batch size will be dynamically specified.
  features = {"time_series_features": {}, "aux_features": {}}
  # Fix: items() instead of the Python-2-only iteritems(), matching the
  # __future__ imports at the top of this file.
  for feature_name, feature_spec in config.items():
    placeholder = tf.placeholder(
        dtype=tf.float32,
        shape=[batch_size, feature_spec.length],
        name=feature_name)
    # Time-series features (e.g. light curves) are routed separately from
    # scalar auxiliary features.
    if feature_spec.is_time_series:
      features["time_series_features"][feature_name] = placeholder
    else:
      features["aux_features"][feature_name] = placeholder
  return features
def build_labels_placeholder():
  """Builds a tf.Placeholder op for feeding model labels.

  Returns:
    labels: An int64 tf.Placeholder with shape [batch_size].
  """
  # Batch dimension is left dynamic (None).
  return tf.placeholder(dtype=tf.int64, shape=[None], name="labels")
| {
"content_hash": "a92d4cab043d4cbb81db045e38031e28",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 80,
"avg_line_length": 33.84,
"alnum_prop": 0.7044917257683215,
"repo_name": "jiaphuan/models",
"id": "566cc623b3d6c51c0bfe7935e2ddb579890bb7d2",
"size": "3126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/astronet/astronet/ops/input_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1224262"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "71060"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5957505"
},
{
"name": "Shell",
"bytes": "76858"
}
],
"symlink_target": ""
} |
import sys, logging, unittest
import feedparser
from calendar import timegm
from time import gmtime
from operator import itemgetter
from lxml import html
class TestSequenceFunctions(unittest.TestCase):
    """Integration tests for rssdownload.

    NOTE(review): these hit live cnn.com endpoints, so they need network
    access and can fail when the remote feed has no items newer than
    `past` — the good-url/past test acknowledges this in its message.
    """
    def setUp(self):
        self.username = 'testuser'
        self.feedurl_invalid = 'http://www.cnn.com'
        self.feedurl_valid = 'http://rss.cnn.com/rss/cnn_topstories'
        # Reference timestamps well before / after "now" (UTC epoch seconds).
        self.past = timegm(gmtime()) - 15000
        self.future = timegm(gmtime()) + 15000
    def test_bad_url_past(self):
        # Make sure an empty dict gets returned for an invalid URL
        test_feed = rssdownload(self.username, self.feedurl_invalid, self.past)
        self.assertTrue(len(test_feed['messages'])==0)
    def test_bad_url_future(self):
        # Make sure an empty dict gets returned for an invalid URL
        test_feed = rssdownload(self.username, self.feedurl_invalid, self.future)
        self.assertTrue(len(test_feed['messages'])==0)
    def test_good_url_past(self):
        # A valid URL polled with an old reference time should yield messages.
        test_feed = rssdownload(self.username, self.feedurl_valid, self.past)
        self.assertTrue(len(test_feed['messages'])>0, 'Probably no new links found...')
    def test_good_url_future(self):
        # A future reference time means nothing can be "new".
        test_feed = rssdownload(self.username, self.feedurl_valid, self.future)
        self.assertTrue(len(test_feed['messages'])==0)
def rssdownload(username, feedurl, last_reference=0, mode=0):
    ''' --> rssdownload(username, feedurl, last_reference=0)
    'username' is used exclusively for logging purposes at this time.
    'feedurl' must be a valid RSS feed. Validation is performed by checking
    the parsed data from the URL for the <title> tag, which is RSS 2.0 standard.
    If feedurl is not a valid RSS URL by that standard, an empty dictionary object
    is returned, and an error is logged.
    'last_reference' is the Unix time (UTC Epoch) of the last time this URL was polled.
    This time is determined by getting the time the most recent article was last updated.
    Only links added or updated after last_reference are returned to the user. If there
    are no new links, an error is logged and an empty dictionary object is returned.
    mode 0 = default. mode 1 = will search the feed entries for some fields commonly used
    to contain body text. If these fields are found, they will be parsed for links, and be
    returned from this function as a separate dictionary object.'''
    messages = []
    feed = feedparser.parse(feedurl)
    # Any of the items in srch can contain body text to parse for links
    srch = ('content', 'summary', 'subtitle', 'description')
    logger = logging.getLogger('proxy.rss')
    logger.debug("User %s's update URL is %s" % (username, feedurl))
    # RSS 2.0 requires a channel <title>; its absence means "not a feed".
    if 'title' not in feed.feed:
        logger.error('User %s supplied a URL that does not seem to be a valid RSS feed (%s)' % (username, feedurl))
        return {'messages':messages,'last_reference':last_reference, 'protected':False}
    for entry in feed.entries:
        # Only entries updated after the last poll are of interest.
        if timegm(entry.updated_parsed) > last_reference:
            message = {'url':entry.link,
                       'timestamp':timegm(entry.updated_parsed),
                       'description':entry.title,
                       'extra':feed.feed.title,
                       'refer':''}
            if mode == 1:
                # Mine each candidate body field for embedded links.
                # (Renamed loop variables: the original reused `item` here,
                # shadowing the outer entry variable.)
                bodies = (linkmine(entry[k]) for k in srch if k in entry)
                for index, links in enumerate(bodies):
                    message['deep_link%d' % index] = links
            messages.append(message)
    if len(messages) == 0:
        if not feed.bozo:
            logger.error("%s doesn't have anything new for us." % feed.feed.title)
        else:
            # Bug fix: this branch referenced an undefined name `g`, raising
            # NameError whenever a malformed feed was encountered; the parsed
            # feed object is `feed`. NOTE(review): getMessage()/getLineNumber()
            # only exist on SAX parse exceptions; other bozo_exception types
            # would still raise here — confirm desired behaviour.
            logger.warning("Malformed data at %s may have prevented proper update. Exception %s" % (feed.feed.title, feed.bozo_exception.getMessage() + "on line %d" % feed.bozo_exception.getLineNumber()))
        return {'messages':messages, 'last_reference':last_reference, 'protected':False}
    # Newest entry's timestamp becomes the next poll's reference point.
    messages.sort(key=itemgetter('timestamp'))
    last_ref = messages[-1]['timestamp']
    return {'messages':messages, 'last_reference':last_ref, 'protected':False}
def linkmine(summary):
    """Yield the link target of every link found in the given markup.

    ``html.iterlinks`` yields tuples whose third element is the link
    URL; everything else in the tuple is discarded.
    """
    found = html.iterlinks(summary)
    return (link_tuple[2] for link_tuple in found)
| {
"content_hash": "2a9f00015da9a9096e1cd4f5a029d9af",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 199,
"avg_line_length": 45.89795918367347,
"alnum_prop": 0.6373943975100045,
"repo_name": "mattdeboard/rss-connector",
"id": "1a85c2a8c3cf6de3da082b79d594c817a334bab0",
"size": "4498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rss_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4518"
}
],
"symlink_target": ""
} |
from __future__ import division
import datetime
from django.conf import settings
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes import generic
from django.db.models.signals import m2m_changed
from openbudget.apps.accounts.models import Account
from openbudget.apps.entities.models import Division, Entity
from openbudget.apps.sources.models import ReferenceSource, AuxSource
from openbudget.commons.mixins.models import TimeStampedModel, UUIDModel, \
PeriodStartModel, PeriodicModel, ClassMethodMixin
PATH_SEPARATOR = '|'
class TemplateManager(models.Manager):
    """Custom manager adding bulk-friendly query helpers for templates."""

    def related_map_min(self):
        """Queryset with FK objects joined and divisions prefetched."""
        queryset = self.select_related()
        return queryset.prefetch_related('divisions')

    def related_map(self):
        """Queryset with FK objects joined; divisions and nodes prefetched."""
        queryset = self.select_related()
        return queryset.prefetch_related('divisions', 'nodes')

    def latest_of(self, entity):
        """Most recent template (by period_start) used by the entity's sheets."""
        in_use = self.filter(using_sheets__entity=entity)
        return in_use.latest('period_start')

    #def sheets_of(self, entity):
    #    return self.filter(using_sheets__entity=entity)
class Template(TimeStampedModel, UUIDModel, PeriodStartModel, ClassMethodMixin):
    """Templates describe the structure of a Budget or an Actual.

    A template applies to one or more entity divisions and is valid from
    its period_start until superseded by a later template.
    """

    objects = TemplateManager()

    # Entity divisions (e.g. administrative levels) this template serves.
    divisions = models.ManyToManyField(
        Division,
    )
    name = models.CharField(
        _('Name'),
        db_index=True,
        max_length=255,
        help_text=_('The name of this template.')
    )
    description = models.TextField(
        _('Description'),
        db_index=True,
        blank=True,
        help_text=_('An overview text for this template.')
    )
    # Generic relations to attached source documents.
    referencesources = generic.GenericRelation(
        ReferenceSource
    )
    auxsources = generic.GenericRelation(
        AuxSource
    )

    @property
    def period(self):
        """Get the applicable period for this object.

        Objects are valid until the next object with a period_start in the
        future from this one, or, until 'now' if there is no future object.

        In the current case of multi-period ranges, returns a tuple of
        datetime.year objects.
        """
        # TODO: Support ranges other than yearly, including multiple ranges.
        # TODO: Refactor to work with non-division templates.
        start, end = None, None
        ranges = settings.OPENBUDGETS_PERIOD_RANGES
        if len(ranges) == 1 and 'yearly' in ranges:
            start = self.period_start.year
            qs = self.__class__.objects.filter(divisions__in=self.divisions.all())
            for obj in qs:
                # NOTE(review): 'end' is overwritten on every iteration, so
                # the *last* template iterated wins rather than the nearest
                # future period_start — confirm this is intended.
                if obj.period_start.year > self.period_start.year:
                    end = obj.period_start.year
                else:
                    end = datetime.datetime.now().year
        else:
            # TODO: Verify - in the current codebase, we should never get here.
            pass
        return start, end

    @property
    def has_sheets(self):
        """Whether at least one sheet uses this template.

        Fix: the reverse accessor for Sheet.template is 'using_sheets'
        (see Sheet.template's related_name); 'self.sheets' does not exist
        on Template. Also use exists() so the DB can stop at one row.
        """
        return self.using_sheets.exists()

    class Meta:
        ordering = ['name']
        verbose_name = _('template')
        verbose_name_plural = _('templates')

    @models.permalink
    def get_absolute_url(self):
        return 'template_detail', [self.uuid]

    def __unicode__(self):
        return self.name
class BaseNode(models.Model):
    """Abstract base for budget-tree nodes (name/code/direction plus the
    tree and cross-version relations), shared by TemplateNode and
    DenormalizedSheetItem."""

    # Every node belongs to exactly one of these two directions.
    DIRECTIONS = (
        ('REVENUE', _('REVENUE')),
        ('EXPENDITURE', _('EXPENDITURE'))
    )

    name = models.CharField(
        _('Name'),
        db_index=True,
        max_length=255,
        help_text=_('The name of this template node.')
    )
    code = models.CharField(
        _('Code'),
        db_index=True,
        max_length=50,
        help_text=_('An identifying code for this template node.')
    )
    direction = models.CharField(
        _('REVENUE/EXPENDITURE'),
        db_index=True,
        max_length=15,
        choices=DIRECTIONS,
        default=DIRECTIONS[0][0],
        help_text=_('Every node must be either a revenue or expenditure node.')
    )
    # Tree structure: the node's parent within the same hierarchy.
    parent = models.ForeignKey(
        'self',
        null=True,
        blank=True,
        related_name='children'
    )
    # NOTE(review): Django ignores related_name on symmetrical
    # self-referencing M2M fields, so 'inverses' is never created — confirm.
    inverse = models.ManyToManyField(
        'self',
        symmetrical=True,
        null=True,
        blank=True,
        related_name='inverses',
        help_text=_('Inverse relations across revenue and expenditure nodes.')
    )
    # Cached code path from this node up to its root, joined with
    # PATH_SEPARATOR (populated on first save; see TemplateNode.save).
    path = models.CharField(
        _('Path'),
        db_index=True,
        max_length=255,
        null=True,
        blank=True,
        editable=False,
        help_text=_('A representation of the path to the root of the template '
                    'from this template node.')
    )
    # Links to the equivalent node(s) in earlier template versions; the
    # reverse accessor 'forwards' points at later versions.
    backwards = models.ManyToManyField(
        'self',
        null=True,
        blank=True,
        symmetrical=False,
        related_name='forwards'
    )

    class Meta:
        abstract = True
class TemplateNodeManager(models.Manager):
    """Custom manager with bulk-friendly query helpers for template nodes."""

    def related_map_min(self):
        """Queryset that joins each node's parent in the same query."""
        return self.select_related('parent')

    def related_map(self):
        """Queryset joining parents and prefetching all related collections."""
        base = self.select_related('parent')
        return base.prefetch_related(
            'children', 'templates', 'inverse', 'backwards')
class TemplateNode(BaseNode, TimeStampedModel, UUIDModel):
    """The nodes that make up a template."""

    objects = TemplateNodeManager()

    templates = models.ManyToManyField(
        Template,
        through='TemplateNodeRelation',
        related_name='nodes'
    )
    description = models.TextField(
        _('Entry description'),
        blank=True,
        help_text=_('A descriptive text for this template node.')
    )
    referencesources = generic.GenericRelation(
        ReferenceSource
    )
    auxsources = generic.GenericRelation(
        AuxSource
    )

    @property
    def past(self):
        """This node's predecessors, transitively, across template versions.

        Fix: iterate over a snapshot of the *direct* predecessors only.
        The previous implementation extended ``nodes`` while iterating it,
        so transitively-appended nodes were visited again and their
        ancestry re-appended, yielding duplicate entries and redundant
        queries.
        """
        nodes = list(self.backwards.all())
        for node in list(nodes):
            nodes += node.past
        return nodes

    @property
    def future(self):
        """This node's successors, transitively, across template versions.

        Same snapshot fix as in ``past``.
        """
        nodes = list(self.forwards.all())
        for node in list(nodes):
            nodes += node.future
        return nodes

    @property
    def with_past(self):
        """This node followed by its full past."""
        return [self] + self.past

    @property
    def with_future(self):
        """This node followed by its full future."""
        return [self] + self.future

    def timeline(self, include_future=False):
        """Return the node's timeline: itself plus past, optionally future."""
        timeline = self.with_past
        if include_future:
            timeline += self.future
        return timeline

    def _get_path_to_root(self):
        """Return the list of codes from this node up to its root."""
        path = [self.code]
        if self.parent:
            parent_path = self.parent._get_path_to_root()
            if parent_path:
                path = path + parent_path
        return path

    def save(self, *args, **kwargs):
        # TODO: Also need to handle path creation on updates, not only created.
        if not self.id:
            # Set the `path` property if not explicitly provided.
            if not self.path:
                self.path = PATH_SEPARATOR.join(self._get_path_to_root())
        return super(TemplateNode, self).save(*args, **kwargs)

    class Meta:
        ordering = ['name']
        verbose_name = _('template node')
        verbose_name_plural = _('template nodes')

    @models.permalink
    def get_absolute_url(self):
        return 'template_node', [self.uuid]

    def __unicode__(self):
        return self.code

    def clean(self):
        if self.parent and not self.direction == self.parent.direction:
            raise ValidationError('A node must have the same direction as its '
                                  'parent.')
        # NOTE(review): identity check only catches assigning the very same
        # in-memory object as parent; a pk comparison may be intended.
        if self.parent is self:
            raise ValidationError('A node cannot be its own parent.')
def inverse_changed(sender, instance, action, reverse, model, pk_set, **kwargs):
    """m2m_changed handler validating TemplateNode.inverse additions.

    Ensures an inverse relation never points at the node itself and always
    crosses directions (REVENUE <-> EXPENDITURE).
    """
    if action == 'pre_add':
        # Validate that inverse never points to self.
        if instance.pk in pk_set:
            raise ValidationError(_('Inverse node can not point to self.'))
        # Validate that it always points to the opposite `direction`.
        # Use exists() rather than count(): only a boolean is needed and
        # exists() lets the database stop at the first match.
        if model.objects.filter(pk__in=pk_set,
                                direction=instance.direction).exists():
            raise ValidationError(_("Inverse node's direction can not be the "
                                    "same as self direction."))
m2m_changed.connect(inverse_changed, sender=TemplateNode.inverse.through)
class TemplateNodeRelationManager(models.Manager):
    """Exposes the related_map method for more efficient bulk select queries."""

    def related_map(self):
        """Queryset with all FK objects joined."""
        return self.select_related()

    def has_same_node(self, node, template):
        """Return whether an equivalent node already belongs to the template.

        Nodes are considered equivalent when code, name and parent all
        match. Improvement: use exists() instead of count() — callers only
        use the result as a boolean, and exists() short-circuits in the
        database (truthiness of the return value is unchanged).
        """
        return self.filter(
            node__code=node.code,
            node__name=node.name,
            node__parent=node.parent,
            template=template
        ).exists()
class TemplateNodeRelation(models.Model):
    """A relation between a node and a template."""

    objects = TemplateNodeRelationManager()

    template = models.ForeignKey(
        Template
    )
    node = models.ForeignKey(
        TemplateNode
    )

    def validate_unique(self, exclude=None):
        """Custom validation for our use case.

        Beyond standard uniqueness checks, reject relations that would add
        a node equivalent (same code, name and parent) to one already in
        the template.

        Fix: the previous condition was inverted (``if not bool(...)``) —
        it raised the "already exists" error precisely when no equivalent
        node existed, and allowed actual duplicates through.
        """
        super(TemplateNodeRelation, self).validate_unique(exclude)

        if self.__class__.objects.has_same_node(self.node, self.template):
            raise ValidationError(_('Node with name: {name}; code: {code}; '
                                    'parent: {parent}; already exists in '
                                    'template: {template}'.format(
                                    name=self.node.name, code=self.node.code,
                                    parent=self.node.parent,
                                    template=self.template)))

    class Meta:
        ordering = ['template__name', 'node__name']
        verbose_name = _('template/node relation')
        # Fix: this line previously re-assigned verbose_name with the
        # plural string, leaving verbose_name_plural undefined.
        verbose_name_plural = _('template/node relations')
        unique_together = (
            ('node', 'template')
        )

    def __unicode__(self):
        return '{template} -> {node}'.format(template=self.template,
                                             node=self.node)
class SheetManager(models.Manager):
    """Custom manager with bulk-friendly query helpers for sheets."""

    def related_map_min(self):
        """Queryset joining each sheet's entity."""
        return self.select_related('entity')

    def related_map(self):
        """Queryset joining FK objects and prefetching denormalized items."""
        queryset = self.select_related()
        return queryset.prefetch_related('denormalizedsheetitems')

    def latest_of(self, entity):
        """The entity's most recent sheet, by period_start."""
        for_entity = self.filter(entity=entity)
        return for_entity.latest('period_start')
class Sheet(PeriodicModel, TimeStampedModel, UUIDModel, ClassMethodMixin):
    """A sheet describes the finances for the given period, exposing budget and actuals."""

    objects = SheetManager()

    entity = models.ForeignKey(
        Entity,
        related_name='sheets'
    )
    template = models.ForeignKey(
        Template,
        related_name='using_sheets'
    )
    description = models.TextField(
        _('Description'),
        db_index=True,
        blank=True,
        help_text=_('Descriptive text for this %(class)s')
    )
    referencesources = generic.GenericRelation(
        ReferenceSource
    )
    auxsources = generic.GenericRelation(
        AuxSource
    )

    @property
    def budget_total(self):
        """Sum of the budget amounts of all items on this sheet.

        Fix: the reverse accessor for SheetItem.sheet is 'sheetitems'
        (BaseItem declares related_name='%(class)ss'); 'self.items' does
        not exist on this model. Also sum a generator instead of building
        an intermediate list.
        """
        return sum(item.budget for item in self.sheetitems.all())

    @property
    def actual_total(self):
        """Sum of the actual amounts of all items on this sheet."""
        return sum(item.actual for item in self.sheetitems.all())

    @property
    def item_count(self):
        """Number of items attached to this sheet."""
        return self.sheetitems.count()

    @property
    def variance(self):
        """Budget/actual ratio as a percentage, rounded to 2 decimals.

        Note: we imported division from __future__, so this is true
        division. NOTE(review): raises ZeroDivisionError when actual_total
        is 0 — confirm whether a guard is wanted.
        """
        return round(self.budget_total / self.actual_total * 100, 2)

    class Meta:
        ordering = ['entity']
        verbose_name = _('sheet')
        verbose_name_plural = _('sheets')

    @models.permalink
    def get_absolute_url(self):
        return 'sheet_detail', [self.uuid]

    def __unicode__(self):
        return unicode(self.period)
class BaseItem(models.Model):
    """Abstract base for sheet line items: the owning sheet plus the
    budget and actual amounts."""

    # Owning sheet; reverse accessor is the lowercased class name plus
    # 's' (e.g. 'sheetitems' for SheetItem).
    sheet = models.ForeignKey(
        Sheet,
        related_name='%(class)ss'
    )
    description = models.TextField(
        _('Description'),
        db_index=True,
        blank=True,
        help_text=_('Description that appears for this entry.')
    )
    budget = models.DecimalField(
        _('Budget Amount'),
        db_index=True,
        max_digits=26,
        decimal_places=2,
        blank=True,
        null=True,
        help_text=_('The total budgeted amount of this entry.')
    )
    actual = models.DecimalField(
        _('Actual Amount'),
        db_index=True,
        max_digits=26,
        decimal_places=2,
        blank=True,
        null=True,
        help_text=_('The total actual amount of this entry.')
    )

    class Meta:
        abstract = True
class SheetItemManager(models.Manager):
    """Exposes the related_map method for more efficient bulk select queries."""

    def get_queryset(self):
        # Fix: the base queryset must come from super().get_queryset().
        # Calling select_related() directly on the manager proxies back
        # through self.get_queryset() (recursing), or on older Django
        # versions bypasses this override entirely.
        return super(SheetItemManager, self).get_queryset().select_related('node')

    def related_map_min(self):
        """Queryset with all FK objects joined."""
        return self.select_related()

    def related_map(self):
        """Queryset with FK objects joined and discussions prefetched."""
        return self.select_related().prefetch_related('discussion')

    def timeline(self, node_pks, entity_pk):
        """Items for the given nodes' full timelines within one entity's sheets.

        Raises TemplateNode.DoesNotExist when none of node_pks exist.
        """
        nodes = TemplateNode.objects.filter(id__in=node_pks)
        timelines = []
        if nodes.count():
            for node in nodes:
                timelines += node.timeline()
        else:
            raise TemplateNode.DoesNotExist()
        items = self.model.objects.filter(
            node__in=timelines,
            sheet__entity=entity_pk).select_related('sheet')
        return items
class SheetItem(BaseItem, TimeStampedModel, UUIDModel, ClassMethodMixin):
    """A single item in a given sheet."""

    objects = SheetItemManager()

    # The template node this item instantiates; the reverse accessor on
    # TemplateNode is 'sheetitems'.
    node = models.ForeignKey(
        TemplateNode,
        related_name='%(class)ss',
    )
    referencesources = generic.GenericRelation(
        ReferenceSource
    )
    auxsources = generic.GenericRelation(
        AuxSource
    )

    @property
    def name(self):
        # The item's display name comes from its template node.
        value = self.node.name
        return value

    @property
    def parent(self):
        # Item on the same sheet whose node is this node's parent.
        # The lookup raises SheetItem.DoesNotExist when no such item
        # exists (e.g. at the root, where node.parent is None).
        return self.sheet.sheetitems.get(node=self.node.parent)

    @property
    def children(self):
        # Items on the same sheet whose nodes are children of this node.
        return self.sheet.sheetitems.filter(node__parent=self.node)

    @property
    def ancestors(self):
        """Ancestor items on the same sheet, ordered root-first."""
        ancestors = []
        current = self
        try:
            while current:
                parent = current.parent
                if parent:
                    ancestors.append(parent)
                current = parent
        except SheetItem.DoesNotExist:
            # Reached the top: the parent lookup found no matching item.
            pass
        ancestors.reverse()
        return ancestors

    @property
    def descendants(self):
        """All descendant items on the same sheet, gathered depth-first."""
        descendants = []
        children = self.children
        if children.count():
            descendants += children
            for child in children:
                descendants += child.descendants
        return descendants

    class Meta:
        ordering = ['node']
        verbose_name = _('sheet item')
        verbose_name_plural = _('sheet items')

    @models.permalink
    def get_absolute_url(self):
        return 'sheet_item_detail', [self.uuid]

    def __unicode__(self):
        return self.node.code
class SheetItemCommentManager(models.Manager):
    """Exposes the related_map method for more efficient bulk select queries."""

    def get_queryset(self):
        # Fix: obtain the base queryset via super().get_queryset() before
        # applying select_related(); calling select_related() on the
        # manager proxies back through self.get_queryset() and recurses
        # (or, on older Django, skips this override entirely).
        return super(SheetItemCommentManager, self).get_queryset().select_related()

    def related_map_min(self):
        """Queryset joining each comment's user."""
        return self.select_related('user')

    def related_map(self):
        """Queryset with all FK objects joined."""
        return self.select_related()

    def by_item(self, item_pk):
        """Comments for the given item, with users joined."""
        return self.filter(item=item_pk).related_map_min()
class SheetItemComment(TimeStampedModel, UUIDModel, ClassMethodMixin):
    """Comments on sheet items."""

    objects = SheetItemCommentManager()

    # The sheet item this comment discusses; reverse accessor 'discussion'.
    item = models.ForeignKey(
        SheetItem,
        related_name='discussion'
    )
    # The account that authored the comment.
    user = models.ForeignKey(
        Account,
        related_name='item_comments'
    )
    comment = models.TextField(
        _('Comment'),
        help_text=_('Add your comments to this discussion.')
    )

    class Meta:
        ordering = ['user', 'last_modified']
        verbose_name = _('sheet item comment')
        verbose_name_plural = _('sheet item comments')

    def __unicode__(self):
        return self.comment
class DenormalizedSheetItemManager(models.Manager):
    """Custom manager with bulk-friendly query helpers for denormalized items."""

    def related_map_min(self):
        """Queryset with all FK objects joined."""
        return self.select_related()

    def related_map(self):
        """Queryset with FK objects joined and discussions prefetched."""
        queryset = self.select_related()
        return queryset.prefetch_related('discussion')

    # def timeline(self, node_uuid, entity_uuid):
    #     try:
    #         node = TemplateNode.objects.get(uuid=node_uuid)
    #     except TemplateNode.DoesNotExist as e:
    #         raise e
    #     value = self.model.objects.filter(node__in=node.timeline,
    #                                       budget__entity__uuid=entity_uuid)
    #     return value
class DenormalizedSheetItem(BaseNode, BaseItem, UUIDModel, ClassMethodMixin):
    """Flattened copy of a SheetItem with its node's data embedded,
    avoiding joins through TemplateNode at read time."""

    objects = DenormalizedSheetItemManager()

    # One-to-one link back to the normalized item this row denormalizes.
    normal_item = models.OneToOneField(
        SheetItem,
        related_name='denormalized'
    )
    # Copy of the underlying template node's description.
    node_description = models.TextField(
        _('Entry description'),
        blank=True,
        help_text=_('A descriptive text for this template node underlying this sheet item.')
    )

    class Meta:
        ordering = ['code']
        verbose_name = _('denormalized sheet item')
        verbose_name_plural = _('denormalized sheet items')

    @models.permalink
    def get_absolute_url(self):
        return 'denormalized_sheet_item_detail', [self.uuid]

    def __unicode__(self):
        return self.code
| {
"content_hash": "0290af3880f8d059ec2422874ec093b3",
"timestamp": "",
"source": "github",
"line_count": 639,
"max_line_length": 110,
"avg_line_length": 28.408450704225352,
"alnum_prop": 0.5964854294056079,
"repo_name": "zbyufei/open-budgets",
"id": "fff39d85ba9cb160b87ac08f321ad25af7ac613f",
"size": "18153",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "openbudget/apps/sheets/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
'''
Description
If using files, (call by command line or from python):
all the inputs are raw float32 vectors files that are reshaped by the number
of f0 values in ff0.
There are three safe patches that were not described in the publication[1]:
(These are not critical, they might remove a few artifacts here and there).
* The noise mask is slightly low-passed (smoothed) across frequency
(def. 9 bins freq. window), in order to avoid cliffs in frequency domain
that end up creating Gibbs phenomenon in the time domain.
* High-pass filtering (def. 0.5*f0 cut-off)
This centers each synthesized segment around zero, to avoid cutting
any DC residual component (e.g. comming from the spectral envelope).
* Short half-window (def. 1ms (yes, one ms)) on the left of the pulse,
in order to avoid any pre-echos.
Reference
[1] G. Degottex, P. Lanchantin, and M. Gales, "A Pulse Model in Log-domain
for a Uniform Synthesizer," in Proc. 9th Speech Synthesis Workshop
(SSW9), 2016.
Copyright(C) 2016 Engineering Department, University of Cambridge, UK.
License
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author
Gilles Degottex <gad27@cam.ac.uk>
'''
import argparse
import sys
import os
import numpy as np
np.random.seed(0) # Generate always the same "random" numbers, for debugging.
import scipy
import sigproc as sp
def synthesize(fs, f0s, SPEC, NM=None, wavlen=None
, f0s_rmsteps=False # Removes steps in the f0 curve
# (see sigproc.resampling.f0s_rmsteps(.) )
, ener_multT0=False
, nm_lowpasswinlen=9
, hp_f0coef=0.5 # factor of f0 for the cut-off of the high-pass filter (def. 0.5*f0)
, antipreechohwindur=0.001 # [s]
, verbose=1):
# Copy the inputs to avoid modifying them
f0s = f0s.copy()
SPEC = SPEC.copy()
if not NM is None: NM = NM.copy()
else: NM = np.zeros(SPEC.shape)
# Check the size of the inputs
if f0s.shape[0]!=SPEC.shape[0]:
raise ValueError('F0 size {} and spectrogram size {} do not match'.format(len(f0), SPEC.shape[0]))
if not NM is None:
if SPEC.shape!=NM.shape:
raise ValueError('spectrogram size {} and NM size {} do not match.'.format(SPEC.shape, NM.shape))
if wavlen==None: wavlen = int(np.round(f0s[-1,0]*fs))
dftlen = (SPEC.shape[1]-1)*2
shift = np.median(np.diff(f0s[:,0]))
if verbose>0:
print('PM Synthesis (dur={}s, fs={}Hz, f0 in [{:.0f},{:.0f}]Hz, shift={}s, dftlen={})'.format(wavlen/float(fs), fs, np.min(f0s[:,1]), np.max(f0s[:,1]), shift, dftlen))
# Prepare the features
# Enforce continuous f0
f0s[:,1] = np.interp(f0s[:,0], f0s[f0s[:,1]>0,0], f0s[f0s[:,1]>0,1])
# If asked, removes steps in the f0 curve
if f0s_rmsteps:
f0s = sp.f0s_rmsteps(f0s)
if not NM is None:
# Remove noise below f0, as it is supposed to be already the case
for n in range(NM.shape[0]):
NM[n,:int((float(dftlen)/fs)*2*f0s[n,1])] = 0.0
# Generate the pulse positions [1](2) (i.e. the synthesis instants, the GCIs in voiced segments)
ts = [0.0]
while ts[-1]<float(wavlen)/fs:
cf0 = np.interp(ts[-1], f0s[:,0], f0s[:,1])
if cf0<50.0: cf0 = 50
ts.append(ts[-1]+(1.0/cf0))
ts = np.array(ts)
f0s = np.vstack((ts, np.interp(ts, f0s[:,0], f0s[:,1]))).T
# Resample the features to the pulse positions
# Spectral envelope uses the nearest, to avoid over-smoothing
SPECR = np.zeros((f0s.shape[0], dftlen/2+1))
for n, t in enumerate(f0s[:,0]): # Nearest: Way better for plosives
idx = int(np.round(t/shift))
idx = np.clip(idx, 0, SPEC.shape[0]-1)
SPECR[n,:] = SPEC[idx,:]
# Resample the noise feature to the pulse positions
# Smooth the frequency response of the mask in order to avoid Gibbs
# (poor Gibbs nobody want to see him)
nm_lowpasswin = np.hanning(nm_lowpasswinlen)
nm_lowpasswin /= np.sum(nm_lowpasswin)
NMR = np.zeros((f0s.shape[0], dftlen/2+1))
for n, t in enumerate(f0s[:,0]):
idx = int(np.round(t/shift)) # Nearest is better for plosives
idx = np.clip(idx, 0, NM.shape[0]-1)
NMR[n,:] = NM[idx,:]
NMR[n,:] = scipy.signal.filtfilt(nm_lowpasswin, [1.0], NMR[n,:])
NMR = np.clip(NMR, 0.0, 1.0)
# The complete waveform that we will fill with the pulses
wav = np.zeros(wavlen)
# Half window on the left of the synthesized segment to avoid pre-echo
dampinhwin = np.hanning(1+2*int(np.round(antipreechohwindur*fs))) # 1ms forced dampingwindow
dampinhwin = dampinhwin[:(len(dampinhwin)-1)/2+1]
for n, t in enumerate(f0s[:,0]):
f0 = f0s[n,1]
if verbose>1: print "\rPM Synthesis (python) t={:4.3f}s f0={:3.3f}Hz ".format(t,f0),
# Window's length
# TODO It should be ensured that the beggining and end of the
# noise is within the window. Nothing is doing this currently!
winlen = int(np.max((0.050*fs, 3*fs/f0))/2)*2+1 # Has to be odd
# TODO We also assume that the VTF's decay is shorter
# than 2 periods (dangerous with high pitched tense voice).
if winlen>dftlen: raise ValueError('winlen>dftlen')
# Set the rough position of the pulse in the window (the closest sample)
# We keep a third of the window (1 period) on the left because the
# pulse signal is minimum phase. And 2/3rd (remaining 2 periods)
# on the right to let the VTF decay.
pulseposinwin = int(0.33*winlen)
# The sample indices of the current pulse wrt. the final waveform
winidx = int(round(fs*t)) + np.arange(winlen)-pulseposinwin
# Build the pulse spectrum
# Let start with a Dirac
S = np.ones(dftlen/2+1, dtype=np.complex64)
# Add the delay to place the Dirac at the "GCI": exp(-j*2*pi*t_i)
delay = -pulseposinwin - fs*(t-int(round(fs*t))/float(fs))
S *= np.exp((delay*2j*np.pi/dftlen)*np.arange(dftlen/2+1))
# Add the spectral envelope
# Both amplitude and phase
E = SPECR[n,:] # Take the amplitude from the given one
if hp_f0coef!=None:
# High-pass it to avoid any residual DC component.
HP = sp.butter2hspec(hp_f0coef*f0, 4, fs, dftlen, high=True)
E *= HP
# Not necessarily good as it is non-causal, so make it causal...
# ... together with the VTF response below.
# Build the phase of the envelope from the amplitude
E = sp.hspec2minphasehspec(E, replacezero=True) # We spend 2 FFT here!
S *= E # Add it to the current pulse
# Add energy correction wrt f0.
# STRAIGHT and AHOCODER vocoders do it.
# (why ? to equalize the energy when changing the pulse's duration ?)
if ener_multT0:
S *= np.sqrt(fs/f0)
# Generate the segment of Gaussian noise
# Use mid-points before/after pulse position
if n>0: leftbnd=int(np.round(fs*0.5*(f0s[n-1,0]+t)))
else: leftbnd=int(np.round(fs*(t-0.5/f0s[n,1]))) # int(0)
if n<f0s.shape[0]-1: rightbnd=int(np.round(fs*0.5*(t+f0s[n+1,0])))-1
else: rightbnd=int(np.round(fs*(t+0.5/f0s[n,1]))) #rightbnd=int(wavlen-1)
gausswinlen = rightbnd-leftbnd # The length of the noise segment
gaussnoise4win = np.random.normal(size=(gausswinlen)) # The noise
GN = np.fft.rfft(gaussnoise4win, dftlen) # Move the noise to freq domain
# Normalize it by its energy (@Yannis, That's your answer at SSW9!)
GN /= np.sqrt(np.mean(np.abs(GN)**2))
# Place the noise within the pulse's window
delay = (pulseposinwin-(leftbnd-winidx[0]))
GN *= np.exp((delay*2j*np.pi/dftlen)*np.arange(dftlen/2+1))
# Add it to the pulse spectrum, under the condition of the mask
S *= GN**NMR[n,:]
# That's it! the pulse spectrum is ready!
# Move it to time domain
deter = np.fft.irfft(S)[0:winlen]
# Add half window on the left of the synthesized segment
# to avoid any possible pre-echo
deter[:leftbnd-winidx[0]-len(dampinhwin)] = 0.0
deter[leftbnd-winidx[0]-len(dampinhwin):leftbnd-winidx[0]] *= dampinhwin
# Write the synthesized segment in the final waveform
if winidx[0]<0 or winidx[-1]>=wavlen:
# The window is partly outside of the waveform ...
wav4win = np.zeros(winlen)
# ... thus copy only the existing part
itouse = np.logical_and(winidx>=0,winidx<wavlen)
wav[winidx[itouse]] += deter[itouse]
else:
wav[winidx] += deter
if verbose>1: print '\r \r',
if verbose>2:
import matplotlib.pyplot as plt
plt.ion()
f, axs = plt.subplots(3, 1, sharex=True, sharey=False)
times = np.arange(len(wav))/float(fs)
axs[0].plot(times, wav, 'k')
axs[0].set_ylabel('Waveform\nAmplitude')
axs[0].grid()
axs[1].plot(f0s[:,0], f0s[:,1], 'k')
axs[1].set_ylabel('F0\nFrequency [Hz]')
axs[1].grid()
axs[2].imshow(sp.mag2db(SPEC).T, origin='lower', aspect='auto', interpolation='none', extent=(f0s[0,0], f0s[-1,0], 0, 0.5*fs))
axs[2].set_ylabel('Amp. Envelope\nFrequency [Hz]')
from IPython.core.debugger import Pdb; Pdb().set_trace()
return wav
def synthesizef(fs, shift=0.005, dftlen=4096, ff0=None, flf0=None, fspec=None, fmcep=None, fpdd=None, fnm=None, fbndnm=None, fsyn=None, verbose=1):
    '''
    Call the synthesis from python using file inputs and outputs

    All feature files are raw float32 vectors/matrices; matrices are
    reshaped using the number of frames in the f0 file, so an f0 file is
    required. Later options override earlier ones (e.g. fnm replaces the
    noise mask derived from fpdd; fmcep replaces fspec).
    '''
    if ff0:
        # f0 values [Hz], one per frame.
        f0 = np.fromfile(ff0, dtype=np.float32)
    if flf0:
        # log-f0: exponentiate voiced frames (f0>0) back to [Hz];
        # unvoiced frames keep their stored value.
        f0 = np.fromfile(flf0, dtype=np.float32)
        f0[f0>0] = np.exp(f0[f0>0])
    # Frame time stamps from the constant hop size.
    ts = (shift)*np.arange(len(f0))
    f0s = np.vstack((ts, f0)).T

    if fspec:
        # Amplitude spectrogram, linear values, one row per frame.
        SPEC = np.fromfile(fspec, dtype=np.float32)
        SPEC = SPEC.reshape((len(f0), -1))
    if fmcep:
        # Mel-cepstral coefficients, expanded back to a spectrogram.
        SPEC = np.fromfile(fmcep, dtype=np.float32)
        SPEC = SPEC.reshape((len(f0), -1))
        SPEC = sp.mcep2spec(SPEC, sp.bark_alpha(fs), dftlen)

    if fpdd:
        # Phase Distortion Deviation: binarize into a noise mask.
        PDD = np.fromfile(fpdd, dtype=np.float32)
        PDD = PDD.reshape((len(f0), -1))
        thresh = 0.75 # DegottexG2015jhmpd
        NM = PDD.copy()
        NM[PDD<thresh] = 0.0
        NM[PDD>thresh] = 1.0
    if fnm:
        # Full-resolution noise mask, used as-is.
        NM = np.fromfile(fnm, dtype=np.float32)
        NM = NM.reshape((len(f0), -1))
    if fbndnm:
        # Band-compressed noise mask: expand to linear bins, then binarize.
        BNDNM = np.fromfile(fbndnm, dtype=np.float32)
        BNDNM = BNDNM.reshape((len(f0), -1))
        NM = sp.fwbnd2linbnd(BNDNM, fs, dftlen)
        NM[NM<=0.5] = 0.0
        NM[NM>0.5] = 1.0

    syn = synthesize(fs, f0s, SPEC, NM=NM, verbose=verbose)
    if fsyn:
        sp.wavwrite(fsyn, syn, fs, norm_abs=True, verbose=verbose)

    return syn
if __name__ == "__main__" :
    '''
    Call the synthesis from the command line
    '''
    argpar = argparse.ArgumentParser()
    argpar.add_argument("synthfile", help="Output synthesis file")
    argpar.add_argument("--f0file", default=None, help="Input f0 file (values in [Hz])")
    argpar.add_argument("--logf0file", default=None, help="Input f0 file (values in [log Hz])")
    argpar.add_argument("--specfile", default=None, help="Input amplitude spectrogram (linear values)")
    argpar.add_argument("--mcepfile", default=None, help="Input amplitude spectrogram (mel-cepstrum values)")
    argpar.add_argument("--pddfile", default=None, help="Input Phase Distortion Deviation file (linear values)")
    argpar.add_argument("--noisemaskfile", default=None, help="Output Noise Mask (linear values in [0,1])")
    argpar.add_argument("--noisemask_nbbnds", default=None, type=int, help="Number of mel-bands in the compressed noise mask (None: Consider there is no compression)")
    argpar.add_argument("--fs", default=16000, type=int, help="Sampling frequency [Hz]")
    argpar.add_argument("--shift", default=0.005, type=float, help="Time step[ms] between the frames")
    #argpar.add_argument("--dftlen", dftlen=4096, type=float, help="Size of the DFT for extracting the features")
    argpar.add_argument("--verbose", default=1, help="Output some information")
    args = argpar.parse_args()
    # DFT length is fixed here (the --dftlen option above is disabled).
    args.dftlen = 4096

    # When --noisemask_nbbnds is given, the mask file holds a
    # band-compressed mask (fbndnm); otherwise a full-resolution one (fnm).
    synthesizef(args.fs, shift=args.shift, dftlen=args.dftlen, ff0=args.f0file, flf0=args.logf0file, fspec=args.specfile, fmcep=args.mcepfile, fnm=(None if args.noisemask_nbbnds else args.noisemaskfile), fbndnm=(args.noisemaskfile if args.noisemask_nbbnds else None), fpdd=args.pddfile, fsyn=args.synthfile, verbose=args.verbose)
| {
"content_hash": "8ea7dce5aa6d8fa3c00bc07b58fca8f0",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 329,
"avg_line_length": 42.845659163987136,
"alnum_prop": 0.6212382739212008,
"repo_name": "etosha/pulsemodel",
"id": "539ee22aff4b3d26c97adb5787c77e8b2ecd9ef6",
"size": "13347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synthesis.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4725"
},
{
"name": "Python",
"bytes": "27763"
}
],
"symlink_target": ""
} |
import json
import os
from build_fail import BuildFail
from build_pass import BuildPass
from slackclient import SlackClient
def spliterator(bad_string):
    """Split a comma-separated string into a list, removing any spaces.

    Fix: previously returned None (implicitly) for empty/None input, which
    made ``for channel in spliterator(...)`` crash with a TypeError. An
    empty list is falsy like None, so callers' truthiness checks are
    unaffected. (Also moved the stray module-level string into a real
    docstring.)
    """
    if bad_string:
        return bad_string.replace(' ', '').split(',')
    return []
# The build outcome Wercker reports for this run (e.g. 'passed'/'failed').
result = os.environ['WERCKER_RESULT']

# Only proceed if we have a valid build result.
if result:
    icon_url = os.getenv('WERCKER_SLACK_NOTIFY_ICON_URL')
    notify_fail = os.getenv('WERCKER_SLACK_NOTIFY_NOTIFY_ON_FAIL')
    notify_success = os.getenv('WERCKER_SLACK_NOTIFY_NOTIFY_ON_SUCCESS')
    project_name = os.environ['WERCKER_GIT_REPOSITORY']
    branch = os.environ['WERCKER_GIT_BRANCH']

    # Check the outcome of the build and send the relevant message.
    slack_client = SlackClient(os.environ['SLACK_BOT_TOKEN'])
    if result == 'failed':
        message = BuildFail(project_name,
                            branch,
                            icon_url)
        # Fix: this guard was inverted ('if not notify_fail'), so failure
        # notifications were only attempted when NO channels were
        # configured — and then crashed iterating spliterator's result.
        if notify_fail:
            for channel in spliterator(notify_fail):
                message.send(slack_client, channel)
    else:
        message = BuildPass(project_name,
                            icon_url,
                            os.environ['VERSION_NAME'])

        # If 'notify_success' is empty, fall back to 'default_channel';
        # if that is empty too, fall back to '#general'.
        if not notify_success:
            default_channel = os.getenv('WERCKER_SLACK_NOTIFY_DEFAULT_CHANNEL')
            notify_success = default_channel if default_channel else '#general'
        for channel in spliterator(notify_success):
            if channel:
                message.send(slack_client, channel)
else:
    print('-----------------------------------------')
    print('No build result found, skipping this step')
    print('-----------------------------------------')
| {
"content_hash": "2d105face5adb8bfdaeb3c21fa5ce160",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 86,
"avg_line_length": 33.20338983050848,
"alnum_prop": 0.590096988259316,
"repo_name": "otormaigh/slack-notify-wercker-step",
"id": "4601b46627cc8856b588f9534fb1d6043eee49be",
"size": "1983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7472"
},
{
"name": "Shell",
"bytes": "731"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.python.training.saver.py."""
import functools
import os
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.module import module
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(module.Module):
    """A Trackable object that is not a Keras Layer, used to check that
    non-layer checkpoint dependencies are tracked."""

    def __init__(self):
        super(NonLayerTrackable, self).__init__()
        # Register a scalar variable as a tracked dependency named "a_variable".
        self.a_variable = trackable_utils.add_variable(
            self, name="a_variable", shape=[])
class MyModel(training.Model):
    """A concrete Model for testing."""

    def __init__(self):
        super(MyModel, self).__init__()
        # Dense layer with a bias; the tests below read/assign its bias.
        self._named_dense = core.Dense(1, use_bias=True)
        self._second = core.Dense(1, use_bias=False)
        # We can still track Trackables which aren't Layers.
        self._non_layer = NonLayerTrackable()

    def call(self, values):
        # Forward pass: values -> named dense (with bias) -> second dense.
        ret = self._second(self._named_dense(values))
        return ret
class TrackableCompatibilityTests(test.TestCase):
def _initialized_model(self):
    """Build a Checkpoint(model, Adam optimizer, step), run one train step,
    then assign sentinel values (bias=1, slot m=2, beta1_power=3) that
    _check_sentinels later verifies after a restore."""
    input_value = constant_op.constant([[3.]])
    model = MyModel()
    optimizer = adam.AdamOptimizer(0.001)
    optimizer_step = training_util.get_or_create_global_step()
    root_trackable = trackable_utils.Checkpoint(
        optimizer=optimizer, model=model, optimizer_step=optimizer_step)
    train_op = optimizer.minimize(
        functools.partial(model, input_value),
        global_step=optimizer_step)
    self.evaluate(trackable_utils.gather_initializers(
        root_trackable))
    self.evaluate(train_op)
    # A regular variable, a slot variable, and a non-slot Optimizer variable
    # with known values to check when loading.
    self.evaluate(model._named_dense.bias.assign([1.]))
    self.evaluate(optimizer.get_slot(
        var=model._named_dense.bias, name="m").assign([2.]))
    beta1_power, _ = optimizer._get_beta_accumulators()
    self.evaluate(beta1_power.assign(3.))
    return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.], self.evaluate(root_trackable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def testLoadFromObjectBasedGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph) as sess:
root = self._initialized_model()
object_saver = trackable_utils.Checkpoint(root=root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
# An incompatible object-based checkpoint to check error messages
var = variables.Variable(1., name="a")
self.evaluate(var.initializer)
second_saver = trackable_utils.Checkpoint(v=var)
second_path = second_saver.save(file_prefix=os.path.join(
checkpoint_directory, "second"))
restore_graph = ops_lib.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as sess:
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver()
saver.restore(sess=sess, save_path=save_path)
self._check_sentinels(root)
before_second_restore_ops = restore_graph.get_operations()
# Test that multiple restores do not pollute the graph
saver.restore(sess=sess, save_path=save_path)
self.assertEqual(before_second_restore_ops,
restore_graph.get_operations())
with self.assertRaisesRegex(errors.NotFoundError,
"Could not find some variables"):
saver.restore(sess=sess, save_path=second_path)
def testLoadFromObjectBasedEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph):
root = self._initialized_model()
object_saver = trackable_utils.Checkpoint(root=root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver(
root.model.variables + root.optimizer.variables())
saver.restore(sess=None, save_path=save_path)
self._check_sentinels(root)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "e1a2d5e059e588b6a02dd0a36b57414f",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 76,
"avg_line_length": 39.05714285714286,
"alnum_prop": 0.6978785662033651,
"repo_name": "sarvex/tensorflow",
"id": "a92378834c659efccee7ac4cc302bbe40329488a",
"size": "6156",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tensorflow/python/keras/tests/saver_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
} |
import logging
from flask import render_template, request, session, url_for
from flask_wtf.csrf import CSRFError
from requests.exceptions import ConnectionError
from structlog import wrap_logger
from werkzeug.utils import redirect
from frontstage import app
from frontstage.common.session import Session
from frontstage.exceptions.exceptions import (
ApiError,
IncorrectAccountAccessError,
InvalidEqPayLoad,
JWTValidationError,
)
# Structlog-wrapped module logger used by every handler below.
logger = wrap_logger(logging.getLogger(__name__))
@app.errorhandler(400)
def client_error(error):
    """Log a bad request and render the generic 400 page."""
    logger.info("Client error", url=request.url, status_code=error.code)
    page = render_template("errors/400-error.html")
    return page, 400
@app.errorhandler(404)
def not_found_error(error):
    """Log a missing-resource request and render the 404 page."""
    logger.info("Not found error", url=request.url, status_code=error.code)
    page = render_template("errors/404-error.html")
    return page, 404
@app.errorhandler(CSRFError)
def handle_csrf_error(error):
    """Handle an expired/invalid CSRF token.

    If the caller still has a JWT session, remember the requested URL and
    bounce through logout; otherwise show the generic 400 page.
    """
    logger.warning("CSRF token has expired", error_message=error.description, status_code=error.code)
    session_key = request.cookies.get("authorization")
    session_handler = Session.from_session_key(session_key)
    encoded_jwt = session_handler.get_encoded_jwt()
    if encoded_jwt:
        session["next"] = request.url
        return redirect(url_for("sign_in_bp.logout", csrf_error=True))
    return render_template("errors/400-error.html"), 400
@app.errorhandler(ApiError)
def api_error(error):
    """Log an upstream API failure (with its URL/status) and render the 500 page."""
    log_message = error.message or "Api failed to retrieve required data"
    logger.error(
        log_message,
        url=request.url,
        status_code=500,
        api_url=error.url,
        api_status_code=error.status_code,
        **error.kwargs
    )
    return render_template("errors/500-error.html"), 500
@app.errorhandler(ConnectionError)
def connection_error(error):
    """Log a failed connection to an external service and render the 500 page."""
    failed_url = error.request.url
    logger.error("Failed to connect to external service", url=request.url, status_code=500, api_url=failed_url)
    return render_template("errors/500-error.html"), 500
@app.errorhandler(JWTValidationError)
def jwt_validation_error(error):
    """Log a JWT validation failure and render the 403 page."""
    logger.error("JWT validation error", url=request.url, status_code=403)
    page = render_template("errors/403-error.html")
    return page, 403
@app.errorhandler(Exception)
def server_error(error):
    """Catch-all handler: log the exception, keep the error's own HTTP code if any."""
    logger.error("Generic exception generated", exc_info=error, url=request.url, status_code=500)
    status = getattr(error, "code", 500)
    return render_template("errors/500-error.html"), status
@app.errorhandler(InvalidEqPayLoad)
def eq_error(error):
    """Log a failure to build the EQ URL payload and render the 500 page."""
    logger.error("Failed to generate EQ URL", error=error.message, url=request.url, status_code=500)
    page = render_template("errors/500-error.html")
    return page, 500
@app.errorhandler(IncorrectAccountAccessError)
def secure_message_forbidden_error(error):
    """Log an unauthorised secure-message access and render the 403 page.

    Fix: previously the 403 template was returned without an explicit
    status code, so Flask sent it with HTTP 200; the status now matches
    the page content (consistent with the other handlers in this module).
    """
    logger.info(
        "Attempt to access secure message without correct session permission",
        url=request.url,
        message=error.message,
        thread_id=error.thread,
    )
    return render_template("errors/403-incorrect-account-error.html"), 403
| {
"content_hash": "7b6ceaef94891b16b0ec90c8c2fa2c96",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 118,
"avg_line_length": 32.52173913043478,
"alnum_prop": 0.7309491978609626,
"repo_name": "ONSdigital/ras-frontstage",
"id": "9af7a12cb201ea222bcf6bd962130d443e8794c6",
"size": "2992",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "frontstage/error_handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "592"
},
{
"name": "Dockerfile",
"bytes": "621"
},
{
"name": "HTML",
"bytes": "269090"
},
{
"name": "Makefile",
"bytes": "824"
},
{
"name": "Python",
"bytes": "705890"
},
{
"name": "Shell",
"bytes": "2874"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MaxValueValidator
# from taggit.managers import TaggableManager
from tagging.fields import TagField
from tagging_autocomplete.models import TagAutocompleteField
@python_2_unicode_compatible
class User(AbstractUser):
    """Site user profile extending Django's AbstractUser.

    First/last name alone do not cover naming patterns around the globe,
    so the extra profile fields live here.
    """

    # Field declaration order is preserved (it is significant to Django).
    about_me = models.TextField(_("about me"), null=True, blank=True)
    avatar = models.ImageField(upload_to='avatars', blank=True, null=True)
    my_project_experience = models.URLField(_("URL for project"), null=True, blank=True)
    phone = models.CharField(_("phone"), max_length=50, null=True, blank=True)

    class Meta:
        verbose_name = 'user'
        verbose_name_plural = 'users'
        ordering = ['username', ]

    def __str__(self):
        return '{0} {1}'.format(self.username, self.about_me)

    def get_absolute_url(self):
        # Canonical profile URL, resolved from the users app's URLconf.
        return reverse('users:detail', kwargs={'username': self.username})
class Skill(models.Model):
    """A programming language that users can declare proficiency in."""

    programming_lang = models.CharField(_("language"), max_length=255)
    # Many-to-many to User through SkillUser, which carries the level.
    skills = models.ManyToManyField(User, through='SkillUser')

    class Meta:
        verbose_name = 'skill'
        verbose_name_plural = 'skills'

    def __str__(self):
        return self.programming_lang
class SkillUser(models.Model):
    """Through model linking a User to a Skill with a proficiency level (max 5)."""

    user = models.ForeignKey(User)
    skill = models.ForeignKey(Skill)
    # Level is capped at 5 by the validator; the floor of 0 comes from the
    # PositiveSmallIntegerField itself.
    level = models.PositiveSmallIntegerField(_("level"), validators=[MaxValueValidator(5)])

    class Meta:
        verbose_name = 'skill_user'
        verbose_name_plural = 'skills_users'
| {
"content_hash": "e8799d49ee5df8f9d80f8e05ae668a65",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 91,
"avg_line_length": 33.08620689655172,
"alnum_prop": 0.7107868681605003,
"repo_name": "WarszawskaGrupaWspolnegoKodowania/Coding_Buddy",
"id": "aa367be116a81b9ab1d0804089319061881c27e5",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Coding_Buddy/users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1775"
},
{
"name": "HTML",
"bytes": "20376"
},
{
"name": "JavaScript",
"bytes": "3142"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "43464"
},
{
"name": "Shell",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from flask import Flask, request, json
import RPi.GPIO as GPIO
import threading
import time
import socket
import ast
import Adafruit_DHT
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)
# When True, sensor values come from a local test socket (port 5001)
# instead of the DHT11 hardware sensor.
USE_TEST_TEMPERATURES = False
app = Flask(__name__)
class sensorReader(threading.Thread):
    """Background thread that keeps data['sensors'] values up to date.

    In test mode it serves a local socket and reads JSON sensor values
    from the connected client; otherwise it polls the DHT11 sensor.
    Set self.exitapp = True to stop the loop.
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self.exitapp = False
        print ('SENSOR SERVER STARTED')
        if USE_TEST_TEMPERATURES:
            global server_socket
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.bind(('localhost', 5001))
            server_socket.listen(5)
    def run(self):
        global data
        if USE_TEST_TEMPERATURES:
            # Blocks until the test client connects, then exchanges
            # sensor-count / JSON-values messages each iteration.
            (client_socket, address) = server_socket.accept()
            while not self.exitapp:
                size = len(data['sensors'])
                if (size!=0):
                    client_socket.send (str(size))
                    values = client_socket.recv(512)
                    #print "RECEIVED:" , values
                    parsedValues = json.loads(values)
                    for x in range(size):
                        # Sensor dicts are keyed by their 1-based id.
                        data['sensors'][x][str(x+1)]['value'] = parsedValues[x]
        else:
            while not self.exitapp:
                # read_retry blocks until the DHT11 on GPIO 5 answers.
                humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 5)
                data['sensors'][0]['1']['value'] = str(int(temperature))
                print 'Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity)
                time.sleep(1)
class actuatorTrigger(threading.Thread):
    """Background thread driving the relay GPIO pins from thermostat state.

    Each thermostat in data['thermostats'] maps to one output pin.
    Modes:
      ON / OFF - force the relay state directly.
      AUTO     - compare the average of the assigned sensors against the
                 target temperature (0.5 degree offset); the 'hot' flag
                 inverts the relay polarity.
    Set self.exitapp = True to stop the loop.
    """

    # GPIO output pins, indexed by thermostat number - 1.
    PINS = (4, 17, 27)

    def __init__(self):
        threading.Thread.__init__(self)
        self.exitapp = False
        for pin in self.PINS:
            GPIO.setup(pin, GPIO.OUT)

    def run(self):
        global data
        while not self.exitapp:
            for index, thermostat in enumerate(data['thermostats']):
                config = thermostat[str(index + 1)]
                pin = self.PINS[index]
                mode = config['mode']
                if mode == 'ON':
                    GPIO.output(pin, True)
                elif mode == 'OFF':
                    GPIO.output(pin, False)
                elif mode == 'AUTO':
                    # Skip thermostats with no sensors assigned (a freshly
                    # created thermostat) to avoid a ZeroDivisionError.
                    if not config['sensors']:
                        continue
                    # Bug fix: the accumulator is now reset for every
                    # thermostat.  Previously it carried over between
                    # thermostats, skewing the average whenever more than
                    # one thermostat was in AUTO mode.
                    total = 0
                    for s in config['sensors']:
                        total += int(data['sensors'][s - 1][str(s)]['value'])
                    avg = total / float(len(config['sensors']))
                    # True when the room is within 0.5 degrees of (or above)
                    # the target; 'hot' == 'true' inverts the relay polarity.
                    below_threshold = (float(config['temperature']) - avg) < 0.5
                    if config['hot'] != 'true':
                        GPIO.output(pin, below_threshold)
                    else:
                        GPIO.output(pin, not below_threshold)
            time.sleep(1)
@app.route("/")
def hello():
    """Return a short greeting identifying this API server."""
    greeting = "Hello this is the API server of a smart thermostate!"
    return greeting
@app.route('/temp', methods=['GET','DELETE','POST'])
def showTemp():
    """Offers the three available methods of the api for the temperature sensors

    GET - Lists all the sensors values
    POST - Adds a new temperature sensor
    DELETE - Delete all sensors
    """
    global data

    def _persist():
        # Write the in-memory model back to the JSON backing file.  Using a
        # context manager closes the file even if json.dump raises
        # (the original open/close pair could leak the handle).
        with open('testData.json', 'w') as f:
            json.dump(data, f, indent=4)

    if request.method == 'GET':
        return json.dumps(data.get('sensors'), indent=4)
    if request.method == 'DELETE':
        data['sensors'] = []
        _persist()
        return "All sensors deleted successfully"
    if request.method == 'POST':
        # Sensors are keyed by their 1-based position in the list.
        sensor_id = len(data['sensors']) + 1
        data['sensors'].append(
            {str(sensor_id): {"value": "0", "name": request.form['name']}})
        _persist()
        return "New temperature value created successfully"
    return "Not a valid method"
@app.route('/thermo/<thermid>', methods=['GET','PUT'])
def getThermostate(thermid):
    """Return (GET) or update (PUT) the thermostat identified by <thermid>.

    PUT expects form fields: temperature, mode, sensors (python literal
    list), time_programming (JSON) and hot.
    """
    global data
    # Thermostats are stored as a list of one-key dicts keyed by 1-based id.
    id = int(thermid)
    if request.method == 'GET':
        return json.dumps(data['thermostats'][id-1].get(str(id)), indent=4)
    if request.method == 'PUT':
        temp = request.form['temperature']
        data['thermostats'][id-1].get(str(id))['temperature']=temp
        mode = request.form['mode']
        data['thermostats'][id-1].get(str(id))['mode']=mode
        # The sensors field arrives as a python-literal list, e.g. "[1, 2]".
        sensors = request.form['sensors']
        sensors= ast.literal_eval(sensors)
        data['thermostats'][id-1].get(str(id))['sensors']=sensors
        time_programming = (request.form['time_programming'])
        print (time_programming)
        ''' n=json.dumps(time_programming)'''
        data['thermostats'][id-1].get(str(id))['time']=json.loads(time_programming)
        hot = (request.form['hot'])
        data['thermostats'][id-1].get(str(id))['hot']=hot
        # Persist the updated model to the JSON backing file.
        file = open('testData.json','w')
        json.dump(data,file,indent=4)
        file.close()
        return ' '
@app.route('/thermo', methods=['GET','POST','DELETE'])
def showThermo():
    """Offers the three available methods of the api for the thermostates

    GET - Lists all thermostates
    POST - Adds a default thermostate with no sensors assigned and 21 degree
    DELETE - Delete all thermostates
    """
    global data

    def _persist():
        # Write the in-memory model back to the JSON backing file.  The
        # context manager closes the file even if json.dump raises
        # (the original open/close pair could leak the handle).
        with open('testData.json', 'w') as f:
            json.dump(data, f, indent=4)

    if request.method == 'GET':
        return json.dumps(data['thermostats'], indent=4)
    if request.method == 'POST':
        # Thermostats are keyed by their 1-based position in the list.
        thermo_id = len(data['thermostats']) + 1
        data['thermostats'].append({
            str(thermo_id): {
                "name": request.form['name'],
                'sensors': [],
                'temperature': '21',
                'mode': 'OFF',
            }
        })
        _persist()
        return "New thermostate created successfully"
    if request.method == 'DELETE':
        data['thermostats'] = []
        _persist()
        return "All thermostates deleted successfully"
    return "Not a valid method"
def main():
    """Load the persisted model, start the worker threads and serve the API."""
    global data
    file=open('testData.json','r')
    data = json.load(file)
    file.close()
    mySensorReader = sensorReader()
    mySensorReader.start()
    myActuatorTrigger = actuatorTrigger()
    myActuatorTrigger.start()
    # Blocks here until the Flask server stops.
    app.run(host='0.0.0.0', port=6789,threaded=True, debug=False)
    try:
        mySensorReader.join()
        myActuatorTrigger.join()
    except KeyboardInterrupt:
        # Ask both worker loops to exit, then release the GPIO pins.
        mySensorReader.exitapp = True
        myActuatorTrigger.exitapp = True
        GPIO.cleanup()
# Start the server when executed as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "3d510b4089459a1d3e46d1b59e963dbe",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 107,
"avg_line_length": 36.84126984126984,
"alnum_prop": 0.5460290104839868,
"repo_name": "mpascu/SmartThermostatServer",
"id": "af870d44380e3fdcaa397f30d62f1b1b9861168d",
"size": "6963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1884"
},
{
"name": "Python",
"bytes": "6963"
}
],
"symlink_target": ""
} |
import re
from crud_object import main
def test_main(cloud_config, capsys):
    """Smoke-test crud_object.main against the storage bucket fixture.

    Uploads this file, fetches it back and deletes it, then checks the
    captured output for the expected progress messages and the absence
    of a "Downloaded file !=" mismatch report.
    """
    main(cloud_config.storage_bucket, __file__)
    out, err = capsys.readouterr()
    assert not re.search(r'Downloaded file [!]=', out)
    assert re.search(r'Uploading.*Fetching.*Deleting.*Done', out, re.DOTALL)
| {
"content_hash": "e776e266c501255e430831ad16dd92b4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.6915254237288135,
"repo_name": "clarko1/Cramd",
"id": "89e764b4d58788b5eb9823f4d7bb903ab0f5d435",
"size": "870",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "storage/api/crud_object_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2924"
},
{
"name": "HTML",
"bytes": "23592"
},
{
"name": "JavaScript",
"bytes": "11222"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Protocol Buffer",
"bytes": "8810"
},
{
"name": "Python",
"bytes": "1055640"
},
{
"name": "Shell",
"bytes": "8344"
}
],
"symlink_target": ""
} |
import roslib; roslib.load_manifest('ur_driver')
import time, sys, threading, math
import copy
import datetime
import socket, select
import struct
import traceback, code
import optparse
import SocketServer
import rospy
import actionlib
from sensor_msgs.msg import JointState
from control_msgs.msg import FollowJointTrajectoryAction
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import WrenchStamped
from dynamic_reconfigure.server import Server
from ur_driver.cfg import URDriverConfig
from ur_driver.deserialize import RobotState, RobotMode
from ur_driver.deserializeRT import RobotStateRT
from ur_msgs.srv import SetPayload, SetIO
from ur_msgs.msg import *
# renaming classes
DigitalIn = Digital
DigitalOut = Digital
Flag = Digital
prevent_programming = False
# Joint offsets, pulled from calibration information stored in the URDF
#
# { "joint_name" : offset }
#
# q_actual = q_from_driver + offset
joint_offsets = {}
PORT=30002 # 10 Hz, RobotState
RT_PORT=30003 #125 Hz, RobotStateRT
DEFAULT_REVERSE_PORT = 50001 #125 Hz, custom data (from prog)
# Message type ids exchanged on the reverse channel with the URScript prog.
MSG_OUT = 1
MSG_QUIT = 2
MSG_JOINT_STATES = 3
MSG_MOVEJ = 4
MSG_WAYPOINT_FINISHED = 5
MSG_STOPJ = 6
MSG_SERVOJ = 7
MSG_SET_PAYLOAD = 8
MSG_WRENCH = 9
MSG_SET_DIGITAL_OUT = 10
MSG_GET_IO = 11
MSG_SET_FLAG = 12
MSG_SET_TOOL_VOLTAGE = 13
MSG_SET_ANALOG_OUT = 14
# Fixed-point multipliers used when packing float values as 32-bit ints.
MULT_payload = 1000.0
MULT_wrench = 10000.0
MULT_jointstate = 10000.0
MULT_time = 1000000.0
MULT_blend = 1000.0
MULT_analog = 1000000.0
MULT_analog_robotstate = 0.1
#Max Velocity accepted by ur_driver
MAX_VELOCITY = 10.0
#Using a very high value in order to not limit execution of trajectories being sent from MoveIt!
#Bounds for SetPayload service
MIN_PAYLOAD = 0.0
MAX_PAYLOAD = 1.0
#Using a very conservative value as it should be set throught the parameter server
# Function codes for the IO-related reverse-channel messages.
FUN_SET_DIGITAL_OUT = 1
FUN_SET_FLAG = 2
FUN_SET_ANALOG_OUT = 3
FUN_SET_TOOL_VOLTAGE = 4
# Delay inserted after each IO command to avoid overrunning the robot.
IO_SLEEP_TIME = 0.05
JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
               'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
# Sample joint configurations (radians).
Q1 = [2.2,0,-1.57,0,0,0]
Q2 = [1.5,0,-1.57,0,0,0]
Q3 = [1.5,-0.2,-1.57,0,0,0]
# Handle for the currently connected CommanderTCPHandler, guarded by the
# lock/condition pair below (see set/getConnectedRobot).
connected_robot = None
connected_robot_lock = threading.Lock()
connected_robot_cond = threading.Condition(connected_robot_lock)
# Most recent JointState published, guarded by its lock.
last_joint_states = None
last_joint_states_lock = threading.Lock()
pub_joint_states = rospy.Publisher('joint_states', JointState, queue_size=1)
pub_wrench = rospy.Publisher('wrench', WrenchStamped, queue_size=1)
pub_io_states = rospy.Publisher('io_states', IOStates, queue_size=1)
#dump_state = open('dump_state', 'wb')
class EOF(Exception): pass  # internal signal: peer closed or stopped responding
def dumpstacks():
    """Print a stack trace for every live thread (debugging aid)."""
    id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
    code = []
    for threadId, stack in sys._current_frames().items():
        code.append("\n# Thread: %s(%d)" % (id2name.get(threadId,""), threadId))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
            if line:
                code.append(" %s" % (line.strip()))
    print "\n".join(code)
def log(s):
    """Print s prefixed with a microsecond timestamp."""
    print "[%s] %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), s)
RESET_PROGRAM = '''def resetProg():
sleep(0.0)
end
'''
#RESET_PROGRAM = ''
class URConnection(object):
    """Primary (10 Hz) connection to the robot controller.

    Receives RobotState packets, publishes IO states, tracks whether the
    robot can be programmed, and uploads the URScript program.
    State machine: DISCONNECTED -> CONNECTED -> READY_TO_PROGRAM -> EXECUTING.
    """
    TIMEOUT = 1.0

    # robot_state values
    DISCONNECTED = 0
    CONNECTED = 1
    READY_TO_PROGRAM = 2
    EXECUTING = 3

    def __init__(self, hostname, port, program):
        self.__thread = None
        self.__sock = None
        self.robot_state = self.DISCONNECTED
        self.hostname = hostname
        self.port = port
        self.program = program
        self.last_state = None

    def connect(self):
        """Open the socket and start the background receive thread."""
        if self.__sock:
            self.disconnect()
        self.__buf = ""
        self.robot_state = self.CONNECTED
        self.__sock = socket.create_connection((self.hostname, self.port))
        self.__keep_running = True
        self.__thread = threading.Thread(name="URConnection", target=self.__run)
        self.__thread.daemon = True
        self.__thread.start()

    def send_program(self):
        """Upload self.program to the robot (unless globally prevented)."""
        global prevent_programming
        if prevent_programming:
            rospy.loginfo("Programming is currently prevented")
            return
        assert self.robot_state in [self.READY_TO_PROGRAM, self.EXECUTING]
        rospy.loginfo("Programming the robot at %s" % self.hostname)
        self.__sock.sendall(self.program)
        self.robot_state = self.EXECUTING

    def send_reset_program(self):
        """Replace the running program with a no-op, stopping execution."""
        self.__sock.sendall(RESET_PROGRAM)
        self.robot_state = self.READY_TO_PROGRAM

    def disconnect(self):
        """Stop the receive thread and close the socket."""
        if self.__thread:
            self.__keep_running = False
            self.__thread.join()
            self.__thread = None
        if self.__sock:
            self.__sock.close()
            self.__sock = None
        self.last_state = None
        self.robot_state = self.DISCONNECTED

    def ready_to_program(self):
        """Return True when the robot accepts program uploads."""
        return self.robot_state in [self.READY_TO_PROGRAM, self.EXECUTING]

    def __trigger_disconnected(self):
        log("Robot disconnected")
        self.robot_state = self.DISCONNECTED
    def __trigger_ready_to_program(self):
        rospy.loginfo("Robot ready to program")
    def __trigger_halted(self):
        log("Halted")

    def __on_packet(self, buf):
        """Handle one complete RobotState packet: publish IO, update state."""
        state = RobotState.unpack(buf)
        self.last_state = state
        #import deserialize; deserialize.pstate(self.last_state)

        #log("Packet. Mode=%s" % state.robot_mode_data.robot_mode)

        if not state.robot_mode_data.real_robot_enabled:
            rospy.logfatal("Real robot is no longer enabled.  Driver is fuxored")
            time.sleep(2)
            sys.exit(1)

        ###
        # IO-Support is EXPERIMENTAL
        #
        # Notes:
        # - Where are the flags coming from? Do we need flags? No, as 'prog' does not use them and other scripts are not running!
        # - analog_input2 and analog_input3 are within ToolData
        # - What to do with the different analog_input/output_range/domain?
        # - Shall we have appropriate ur_msgs definitions in order to reflect MasterboardData, ToolData,...?
        ###

        # Use information from the robot state packet to publish IOStates
        msg = IOStates()
        #gets digital in states
        for i in range(0, 10):
            msg.digital_in_states.append(DigitalIn(i, (state.masterboard_data.digital_input_bits & (1<<i))>>i))
        #gets digital out states
        for i in range(0, 10):
            msg.digital_out_states.append(DigitalOut(i, (state.masterboard_data.digital_output_bits & (1<<i))>>i))
        #gets analog_in[0] state
        inp = state.masterboard_data.analog_input0 / MULT_analog_robotstate
        msg.analog_in_states.append(Analog(0, inp))
        #gets analog_in[1] state
        inp = state.masterboard_data.analog_input1 / MULT_analog_robotstate
        msg.analog_in_states.append(Analog(1, inp))
        #gets analog_out[0] state
        inp = state.masterboard_data.analog_output0 / MULT_analog_robotstate
        msg.analog_out_states.append(Analog(0, inp))
        #gets analog_out[1] state
        inp = state.masterboard_data.analog_output1 / MULT_analog_robotstate
        msg.analog_out_states.append(Analog(1, inp))
        #print "Publish IO-Data from robot state data"
        pub_io_states.publish(msg)

        # Updates the state machine that determines whether we can program the robot.
        can_execute = (state.robot_mode_data.robot_mode in [RobotMode.READY, RobotMode.RUNNING])
        if self.robot_state == self.CONNECTED:
            if can_execute:
                self.__trigger_ready_to_program()
                self.robot_state = self.READY_TO_PROGRAM
        elif self.robot_state == self.READY_TO_PROGRAM:
            if not can_execute:
                self.robot_state = self.CONNECTED
        elif self.robot_state == self.EXECUTING:
            if not can_execute:
                self.__trigger_halted()
                self.robot_state = self.CONNECTED

        # Report on any unknown packet types that were received
        if len(state.unknown_ptypes) > 0:
            state.unknown_ptypes.sort()
            s_unknown_ptypes = [str(ptype) for ptype in state.unknown_ptypes]
            self.throttle_warn_unknown(1.0, "Ignoring unknown pkt type(s): %s. "
                          "Please report." % ", ".join(s_unknown_ptypes))

    def throttle_warn_unknown(self, period, msg):
        """Emit rospy.logwarn(msg) at most once per `period` seconds."""
        self.__dict__.setdefault('_last_hit', 0.0)
        # this only works for a single caller
        if (self._last_hit + period) <= rospy.get_time():
            self._last_hit = rospy.get_time()
            rospy.logwarn(msg)

    def __run(self):
        """Receive loop: accumulate bytes and dispatch complete packets."""
        while self.__keep_running:
            r, _, _ = select.select([self.__sock], [], [], self.TIMEOUT)
            if r:
                more = self.__sock.recv(4096)
                if more:
                    self.__buf = self.__buf + more

                    #unpack_from requires a buffer of at least 48 bytes
                    while len(self.__buf) >= 48:
                        # Attempts to extract a packet
                        packet_length, ptype = struct.unpack_from("!IB", self.__buf)
                        #print("PacketLength: ", packet_length, "; BufferSize: ", len(self.__buf))
                        if len(self.__buf) >= packet_length:
                            packet, self.__buf = self.__buf[:packet_length], self.__buf[packet_length:]
                            self.__on_packet(packet)
                        else:
                            break
                else:
                    self.__trigger_disconnected()
                    self.__keep_running = False

            else:
                self.__trigger_disconnected()
                self.__keep_running = False
class URConnectionRT(object):
    """Real-time (125 Hz) connection publishing joint states and wrench."""
    TIMEOUT = 1.0

    # robot_state values
    DISCONNECTED = 0
    CONNECTED = 1

    def __init__(self, hostname, port):
        self.__thread = None
        self.__sock = None
        self.robot_state = self.DISCONNECTED
        self.hostname = hostname
        self.port = port
        self.last_stateRT = None

    def connect(self):
        """Open the socket and start the background receive thread."""
        if self.__sock:
            self.disconnect()
        self.__buf = ""
        self.robot_state = self.CONNECTED
        self.__sock = socket.create_connection((self.hostname, self.port))
        self.__keep_running = True
        self.__thread = threading.Thread(name="URConnectionRT", target=self.__run)
        self.__thread.daemon = True
        self.__thread.start()

    def disconnect(self):
        """Stop the receive thread and close the socket."""
        if self.__thread:
            self.__keep_running = False
            self.__thread.join()
            self.__thread = None
        if self.__sock:
            self.__sock.close()
            self.__sock = None
        self.last_state = None
        self.robot_state = self.DISCONNECTED

    def __trigger_disconnected(self):
        log("Robot disconnected")
        self.robot_state = self.DISCONNECTED

    def __on_packet(self, buf):
        """Handle one RobotStateRT packet: publish JointState and Wrench."""
        global last_joint_states, last_joint_states_lock
        now = rospy.get_rostime()
        stateRT = RobotStateRT.unpack(buf)
        self.last_stateRT = stateRT

        msg = JointState()
        msg.header.stamp = now
        msg.header.frame_id = "From real-time state data"
        msg.name = joint_names
        msg.position = [0.0] * 6
        # Apply the URDF calibration offsets to the raw joint angles.
        for i, q in enumerate(stateRT.q_actual):
            msg.position[i] = q + joint_offsets.get(joint_names[i], 0.0)
        msg.velocity = stateRT.qd_actual
        msg.effort = [0]*6
        pub_joint_states.publish(msg)
        with last_joint_states_lock:
            last_joint_states = msg

        # TCP force/torque, published as a stamped wrench.
        wrench_msg = WrenchStamped()
        wrench_msg.header.stamp = now
        wrench_msg.wrench.force.x = stateRT.tcp_force[0]
        wrench_msg.wrench.force.y = stateRT.tcp_force[1]
        wrench_msg.wrench.force.z = stateRT.tcp_force[2]
        wrench_msg.wrench.torque.x = stateRT.tcp_force[3]
        wrench_msg.wrench.torque.y = stateRT.tcp_force[4]
        wrench_msg.wrench.torque.z = stateRT.tcp_force[5]
        pub_wrench.publish(wrench_msg)

    def __run(self):
        """Receive loop: accumulate bytes and dispatch complete packets."""
        while self.__keep_running:
            r, _, _ = select.select([self.__sock], [], [], self.TIMEOUT)
            if r:
                more = self.__sock.recv(4096)
                if more:
                    self.__buf = self.__buf + more

                    #unpack_from requires a buffer of at least 48 bytes
                    while len(self.__buf) >= 48:
                        # Attempts to extract a packet
                        packet_length = struct.unpack_from("!i", self.__buf)[0]
                        #print("PacketLength: ", packet_length, "; BufferSize: ", len(self.__buf))
                        if len(self.__buf) >= packet_length:
                            packet, self.__buf = self.__buf[:packet_length], self.__buf[packet_length:]
                            self.__on_packet(packet)
                        else:
                            break
                else:
                    self.__trigger_disconnected()
                    self.__keep_running = False

            else:
                self.__trigger_disconnected()
                self.__keep_running = False
def setConnectedRobot(r):
    """Record the active CommanderTCPHandler (or None) and wake any waiter."""
    global connected_robot, connected_robot_lock
    with connected_robot_lock:
        connected_robot = r
        connected_robot_cond.notify()
def getConnectedRobot(wait=False, timeout=-1):
    """Return the current robot handler (may be None).

    With wait=True, blocks until a handler connects or `timeout` seconds
    elapse (a negative timeout waits indefinitely).
    """
    started = time.time()
    with connected_robot_lock:
        if wait:
            while not connected_robot:
                if timeout >= 0 and time.time() > started + timeout:
                    break
                connected_robot_cond.wait(0.2)
        return connected_robot
# Receives messages from the robot over the socket
class CommanderTCPHandler(SocketServer.BaseRequestHandler):
    """Handles the reverse connection opened by the URScript program.

    Reads MSG_* packets coming from the robot and exposes send_* methods
    that push commands (servoj, stopj, IO, payload) back to the program.
    """

    def recv_more(self):
        """Return the next chunk of bytes; raise EOF on close or silence."""
        global last_joint_states, last_joint_states_lock
        while True:
            r, _, _ = select.select([self.request], [], [], 0.2)
            if r:
                more = self.request.recv(4096)
                if not more:
                    raise EOF("EOF on recv")
                return more
            else:
                # No data: treat >1s of joint-state silence as a dead link.
                now = rospy.get_rostime()
                if last_joint_states and \
                        last_joint_states.header.stamp < now - rospy.Duration(1.0):
                    rospy.logerr("Stopped hearing from robot (last heard %.3f sec ago). Disconnected" % \
                                     (now - last_joint_states.header.stamp).to_sec())
                    raise EOF()

    def handle(self):
        """Main receive loop: register as the connected robot, parse messages."""
        self.__socket_lock = threading.Lock()
        setConnectedRobot(self)
        print "Handling a request"
        try:
            buf = self.recv_more()
            if not buf: return

            while True:
                #print "Buf:", [ord(b) for b in buf]

                # Unpacks the message type
                mtype = struct.unpack_from("!i", buf, 0)[0]
                buf = buf[4:]
                #print "Message type:", mtype

                if mtype == MSG_OUT:
                    # Unpacks string message, terminated by tilde
                    i = buf.find("~")
                    while i < 0:
                        buf = buf + self.recv_more()
                        i = buf.find("~")
                        if len(buf) > 2000:
                            raise Exception("Probably forgot to terminate a string: %s..." % buf[:150])
                    s, buf = buf[:i], buf[i+1:]
                    log("Out: %s" % s)

                elif mtype == MSG_QUIT:
                    print "Quitting"
                    raise EOF("Received quit")
                elif mtype == MSG_WAYPOINT_FINISHED:
                    while len(buf) < 4:
                        buf = buf + self.recv_more()
                    waypoint_id = struct.unpack_from("!i", buf, 0)[0]
                    buf = buf[4:]
                    print "Waypoint finished (not handled)"
                else:
                    raise Exception("Unknown message type: %i" % mtype)

                if not buf:
                    buf = buf + self.recv_more()
        except EOF, ex:
            print "Connection closed (command):", ex
            setConnectedRobot(None)

    def __send_message(self, data):
        """
        Send a message to the robot.

        The message is given as a list of integers that will be packed
        as 4 bytes each in network byte order (big endian).

        A lock is acquired before sending the message to prevent race conditions.

        :param data: list of int, the data to send
        """
        buf = struct.pack("!%ii" % len(data), *data)
        with self.__socket_lock:
            self.request.send(buf)

    def send_quit(self):
        """Ask the robot-side program to terminate."""
        self.__send_message([MSG_QUIT])

    def send_servoj(self, waypoint_id, q_actual, t):
        """Stream one servoj setpoint (6 joint angles, duration t seconds)."""
        assert(len(q_actual) == 6)
        q_robot = [0.0] * 6
        # Remove the URDF calibration offsets before sending raw angles.
        for i, q in enumerate(q_actual):
            q_robot[i] = q - joint_offsets.get(joint_names[i], 0.0)
        params = [MSG_SERVOJ, waypoint_id] + \
                 [MULT_jointstate * qq for qq in q_robot] + \
                 [MULT_time * t]
        self.__send_message(params)

    #Experimental set_payload implementation
    def send_payload(self,payload):
        self.__send_message([MSG_SET_PAYLOAD, payload * MULT_payload])

    #Experimental set_digital_output implementation
    def set_digital_out(self, pinnum, value):
        self.__send_message([MSG_SET_DIGITAL_OUT, pinnum, value])
        time.sleep(IO_SLEEP_TIME)

    def set_analog_out(self, pinnum, value):
        self.__send_message([MSG_SET_ANALOG_OUT, pinnum, value * MULT_analog])
        time.sleep(IO_SLEEP_TIME)

    def set_tool_voltage(self, value):
        self.__send_message([MSG_SET_TOOL_VOLTAGE, value, 0])
        time.sleep(IO_SLEEP_TIME)

    def set_flag(self, pin, val):
        self.__send_message([MSG_SET_FLAG, pin, val])
        #set_flag will fail if called too closely together--added delay
        time.sleep(IO_SLEEP_TIME)

    def send_stopj(self):
        """Command an immediate joint-space stop."""
        self.__send_message([MSG_STOPJ])

    def set_waypoint_finished_cb(self, cb):
        self.waypoint_finished_cb = cb

    # Returns the last JointState message sent out
    def get_joint_states(self):
        global last_joint_states, last_joint_states_lock
        return last_joint_states
class TCPServer(SocketServer.TCPServer):
    """TCP server accepting the reverse connection from the robot program."""
    allow_reuse_address = True  # Allows the program to restart gracefully on crash
    timeout = 5
# Waits until all threads have completed. Allows KeyboardInterrupt to occur
def joinAll(threads):
    """Block until every thread in *threads* has finished.

    Polls with short joins so a KeyboardInterrupt can still be delivered
    to the main thread while waiting.

    Fix: use Thread.is_alive() (available since Python 2.6) instead of the
    deprecated camelCase alias isAlive(), which was removed in Python 3.9.
    """
    while any(t.is_alive() for t in threads):
        for t in threads:
            t.join(0.2)
# Returns the duration between moving from point (index-1) to point
# index in the given JointTrajectory
def get_segment_duration(traj, index):
    """Return the time (seconds) spent moving from point index-1 to point index.

    For index 0 this is simply the first point's time_from_start.
    """
    if index > 0:
        delta = traj.points[index].time_from_start - traj.points[index-1].time_from_start
        return delta.to_sec()
    return traj.points[0].time_from_start.to_sec()
# Reorders the JointTrajectory traj according to the order in
# joint_names. Destructive.
def reorder_traj_joints(traj, joint_names):
    """Permute every point of `traj` in place so its joints follow `joint_names`."""
    mapping = [traj.joint_names.index(name) for name in joint_names]
    def opt_permute(seq):
        # velocities/accelerations may be absent (empty list); keep them empty.
        return [seq[i] for i in mapping] if seq else []
    traj.points = [
        JointTrajectoryPoint(
            positions=[p.positions[i] for i in mapping],
            velocities=opt_permute(p.velocities),
            accelerations=opt_permute(p.accelerations),
            time_from_start=p.time_from_start)
        for p in traj.points
    ]
    traj.joint_names = joint_names
def interp_cubic(p0, p1, t_abs):
    """Cubic interpolation between two trajectory points.

    p0, p1: boundary points carrying positions, velocities and time_from_start.
    t_abs: absolute sample time in seconds (same clock as time_from_start).
    Returns a new JointTrajectoryPoint whose positions/velocities/accelerations
    are sampled from the cubic that matches position and velocity at both ends.
    """
    # T is the segment length; t is the sample time relative to the segment start.
    T = (p1.time_from_start - p0.time_from_start).to_sec()
    t = t_abs - p0.time_from_start.to_sec()
    q = [0] * 6
    qdot = [0] * 6
    qddot = [0] * 6
    for i in range(len(p0.positions)):
        # Coefficients of q(t) = a + b*t + c*t^2 + d*t^3 fitted to the
        # endpoint positions and velocities of this segment.
        a = p0.positions[i]
        b = p0.velocities[i]
        c = (-3*p0.positions[i] + 3*p1.positions[i] - 2*T*p0.velocities[i] - T*p1.velocities[i]) / T**2
        d = (2*p0.positions[i] - 2*p1.positions[i] + T*p0.velocities[i] + T*p1.velocities[i]) / T**3
        q[i] = a + b*t + c*t**2 + d*t**3
        qdot[i] = b + 2*c*t + 3*d*t**2   # first derivative of the cubic
        qddot[i] = 2*c + 6*d*t           # second derivative of the cubic
    return JointTrajectoryPoint(positions=q, velocities=qdot, accelerations=qddot, time_from_start=rospy.Duration(t_abs))
# Returns (q, qdot, qddot) for sampling the JointTrajectory at time t.
# The time t is the time since the trajectory was started.
def sample_traj(traj, t):
    """Sample `traj` at relative time t, clamping to the first/last point."""
    # Before the start: hold the first point.
    if t <= 0.0:
        return copy.deepcopy(traj.points[0])
    # Past the end: hold the last point.
    if t >= traj.points[-1].time_from_start.to_sec():
        return copy.deepcopy(traj.points[-1])
    # Otherwise locate the segment [seg, seg+1] bracketing t and interpolate.
    seg = 0
    while traj.points[seg + 1].time_from_start.to_sec() < t:
        seg += 1
    return interp_cubic(traj.points[seg], traj.points[seg + 1], t)
def traj_is_finite(traj):
    """Return True iff every position and velocity in `traj` is a finite number."""
    for point in traj.points:
        for value in list(point.positions) + list(point.velocities):
            if math.isinf(value) or math.isnan(value):
                return False
    return True
def has_limited_velocities(traj):
    """Return True iff no velocity magnitude in `traj` exceeds the module-level
    max_velocity bound."""
    for point in traj.points:
        if any(math.fabs(vel) > max_velocity for vel in point.velocities):
            return False
    return True
def has_velocities(traj):
    """Return True iff every trajectory point defines one velocity per position."""
    return all(len(point.velocities) == len(point.positions) for point in traj.points)
def within_tolerance(a_vec, b_vec, tol_vec):
    """Elementwise check that |a - b| <= tol across three parallel sequences."""
    return all(abs(a - b) <= tol for a, b, tol in zip(a_vec, b_vec, tol_vec))
class URServiceProvider(object):
    """Exposes robot commands as ROS services (currently only setPayload)."""
    def __init__(self, robot):
        self.robot = robot
        rospy.Service('ur_driver/setPayload', SetPayload, self.setPayload)
    def set_robot(self, robot):
        # Swap the active robot connection (may be None while disconnected).
        self.robot = robot
    def setPayload(self, req):
        """Service callback: forward req.payload to the robot if within bounds.

        Returns False when the payload is out of [min_payload, max_payload]
        or when no robot is connected; True on success.
        """
        if req.payload < min_payload or req.payload > max_payload:
            print 'ERROR: Payload ' + str(req.payload) + ' out of bounds (' + str(min_payload) + ', ' + str(max_payload) + ')'
            return False
        if self.robot:
            self.robot.send_payload(req.payload)
        else:
            return False
        return True
class URTrajectoryFollower(object):
    """Actionlib server that follows FollowJointTrajectory goals.

    A rospy.Timer ticks _update every RATE seconds; each tick samples the
    active trajectory and streams one servoj setpoint to the robot.
    State shared between the action callbacks and the timer is protected by
    following_lock.
    """
    RATE = 0.02  # seconds between _update ticks
    def __init__(self, robot, goal_time_tolerance=None):
        self.goal_time_tolerance = goal_time_tolerance or rospy.Duration(0.0)
        self.joint_goal_tolerances = [0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
        self.following_lock = threading.Lock()
        self.T0 = time.time()
        self.robot = robot
        self.server = actionlib.ActionServer("follow_joint_trajectory",
                                             FollowJointTrajectoryAction,
                                             self.on_goal, self.on_cancel, auto_start=False)
        self.goal_handle = None
        self.traj = None
        # traj_t0 is the wall-clock time the current trajectory started.
        self.traj_t0 = 0.0
        self.first_waypoint_id = 10
        self.tracking_i = 0
        self.pending_i = 0
        self.last_point_sent = True
        self.update_timer = rospy.Timer(rospy.Duration(self.RATE), self._update)
    def set_robot(self, robot):
        """Swap the robot connection; cancels any in-progress goal."""
        # Cancels any goals in progress
        if self.goal_handle:
            self.goal_handle.set_canceled()
            self.goal_handle = None
        self.traj = None
        self.robot = robot
        if self.robot:
            self.init_traj_from_robot()
    # Sets the trajectory to remain stationary at the current position
    # of the robot.
    def init_traj_from_robot(self):
        """Build a single-point 'hold position' trajectory from the robot state."""
        if not self.robot: raise Exception("No robot connected")
        # Busy wait (avoids another mutex)
        state = self.robot.get_joint_states()
        while not state:
            time.sleep(0.1)
            state = self.robot.get_joint_states()
        self.traj_t0 = time.time()
        self.traj = JointTrajectory()
        self.traj.joint_names = joint_names
        self.traj.points = [JointTrajectoryPoint(
            positions = state.position,
            velocities = [0] * 6,
            accelerations = [0] * 6,
            time_from_start = rospy.Duration(0.0))]
    def start(self):
        """Initialize the hold trajectory and start accepting action goals."""
        self.init_traj_from_robot()
        self.server.start()
        print "The action server for this driver has been started"
    def on_goal(self, goal_handle):
        """Action-server goal callback: validate, then install the trajectory."""
        log("on_goal")
        # Checks that the robot is connected
        if not self.robot:
            rospy.logerr("Received a goal, but the robot is not connected")
            goal_handle.set_rejected()
            return
        # Checks if the joints are just incorrect
        if set(goal_handle.get_goal().trajectory.joint_names) != set(joint_names):
            rospy.logerr("Received a goal with incorrect joint names: (%s)" % \
                         ', '.join(goal_handle.get_goal().trajectory.joint_names))
            goal_handle.set_rejected()
            return
        if not traj_is_finite(goal_handle.get_goal().trajectory):
            rospy.logerr("Received a goal with infinites or NaNs")
            goal_handle.set_rejected(text="Received a goal with infinites or NaNs")
            return
        # Checks that the trajectory has velocities
        if not has_velocities(goal_handle.get_goal().trajectory):
            rospy.logerr("Received a goal without velocities")
            goal_handle.set_rejected(text="Received a goal without velocities")
            return
        # Checks that the velocities are withing the specified limits
        if not has_limited_velocities(goal_handle.get_goal().trajectory):
            message = "Received a goal with velocities that are higher than %f" % max_velocity
            rospy.logerr(message)
            goal_handle.set_rejected(text=message)
            return
        # Orders the joints of the trajectory according to joint_names
        reorder_traj_joints(goal_handle.get_goal().trajectory, joint_names)
        with self.following_lock:
            if self.goal_handle:
                # Cancels the existing goal
                self.goal_handle.set_canceled()
                self.first_waypoint_id += len(self.goal_handle.get_goal().trajectory.points)
                self.goal_handle = None
            # Inserts the current setpoint at the head of the trajectory,
            # so following starts smoothly from where we are now.
            now = time.time()
            point0 = sample_traj(self.traj, now - self.traj_t0)
            point0.time_from_start = rospy.Duration(0.0)
            goal_handle.get_goal().trajectory.points.insert(0, point0)
            self.traj_t0 = now
            # Replaces the goal
            self.goal_handle = goal_handle
            self.traj = goal_handle.get_goal().trajectory
            self.goal_handle.set_accepted()
    def on_cancel(self, goal_handle):
        """Action-server cancel callback: ramp down to a stop, then cancel."""
        log("on_cancel")
        if goal_handle == self.goal_handle:
            with self.following_lock:
                # Uses the next little bit of trajectory to slow to a stop
                STOP_DURATION = 0.5
                now = time.time()
                point0 = sample_traj(self.traj, now - self.traj_t0)
                point0.time_from_start = rospy.Duration(0.0)
                point1 = sample_traj(self.traj, now - self.traj_t0 + STOP_DURATION)
                point1.velocities = [0] * 6
                point1.accelerations = [0] * 6
                point1.time_from_start = rospy.Duration(STOP_DURATION)
                self.traj_t0 = now
                self.traj = JointTrajectory()
                self.traj.joint_names = joint_names
                self.traj.points = [point0, point1]
                self.goal_handle.set_canceled()
                self.goal_handle = None
        else:
            goal_handle.set_canceled()
    last_now = time.time()
    def _update(self, event):
        """Timer tick: stream the next setpoint, or finish/succeed the goal."""
        if self.robot and self.traj:
            now = time.time()
            if (now - self.traj_t0) <= self.traj.points[-1].time_from_start.to_sec():
                self.last_point_sent = False #sending intermediate points
                setpoint = sample_traj(self.traj, now - self.traj_t0)
                try:
                    self.robot.send_servoj(999, setpoint.positions, 4 * self.RATE)
                except socket.error:
                    pass
            elif not self.last_point_sent:
                # All intermediate points sent, sending last point to make sure we
                # reach the goal.
                # This should solve an issue where the robot does not reach the final
                # position and errors out due to not reaching the goal point.
                last_point = self.traj.points[-1]
                state = self.robot.get_joint_states()
                position_in_tol = within_tolerance(state.position, last_point.positions, self.joint_goal_tolerances)
                # Performing this check to try and catch our error condition. We will always
                # send the last point just in case.
                if not position_in_tol:
                    rospy.logwarn("Trajectory time exceeded and current robot state not at goal, last point required")
                    rospy.logwarn("Current trajectory time: %s, last point time: %s" % \
                                  (now - self.traj_t0, self.traj.points[-1].time_from_start.to_sec()))
                    rospy.logwarn("Desired: %s\nactual: %s\nvelocity: %s" % \
                                  (last_point.positions, state.position, state.velocity))
                setpoint = sample_traj(self.traj, self.traj.points[-1].time_from_start.to_sec())
                try:
                    self.robot.send_servoj(999, setpoint.positions, 4 * self.RATE)
                    self.last_point_sent = True
                except socket.error:
                    pass
            else: # Off the end
                if self.goal_handle:
                    last_point = self.traj.points[-1]
                    state = self.robot.get_joint_states()
                    position_in_tol = within_tolerance(state.position, last_point.positions, [0.1]*6)
                    velocity_in_tol = within_tolerance(state.velocity, last_point.velocities, [0.05]*6)
                    if position_in_tol and velocity_in_tol:
                        # The arm reached the goal (and isn't moving). Succeeding
                        self.goal_handle.set_succeeded()
                        self.goal_handle = None
                    #elif now - (self.traj_t0 + last_point.time_from_start.to_sec()) > self.goal_time_tolerance.to_sec():
                    #    # Took too long to reach the goal. Aborting
                    #    rospy.logwarn("Took too long to reach the goal.\nDesired: %s\nactual: %s\nvelocity: %s" % \
                    #                      (last_point.positions, state.position, state.velocity))
                    #    self.goal_handle.set_aborted(text="Took too long to reach the goal")
                    #    self.goal_handle = None
# joint_names: list of joints
#
# returns: { "joint_name" : joint_offset }
def load_joint_offsets(joint_names):
    """Read per-joint calibration offsets from the robot_description URDF.

    Returns a dict mapping joint name -> float offset for every joint that has
    a /robot/joint[@name]/calibration_offset element; joints without one are
    skipped with a warning.
    """
    from lxml import etree
    robot_description = rospy.get_param("robot_description")
    doc = etree.fromstring(robot_description)
    # select only 'calibration_offset' elements whose parent is a joint
    # element with a specific value for the name attribute
    expr = "/robot/joint[@name=$name]/calibration_offset"
    result = {}
    for joint in joint_names:
        joint_elt = doc.xpath(expr, name=joint)
        if len(joint_elt) == 1:
            calibration_offset = float(joint_elt[0].get("value"))
            result[joint] = calibration_offset
            rospy.loginfo("Found calibration offset for joint \"%s\": %.4f" % (joint, calibration_offset))
        elif len(joint_elt) > 1:
            rospy.logerr("Too many joints matched on \"%s\". Please report to package maintainer(s)." % joint)
        else:
            rospy.logwarn("No calibration offset for joint \"%s\"" % joint)
    return result
def get_my_ip(robot_ip, port):
    """Return the local interface IP used when connecting to robot_ip:port."""
    probe = socket.create_connection((robot_ip, port))
    local_ip = probe.getsockname()[0]
    probe.close()
    return local_ip
def handle_set_io(req):
    """SetIO service callback: dispatch req.fun to the matching robot command.

    Raises ROSServiceException when no robot is connected. Returns True on a
    recognized function code; an unknown code falls through (returns None),
    matching the previous behavior.
    """
    robot = getConnectedRobot(wait=False)
    if not robot:
        raise ROSServiceException("Robot not connected")
    if req.fun == FUN_SET_DIGITAL_OUT:
        robot.set_digital_out(req.pin, req.state)
        return True
    if req.fun == FUN_SET_FLAG:
        robot.set_flag(req.pin, req.state)
        return True
    if req.fun == FUN_SET_ANALOG_OUT:
        robot.set_analog_out(req.pin, req.state)
        return True
    if req.fun == FUN_SET_TOOL_VOLTAGE:
        robot.set_tool_voltage(req.pin)
        return True
def set_io_server():
    """Advertise the 'set_io' ROS service, handled by handle_set_io."""
    # The Service registers itself with the node; the previous code bound it
    # to an unused local `s`. Dropping the binding matches how this file
    # already treats rospy.Service in URServiceProvider.__init__.
    rospy.Service('set_io', SetIO, handle_set_io)
def reconfigure_callback(config, level):
    """Dynamic-reconfigure callback: mirror the flag into a module global.

    config: the requested configuration; level: change bitmask (unused here).
    Returns the config unchanged.
    """
    global prevent_programming
    prevent_programming = config.prevent_programming
    ## What about updating the value on the parameter server?
    return config
def main():
    """Driver entry point.

    Parses args, loads calibration/limit parameters, starts the reverse TCP
    server the robot connects back to, uploads the robot-side program, and
    then loops: while connected it mirrors the prevent_programming parameter;
    on disconnect it reprograms the robot and (re)wires the service provider
    and trajectory-following action server to the new connection.
    """
    rospy.init_node('ur_driver', disable_signals=True)
    if rospy.get_param("use_sim_time", False):
        rospy.logwarn("use_sim_time is set!!!")
    global prevent_programming
    reconfigure_srv = Server(URDriverConfig, reconfigure_callback)
    prefix = rospy.get_param("~prefix", "")
    print "Setting prefix to %s" % prefix
    global joint_names
    joint_names = [prefix + name for name in JOINT_NAMES]
    # Parses command line arguments
    parser = optparse.OptionParser(usage="usage: %prog robot_hostname [reverse_port]")
    (options, args) = parser.parse_args(rospy.myargv()[1:])
    if len(args) < 1:
        parser.error("You must specify the robot hostname")
    elif len(args) == 1:
        robot_hostname = args[0]
        reverse_port = DEFAULT_REVERSE_PORT
    elif len(args) == 2:
        robot_hostname = args[0]
        reverse_port = int(args[1])
        if not (0 <= reverse_port <= 65535):
            parser.error("You entered an invalid port number")
    else:
        parser.error("Wrong number of parameters")
    # Reads the calibrated joint offsets from the URDF
    global joint_offsets
    joint_offsets = load_joint_offsets(joint_names)
    if len(joint_offsets) > 0:
        rospy.loginfo("Loaded calibration offsets from urdf: %s" % joint_offsets)
    else:
        rospy.loginfo("No calibration offsets loaded from urdf")
    # Reads the maximum velocity
    # The max_velocity parameter is only used for debugging in the ur_driver. It's not related to actual velocity limits
    global max_velocity
    max_velocity = rospy.get_param("~max_velocity", MAX_VELOCITY) # [rad/s]
    rospy.loginfo("Max velocity accepted by ur_driver: %s [rad/s]" % max_velocity)
    # Reads the minimum payload
    global min_payload
    min_payload = rospy.get_param("~min_payload", MIN_PAYLOAD)
    # Reads the maximum payload
    global max_payload
    max_payload = rospy.get_param("~max_payload", MAX_PAYLOAD)
    rospy.loginfo("Bounds for Payload: [%s, %s]" % (min_payload, max_payload))
    # Sets up the server for the robot to connect to
    server = TCPServer(("", reverse_port), CommanderTCPHandler)
    thread_commander = threading.Thread(name="CommanderHandler", target=server.serve_forever)
    thread_commander.daemon = True
    thread_commander.start()
    # The robot-side program is a template; fill in where it should call back.
    with open(roslib.packages.get_pkg_dir('ur_driver') + '/prog') as fin:
        program = fin.read() % {"driver_hostname": get_my_ip(robot_hostname, PORT), "driver_reverseport": reverse_port}
    connection = URConnection(robot_hostname, PORT, program)
    connection.connect()
    connection.send_reset_program()
    connectionRT = URConnectionRT(robot_hostname, RT_PORT)
    connectionRT.connect()
    set_io_server()
    service_provider = None
    action_server = None
    try:
        while not rospy.is_shutdown():
            # Checks for disconnect
            if getConnectedRobot(wait=False):
                time.sleep(0.2)
                try:
                    prevent_programming = rospy.get_param("~prevent_programming")
                    update = {'prevent_programming': prevent_programming}
                    reconfigure_srv.update_configuration(update)
                except KeyError, ex:
                    print "Parameter 'prevent_programming' not set. Value: " + str(prevent_programming)
                    pass
                if prevent_programming:
                    print "Programming now prevented"
                    connection.send_reset_program()
            else:
                print "Disconnected. Reconnecting"
                if action_server:
                    action_server.set_robot(None)
                rospy.loginfo("Programming the robot")
                while True:
                    # Sends the program to the robot
                    while not connection.ready_to_program():
                        print "Waiting to program"
                        time.sleep(1.0)
                    try:
                        prevent_programming = rospy.get_param("~prevent_programming")
                        update = {'prevent_programming': prevent_programming}
                        reconfigure_srv.update_configuration(update)
                    except KeyError, ex:
                        print "Parameter 'prevent_programming' not set. Value: " + str(prevent_programming)
                        pass
                    connection.send_program()
                    # Wait (briefly) for the robot to connect back; retry on timeout.
                    r = getConnectedRobot(wait=True, timeout=1.0)
                    if r:
                        break
                rospy.loginfo("Robot connected")
                #provider for service calls
                if service_provider:
                    service_provider.set_robot(r)
                else:
                    service_provider = URServiceProvider(r)
                if action_server:
                    action_server.set_robot(r)
                else:
                    action_server = URTrajectoryFollower(r, rospy.Duration(1.0))
                    action_server.start()
    except KeyboardInterrupt:
        # Best-effort clean shutdown: tell the robot to quit, then re-raise.
        try:
            r = getConnectedRobot(wait=False)
            rospy.signal_shutdown("KeyboardInterrupt")
            if r: r.send_quit()
        except:
            pass
        raise
if __name__ == '__main__': main()
| {
"content_hash": "2f6324834f0898d5748aa109f140e9ec",
"timestamp": "",
"source": "github",
"line_count": 1022,
"max_line_length": 129,
"avg_line_length": 38.10665362035225,
"alnum_prop": 0.5740659904994223,
"repo_name": "ibaranov-cp/ridgeback_ur10",
"id": "0ee7e7d45e0944b437116f33178b9cc8c02a74e3",
"size": "38967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "universal_robot/ur_driver/src/ur_driver/driver.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "238513"
},
{
"name": "CMake",
"bytes": "15105"
},
{
"name": "Python",
"bytes": "92748"
},
{
"name": "Shell",
"bytes": "270"
}
],
"symlink_target": ""
} |
from imports import *
def nick_exist(nick):
    """Check if the nickname exists

    Args:
        nick: the nickname user entered

    Returns:
        True if a user with the same nickname is already in the database
    """
    matches = db_session.query(User).filter_by(nick=nick).all()
    return len(matches) > 0
def is_chinese(uchar):
    """判断一个unicode是否是汉字

    Args:
        uchar: the char to check

    Returns:
        True iff uchar lies in the CJK unified ideograph range U+4E00..U+9FA5
    """
    return u'\u4e00' <= uchar <= u'\u9fa5'
def nick_validate(nick):
    """Check if the nickname is valid

    Args:
        nick: the nickname to be checked

    Returns:
        True iff every character is an ASCII letter, digit, underscore,
        or a Chinese character
    """
    allowed = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"
    return all((ch in allowed) or is_chinese(ch) for ch in nick)
def email_exist(email):
    """Check if the email exists

    Args:
        email: the email to be checked

    Returns:
        True if a user with the same email is already in the database
    """
    matches = db_session.query(User).filter_by(email=email).all()
    return len(matches) > 0
def cardID_exist(cardID):
    """Check if the cardID exists

    Args:
        cardID: the member card id

    Returns:
        True if a user with the same member_id is already in the database
    """
    matches = db_session.query(User).filter_by(member_id=cardID).all()
    return len(matches) > 0
ALLOWED_EXTENSIONS = ['jpg', 'png']
def allowed_file(filename):
    """Check if the file name is valid

    Args:
        filename: the file to be checked

    Returns:
        True iff filename contains a dot and its final extension is allowed
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
def get_state(nick, password):
    """Get the state of the user

    Args:
        nick: the nickname of the user
        password: the password of the user

    Returns:
        The state of the first matching user, or False when no user matches
    """
    matches = db_session.query(User).filter(
        and_(User.nick == nick, User.password == password)).all()
    if not matches:
        return False
    return matches[0].state
def update_state(nick):
    """Update the state by nickname

    Sets state to '1' for the matching user(s) and commits immediately.

    Args:
        nick: the nickname of the user
    """
    db_session.query(User).filter(User.nick == nick).update({'state': '1'})
    db_session.commit()
def get_secure_photoname(filename):
    """Get the secured photo name

    Sanitizes the filename and inserts a timestamp between the stem and the
    extension so uploaded files get distinct names.

    Args:
        filename: the filename of the photo

    Returns:
        The secured, timestamped photo name
    """
    safe = secure_filename(filename)
    parts = safe.rsplit('.', 1)
    stamp = datetime.now().strftime('%Y%m%d%H%M%S')
    return parts[0] + stamp + '.' + parts[1]
def send_verify_email(nick, password, email):
    """Send verify email

    Builds an account-verification URL carrying the nick and password as
    query parameters and mails it using the test_verify_email.html template.

    Args:
        nick: the nickname of the new user
        password: the password of the new user
        email: the email of the new user

    Returns:
        Whether the email is sent successfully
    """
    verify_url = app.config['HOST_NAME'] + \
        '/verify?nick=' + nick + '&secret=' + password
    mail = Mail(app)
    msg = Message(u'曦潮书店', sender=app.config['ADMINS'][0], recipients=[email])
    msg.body = 'text body'
    msg.html = render_template(
        'test_verify_email.html', verify_url=verify_url, nick=nick)
    with app.app_context():
        try:
            mail.send(msg)
            return True
        except Exception, e:
            # Best-effort: log the failure loudly and report False to the caller.
            print "\n\n\n\n\n\n", "NoNoNoNoNoNoNo!", "\n\n\n\n\n\n"
            print str(e)
            return False
| {
"content_hash": "061349c73c2bc0a5b244d10cf64c012c",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 95,
"avg_line_length": 21.505681818181817,
"alnum_prop": 0.5928665785997358,
"repo_name": "NewBeeStudio/xichao-new",
"id": "d3f00380a27dd56f8b543cf909674feb350eebcd",
"size": "3835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xichao/packages/function/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "304421"
},
{
"name": "HTML",
"bytes": "614986"
},
{
"name": "JavaScript",
"bytes": "1455427"
},
{
"name": "PHP",
"bytes": "18819"
},
{
"name": "Python",
"bytes": "222994"
}
],
"symlink_target": ""
} |
class BuildingPoint(object):
    """One building edge event for the skyline sweep: x-coordinate, whether it
    is the left (start) edge, and the building height."""
    def __init__(self, point, is_start, height):
        self.point = point
        self.is_start = is_start
        self.height = height
    def __lt__(self, other):
        """Sweep order: by x; at equal x, taller starts first, shorter ends first."""
        if self.point != other.point:
            return self.point < other.point
        # Encode tie-breaking: start edges compare by -height, end edges by +height.
        self_key = -self.height if self.is_start else self.height
        other_key = -other.height if other.is_start else other.height
        return self_key < other_key
def get_skyline(buildings):
    """Compute skyline key points for [left, right, height] buildings.

    Sweeps sorted edge events while tracking the multiset of active heights;
    emits [x, height] whenever the maximum active height changes.
    """
    events = []
    for left, right, height in buildings:
        events.append(BuildingPoint(left, True, height))
        events.append(BuildingPoint(right, False, height))
    events.sort()
    # height -> count of active buildings at that height; 0 is a permanent
    # sentinel so max() is always defined (ground level).
    active = {0: 1}
    prev_height = 0
    skyline = []
    for event in events:
        if event.is_start:
            active[event.height] = active.get(event.height, 0) + 1
        else:
            if active[event.height] == 1:
                del active[event.height]
            else:
                active[event.height] -= 1
        current_height = max(active.keys())
        if current_height != prev_height:
            skyline.append([event.point, current_height])
            prev_height = current_height
    return skyline
if __name__ == '__main__':
    buildings = [[1, 3, 4], [3, 4, 4], [2, 6, 2], [8, 11, 4], [7, 9, 3], [10, 11, 2]]
    print(get_skyline(buildings))
| {
"content_hash": "2547b90c299371f88bcda0e02ffa8c5a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 85,
"avg_line_length": 30.183333333333334,
"alnum_prop": 0.5389287686361126,
"repo_name": "rtkasodariya/interview",
"id": "049bb4bfb45bbe44e090ab1baf00fa1c6a308def",
"size": "1865",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/geometry/skylinedrawing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "844400"
},
{
"name": "Python",
"bytes": "53011"
}
],
"symlink_target": ""
} |
"""Similar to the last problem, the bulk of 3.py is scraping a large comment
from the source of the webpage in order to find the name of the next webpage
to visit. The hint this time is to look for one "small letter" surrounded
by exactly three "big bodyguards" (letters) on each of its sides. To find all
matches of this pattern, construct a regular expression that will match to
exactly three uppercase letters, a single lowercase letter, and an additional
three uppercase letters. We'll preserve the named group for the lowercase
letters then join them together to get the name of the next webpage."""
import re
import requests
import webbrowser
from bs4 import BeautifulSoup, Comment
# Construct the regular expression.
# Ensure that there are only three capiltal letters surrounding the lowercase
# letter on either side; no more, no less.
regex = "[^A-Z][A-Z]{3}(?P<letter>[a-z])[A-Z]{3}[^A-Z]"
# Send a get request to the webpage and construct a parse tree from its content.
webpage = "http://www.pythonchallenge.com/pc/def/equality.html"
r = requests.get(webpage)
soup = BeautifulSoup(r.content, "html.parser")
# Find the mess of characters to search.
chars = soup.find(string=lambda text: isinstance(text, Comment))
# Find all matches within the characters. re.findall returns a list containing
# all of the contents of each named group in order of appearance; join this
# list together to construct the name of the next webpage.
msg = "".join(re.findall(regex, chars))
print(msg)
# When viewing the new webpage, it informs us that the file is no longer an
# html file, but a php file. Change the destination webpage accordingly.
split_page = webpage.split("equality")
split_page[1] = split_page[1].replace("html", "php")
webbrowser.open(split_page[0] + msg + split_page[1])
| {
"content_hash": "e7433cfe42d6434d22247386819b024c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 48.4054054054054,
"alnum_prop": 0.7604690117252931,
"repo_name": "cjonsmith/python-challenge",
"id": "5b104704de914379e176b538d5fcacaa9236bbc0",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem_03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9930"
}
],
"symlink_target": ""
} |
import unittest
from datetime import datetime
from unittest.mock import patch
from airflow.exceptions import AirflowException
from airflow.models import DAG, Connection
from airflow.providers.qubole.sensors.qubole import QuboleFileSensor, QubolePartitionSensor
from airflow.utils import db
DAG_ID = "qubole_test_dag"
TASK_ID = "test_task"
DEFAULT_CONN = "qubole_default"
TEMPLATE_CONN = "my_conn_id"
DEFAULT_DATE = datetime(2017, 1, 1)
class TestQuboleSensor(unittest.TestCase):
    """Tests for QuboleFileSensor / QubolePartitionSensor with poke() mocked out."""
    def setUp(self):
        # The sensors resolve their connection by id; make sure it exists in the db.
        db.merge_conn(
            Connection(conn_id=DEFAULT_CONN, conn_type='HTTP'))
    @patch('airflow.providers.qubole.sensors.qubole.QuboleFileSensor.poke')
    def test_file_sensore(self, patched_poke):
        # NOTE(review): method name misspells "sensor" as "sensore"; kept as-is
        # because renaming would change the collected test id.
        patched_poke.return_value = True
        sensor = QuboleFileSensor(
            task_id='test_qubole_file_sensor',
            data={"files": ["s3://some_bucket/some_file"]}
        )
        self.assertTrue(sensor.poke({}))
    @patch('airflow.providers.qubole.sensors.qubole.QubolePartitionSensor.poke')
    def test_partition_sensor(self, patched_poke):
        patched_poke.return_value = True
        sensor = QubolePartitionSensor(
            task_id='test_qubole_partition_sensor',
            data={
                "schema": "default",
                "table": "my_partitioned_table",
                "columns": [{"column": "month", "values": ["1", "2"]}]
            }
        )
        self.assertTrue(sensor.poke({}))
    @patch('airflow.providers.qubole.sensors.qubole.QubolePartitionSensor.poke')
    def test_partition_sensor_error(self, patched_poke):
        # Constructing the sensor inside a DAG with an invalid poke_interval
        # must raise at definition time, before poke() is ever called.
        patched_poke.return_value = True
        dag = DAG(DAG_ID, start_date=DEFAULT_DATE)
        with self.assertRaises(AirflowException):
            QubolePartitionSensor(
                task_id='test_qubole_partition_sensor',
                poke_interval=1,
                data={
                    "schema": "default",
                    "table": "my_partitioned_table",
                    "columns": [{"column": "month", "values": ["1", "2"]}]
                },
                dag=dag
            )
| {
"content_hash": "118e5c7c9cce3c61788898d3b45000f4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 91,
"avg_line_length": 34.08064516129032,
"alnum_prop": 0.608140085186938,
"repo_name": "wooga/airflow",
"id": "646d16a70b16dcea0c2f62966d00c9e6e392f5fa",
"size": "2903",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/providers/qubole/sensors/test_qubole.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from builtins import object
from math import *
from proteus import *
from proteus.default_p import *
try:
from .cylinder import *
except:
from cylinder import *
from proteus.mprans import PresInit
#domain = ctx.domain
#nd = ctx.nd
name = "pressureInitial"
coefficients=PresInit.Coefficients(nd=nd,
modelIndex=PINIT_model,
fluidModelIndex=V_model,
pressureModelIndex=PRESSURE_model)
#pressure increment should be zero on any pressure dirichlet boundaries
def getDBC_pInit(x,flag):
    """Dirichlet BC: zero on the 'right' boundary; None (no BC) elsewhere."""
    if flag in [boundaryTags['right']]: #,boundaryTags['left'],boundaryTags['front'], boundaryTags['back']]:
        return lambda x,t: 0.0
#the advectiveFlux should be zero on any no-flow boundaries
def getAdvectiveFlux_pInit(x,flag):
    """Zero advective flux everywhere except the 'right' (Dirichlet) boundary."""
    if flag != boundaryTags['right']:
        return lambda x,t: 0.0
def getDiffusiveFlux_pInit(x,flag):
    """Zero diffusive flux everywhere except the 'right' (Dirichlet) boundary."""
    if flag != boundaryTags['right']:
        return lambda x,t: 0.0
class getIBC_pInit(object):
    """Zero initial condition for the pressure-increment field."""
    def __init__(self):
        pass
    def uOfXT(self,x,t):
        """Return the initial value at point x and time t (identically zero)."""
        return 0.0
# Component 0 is the pressure increment: zero initial condition, Dirichlet on
# the right boundary, zero advective/diffusive fluxes on the other boundaries.
initialConditions = {0:getIBC_pInit()}
dirichletConditions = {0:getDBC_pInit }
advectiveFluxBoundaryConditions = {0:getAdvectiveFlux_pInit}
diffusiveFluxBoundaryConditions = {0:{0:getDiffusiveFlux_pInit}}
| {
"content_hash": "d9bedd762a69b07cdf1b6b0a7700e47d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 108,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.671062271062271,
"repo_name": "erdc/proteus",
"id": "e285e017bb9646e3434811419b8640d8dfdb8bc4",
"size": "1365",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "proteus/tests/cylinder2D/sbm_method/pressureInitial_p.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2790"
},
{
"name": "Asymptote",
"bytes": "1569"
},
{
"name": "C",
"bytes": "2827957"
},
{
"name": "C++",
"bytes": "7262408"
},
{
"name": "Cython",
"bytes": "154607"
},
{
"name": "Dockerfile",
"bytes": "2738"
},
{
"name": "Fortran",
"bytes": "51671"
},
{
"name": "Jupyter Notebook",
"bytes": "33357"
},
{
"name": "Makefile",
"bytes": "19043"
},
{
"name": "Python",
"bytes": "12534530"
},
{
"name": "Roff",
"bytes": "322"
},
{
"name": "Shell",
"bytes": "14084"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from sentry.models import GroupHash
from sentry.testutils import TestCase
class GroupTest(TestCase):
    """Round-trip test for GroupHash's last-processed-event-id helpers."""
    def test_fetch_and_record_last_processed_event_id(self):
        group = self.group
        grouphash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )
        # Record an event id for the real hash, then fetch for that hash plus
        # a nonexistent id (-1): the missing entry must come back as None.
        GroupHash.record_last_processed_event_id(
            grouphash.id,
            'event',
        )
        assert GroupHash.fetch_last_processed_event_id(
            [grouphash.id, -1],
        ) == ['event', None]
| {
"content_hash": "a1ee19a5e5042ef6afc2908f20a7967b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 24.916666666666668,
"alnum_prop": 0.5919732441471572,
"repo_name": "gencer/sentry",
"id": "6a108087713bb97a17ea52983cc8949ecc7f7681",
"size": "598",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/models/test_grouphash.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
} |
import pytest
from django.test.utils import override_settings
from django.urls import reverse
from democracy.models import Hearing
@pytest.mark.django_db
def test_hearing_delete_action(admin_client, default_hearing):
    """Admin bulk-delete on a hearing shows the confirm page, then soft-deletes."""
    change_url = reverse('admin:democracy_hearing_changelist')
    data = {'action': 'delete_selected', '_selected_action': [default_hearing.pk]}
    # First POST (no 'post' key) renders the confirmation page listing what
    # would be removed, including related objects.
    response = admin_client.post(change_url, data, follow=True)
    assert response.status_code == 200
    assert 'Are you sure?' in response.rendered_content
    assert 'Hearings: 1' in response.rendered_content
    assert 'Hearing Translations: 1' in response.rendered_content
    assert "Contact person orders: 1" in response.rendered_content
    assert 'Sections: 1' in response.rendered_content
    # Second POST confirms the deletion.
    data['post'] = ' yes'
    response = admin_client.post(change_url, data, follow=True)
    assert response.status_code == 200
    assert 'Successfully deleted 1 hearing.' in response.rendered_content
    # The hearing is soft-deleted: only visible via the everything() manager.
    default_hearing = Hearing.objects.everything().get(pk=default_hearing.pk)
    assert default_hearing.deleted is True
# TODO test section / section image inline soft delete somehow? it seems a bit complicated
| {
"content_hash": "f4ff57f2699adce154b4acc8f120a498",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 90,
"avg_line_length": 38.29032258064516,
"alnum_prop": 0.7447346251053075,
"repo_name": "City-of-Helsinki/kerrokantasi",
"id": "c7e737dc42c94caa7246b099278c3675ace05bfd",
"size": "1187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "democracy/tests/test_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1023"
},
{
"name": "Dockerfile",
"bytes": "2096"
},
{
"name": "HTML",
"bytes": "11723"
},
{
"name": "JavaScript",
"bytes": "210967"
},
{
"name": "Python",
"bytes": "671102"
},
{
"name": "Shell",
"bytes": "8897"
}
],
"symlink_target": ""
} |
'''
(By default, this script will be automatically invoked by download_fddb.sh)
Manually invoke this script to generate image list and annotation files with
absolute paths.
Please make sure this script stays aside with a folder fddb which contains
sub-folders: 2002, 2003, FDDB-folds.
Invoke this script again after you move the directory of images.
'''
import os
assert os.path.exists("fddb/2002"), "fddb iamge folder 'fddb/2002' not found"
assert os.path.exists("fddb/2003"), "fddb iamge folder 'fddb/2003' not found"
assert os.path.exists("fddb/FDDB-folds"), "fddb annotation folder 'fddb/FDDB-folds' not found"
os.chdir('fddb')
prefix = os.getcwd()
suffix = ".jpg"
x=os.popen("pwd | sed 's/ /\\ /g'").read().strip()
img_list_files = os.popen("find '{}/FDDB-folds' -name '*[0-9].txt'".format(x)).read().split('\n')
anot_list_files = os.popen("find '{}/FDDB-folds' -name '*List.txt'".format(x)).read().split('\n')
s = ""
for p in img_list_files:
if p == '' :
continue
with open(p,'r') as f:
lines = f.readlines()
lines = [os.path.join(prefix, l.replace('\n','')+suffix) for l in lines]
s += "\n".join(lines)+'\n'
with open("./MergedImagePath.txt",'w') as f:
f.write(s)
s = ""
for p in anot_list_files:
if p == '' :
continue
with open(p,'r') as f:
lines = f.readlines()
lines = [l.replace('\n','') for l in lines]
for i in range(len(lines)):
l = lines[i]
if(len(l.split("/")) == 5):
lines[i] = os.path.join(prefix, l+suffix)
s += '\n'.join(lines)+'\n'
with open("./MergedAnnotations.txt",'w') as f:
f.write(s)
print "Merged image path wrote to: "+os.path.join(os.getcwd(),"MergedImagePath.txt")
print "Merged annotations wrote to: "+os.path.join(os.getcwd(),"MergedAnnotations.txt")
| {
"content_hash": "479fff371a3617d62ba4f3e6ddda2b9f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 97,
"avg_line_length": 30.133333333333333,
"alnum_prop": 0.6299778761061947,
"repo_name": "Microos/FaceAnnotationTool",
"id": "d75141d990afac921b02ba1d275179d479e9e81d",
"size": "1808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen_absolute_path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "57605"
},
{
"name": "Python",
"bytes": "3296"
},
{
"name": "Shell",
"bytes": "349"
}
],
"symlink_target": ""
} |
import os
import os.path
import tempfile
import shutil
import json
from nose.tools import eq_
from nose.tools import with_setup
from build_pack_utils import utils
from common.integration import ErrorHelper
from common.components import BuildPackAssertHelper
from common.components import HttpdAssertHelper
from common.components import PhpAssertHelper
from common.components import NoWebServerAssertHelper
from common.components import NewRelicAssertHelper
from common.components import HhvmAssertHelper
from common.components import DownloadAssertHelper
from common.base import BaseCompileApp
# The New Relic build-pack extension is not an importable package, so it is
# loaded through the build pack's extension loader for testing.
newrelic = utils.load_extension('extensions/newrelic')
class TestNewRelic(object):
    """Unit tests for the NewRelicInstaller build-pack extension.

    NOTE(review): nose's ``with_setup`` decorator is documented for plain
    test *functions*; for test classes nose runs the ``setUp``/``tearDown``
    methods automatically, so the decorators below may be redundant -- confirm.
    """
    def setUp(self):
        # Create a throw-away app build dir containing php/etc/php.ini so the
        # installer has a real config file to inspect and modify.
        self.build_dir = tempfile.mkdtemp('build-')
        self.php_dir = os.path.join(self.build_dir, 'php', 'etc')
        os.makedirs(self.php_dir)
        shutil.copy('defaults/config/php/5.4.x/php.ini', self.php_dir)
    def tearDown(self):
        # Drop the temporary build directory created in setUp().
        if os.path.exists(self.build_dir):
            shutil.rmtree(self.build_dir)
    def testDefaults(self):
        """Installer context is seeded with all NEWRELIC_* default keys."""
        nr = newrelic.NewRelicInstaller(utils.FormattedDict({
            'BUILD_DIR': self.build_dir,
            'PHP_VM': 'php'
        }))
        eq_(True, 'NEWRELIC_HOST' in nr._ctx.keys())
        eq_(True, 'NEWRELIC_VERSION' in nr._ctx.keys())
        eq_(True, 'NEWRELIC_PACKAGE' in nr._ctx.keys())
        eq_(True, 'NEWRELIC_DOWNLOAD_URL' in nr._ctx.keys())
        eq_(True, 'NEWRELIC_HASH_DOWNLOAD_URL' in nr._ctx.keys())
        eq_(True, 'NEWRELIC_STRIP' in nr._ctx.keys())
    def testShouldNotInstall(self):
        """Without a license key or bound service, nothing is installed."""
        nr = newrelic.NewRelicInstaller(utils.FormattedDict({
            'BUILD_DIR': self.build_dir
        }))
        eq_(False, nr.should_install())
    @with_setup(setup=setUp, teardown=tearDown)
    def testShouldInstall(self):
        """A manually set NEWRELIC_LICENSE enables installation; agent and
        daemon paths are derived from the detected PHP installation."""
        ctx = utils.FormattedDict({
            'BUILD_DIR': self.build_dir,
            'NEWRELIC_LICENSE': 'JUNK_LICENSE',
            'VCAP_APPLICATION': {
                'name': 'app-name-1'
            },
            'PHP_VM': 'php'
        })
        nr = newrelic.NewRelicInstaller(ctx)
        eq_(True, nr.should_install())
        eq_('x64', nr._php_arch)
        eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20100525',
            nr._php_extn_dir)
        eq_(False, nr._php_zts)
        eq_('20100525', nr._php_api)
        eq_('@{HOME}/newrelic/agent/x64/newrelic-20100525.so', nr.newrelic_so)
        eq_('app-name-1', nr.app_name)
        eq_('JUNK_LICENSE', nr.license_key)
        eq_('@{HOME}/logs/newrelic-daemon.log', nr.log_path)
        eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
        eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
        eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
    @with_setup(setup=setUp, teardown=tearDown)
    def testShouldInstallService(self):
        """A bound 'newrelic' service supplies the license key via
        VCAP_SERVICES credentials."""
        ctx = utils.FormattedDict({
            'BUILD_DIR': self.build_dir,
            'VCAP_SERVICES': {
                'newrelic': [{
                    'name': 'newrelic',
                    'label': 'newrelic',
                    'tags': ['Monitoring'],
                    'plan': 'standard',
                    'credentials': {'licenseKey': 'LICENSE'}}]
            },
            'VCAP_APPLICATION': {
                'name': 'app-name-1'
            },
            'PHP_VM': 'php'
        })
        nr = newrelic.NewRelicInstaller(ctx)
        eq_(True, nr.should_install())
        eq_('x64', nr._php_arch)
        eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20100525',
            nr._php_extn_dir)
        eq_(False, nr._php_zts)
        eq_('20100525', nr._php_api)
        eq_('@{HOME}/newrelic/agent/x64/newrelic-20100525.so', nr.newrelic_so)
        eq_('app-name-1', nr.app_name)
        eq_('LICENSE', nr.license_key)
        eq_('@{HOME}/logs/newrelic-daemon.log', nr.log_path)
        eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
        eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
        eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
    @with_setup(setup=setUp, teardown=tearDown)
    def testShouldInstallServiceAndManual(self):
        """An explicit NEWRELIC_LICENSE overrides the bound service's key."""
        ctx = utils.FormattedDict({
            'BUILD_DIR': self.build_dir,
            'VCAP_SERVICES': {
                'newrelic': [{
                    'name': 'newrelic',
                    'label': 'newrelic',
                    'tags': ['Monitoring'],
                    'plan': 'standard',
                    'credentials': {'licenseKey': 'LICENSE'}}]
            },
            'NEWRELIC_LICENSE': 'LICENSE2',
            'VCAP_APPLICATION': {
                'name': 'app-name-2'
            },
            'PHP_VM': 'php'
        })
        nr = newrelic.NewRelicInstaller(ctx)
        eq_(True, nr.should_install())
        eq_('x64', nr._php_arch)
        eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20100525',
            nr._php_extn_dir)
        eq_(False, nr._php_zts)
        eq_('20100525', nr._php_api)
        eq_('@{HOME}/newrelic/agent/x64/newrelic-20100525.so', nr.newrelic_so)
        eq_('app-name-2', nr.app_name)
        eq_('LICENSE2', nr.license_key)
        eq_('@{HOME}/logs/newrelic-daemon.log', nr.log_path)
        eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
        eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
        eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
    @with_setup(setup=setUp, teardown=tearDown)
    def testModifyPhpIni(self):
        """modify_php_ini() appends the agent extension line and the
        [newrelic] settings block to the copied php.ini."""
        ctx = utils.FormattedDict({
            'BUILD_DIR': self.build_dir,
            'NEWRELIC_LICENSE': 'JUNK_LICENSE',
            'VCAP_APPLICATION': {
                'name': 'app-name-1'
            },
            'PHP_VM': 'php'
        })
        nr = newrelic.NewRelicInstaller(ctx)
        nr.modify_php_ini()
        with open(os.path.join(self.php_dir, 'php.ini'), 'rt') as php_ini:
            lines = php_ini.readlines()
        # list.index raises ValueError when missing, so `>= 0` doubles as a
        # presence check for each expected line.
        eq_(True, lines.index('extension=%s\n' % nr.newrelic_so) >= 0)
        eq_(True, lines.index('[newrelic]\n') >= 0)
        eq_(True, lines.index('newrelic.license=JUNK_LICENSE\n') >= 0)
        eq_(True, lines.index('newrelic.appname=%s\n' % nr.app_name) >= 0)
class TestNewRelicCompiled(BaseCompileApp):
    """End-to-end build-pack compile tests with New Relic enabled."""
    def __init__(self):
        # BaseCompileApp uses this name to pick the fixture application.
        self.app_name = 'app-1'
    def setUp(self):
        BaseCompileApp.setUp(self)
        # Point the extension at the test download server's URL template and
        # provide the license/app identity through the environment.
        self.opts.set_newrelic_download_url(
            '{DOWNLOAD_URL}/newrelic/{NEWRELIC_VERSION}/{NEWRELIC_PACKAGE}')
        os.environ['NEWRELIC_LICENSE'] = 'JUNK_LICENSE'
        os.environ['VCAP_APPLICATION'] = json.dumps({
            'name': 'app-name-1'
        })
    def test_with_httpd_and_newrelic(self):
        """Compiling with httpd + PHP installs the New Relic agent."""
        # helpers to confirm the environment
        bp = BuildPackAssertHelper()
        nr = NewRelicAssertHelper()
        httpd = HttpdAssertHelper()
        php = PhpAssertHelper()
        # set web server to httpd, since that's what we're expecting here
        self.opts.set_web_server('httpd')
        # run the compile step of the build pack
        output = ErrorHelper().compile(self.bp)
        # confirm downloads
        DownloadAssertHelper(22, 2).assert_downloads_from_output(output)
        # confirm start script
        bp.assert_start_script_is_correct(self.build_dir)
        httpd.assert_start_script_is_correct(self.build_dir)
        php.assert_start_script_is_correct(self.build_dir)
        # confirm bp utils installed
        bp.assert_scripts_are_installed(self.build_dir)
        bp.assert_config_options(self.build_dir)
        # check env & proc files
        httpd.assert_contents_of_procs_file(self.build_dir)
        httpd.assert_contents_of_env_file(self.build_dir)
        php.assert_contents_of_procs_file(self.build_dir)
        php.assert_contents_of_env_file(self.build_dir)
        # webdir exists
        httpd.assert_web_dir_exists(self.build_dir, self.opts.get_webdir())
        # check php & httpd installed
        httpd.assert_files_installed(self.build_dir)
        php.assert_files_installed(self.build_dir)
        nr.assert_files_installed(self.build_dir)
    def test_with_httpd_hhvm_and_newrelic(self):
        """With the HHVM runtime, New Relic must NOT be installed."""
        # helpers to confirm the environment
        bp = BuildPackAssertHelper()
        nr = NewRelicAssertHelper()
        httpd = HttpdAssertHelper()
        hhvm = HhvmAssertHelper()
        # set web server to httpd, since that's what we're expecting here
        self.opts.set_php_vm('hhvm')
        self.opts.set_hhvm_download_url(
            '{DOWNLOAD_URL}/hhvm/{HHVM_VERSION}/{HHVM_PACKAGE}')
        self.opts.set_web_server('httpd')
        # run the compile step of the build pack
        output = ErrorHelper().compile(self.bp)
        # confirm downloads
        DownloadAssertHelper(16, 2).assert_downloads_from_output(output)
        # confirm start script
        bp.assert_start_script_is_correct(self.build_dir)
        httpd.assert_start_script_is_correct(self.build_dir)
        hhvm.assert_start_script_is_correct(self.build_dir)
        # confirm bp utils installed
        bp.assert_scripts_are_installed(self.build_dir)
        bp.assert_config_options(self.build_dir)
        # check env & proc files
        httpd.assert_contents_of_procs_file(self.build_dir)
        httpd.assert_contents_of_env_file(self.build_dir)
        hhvm.assert_contents_of_procs_file(self.build_dir)
        hhvm.assert_contents_of_env_file(self.build_dir)
        # webdir exists
        httpd.assert_web_dir_exists(self.build_dir, self.opts.get_webdir())
        # check php & httpd installed
        httpd.assert_files_installed(self.build_dir)
        hhvm.assert_files_installed(self.build_dir)
        # Test NewRelic should not be installed w/HHVM
        nr.is_not_installed(self.build_dir)
class TestNewRelicWithApp5(BaseCompileApp):
    """Compile test: New Relic with a standalone (no web server) PHP app."""
    def __init__(self):
        # BaseCompileApp uses this name to pick the fixture application.
        self.app_name = 'app-5'
    def setUp(self):
        BaseCompileApp.setUp(self)
        # Point the extension at the test download server's URL template and
        # provide the license/app identity through the environment.
        self.opts.set_newrelic_download_url(
            '{DOWNLOAD_URL}/newrelic/{NEWRELIC_VERSION}/{NEWRELIC_PACKAGE}')
        os.environ['NEWRELIC_LICENSE'] = 'JUNK_LICENSE'
        os.environ['VCAP_APPLICATION'] = json.dumps({
            'name': 'app-name-1'
        })
    def test_standalone(self):
        """With web server 'none', PHP CLI plus New Relic are installed."""
        # helpers to confirm the environment
        bp = BuildPackAssertHelper()
        php = PhpAssertHelper()
        none = NoWebServerAssertHelper()
        nr = NewRelicAssertHelper()
        # no web server
        self.opts.set_web_server('none')
        # run the compile step of the build pack
        output = ErrorHelper().compile(self.bp)
        # confirm downloads
        DownloadAssertHelper(7, 1).assert_downloads_from_output(output)
        # confirm httpd and nginx are not installed
        none.assert_no_web_server_is_installed(self.build_dir)
        # confirm start script
        bp.assert_start_script_is_correct(self.build_dir)
        php.assert_start_script_is_correct(self.build_dir)
        # confirm bp utils installed
        bp.assert_scripts_are_installed(self.build_dir)
        # check env & proc files
        none.assert_contents_of_procs_file(self.build_dir)
        php.assert_contents_of_env_file(self.build_dir)
        # webdir exists
        none.assert_no_web_dir(self.build_dir, self.opts.get_webdir())
        # check php cli installed
        none.assert_files_installed(self.build_dir)
        nr.assert_files_installed(self.build_dir)
| {
"content_hash": "43d3c7c4c27794ebbe90103e1767f3cf",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 78,
"avg_line_length": 40.587188612099645,
"alnum_prop": 0.6013152126260413,
"repo_name": "pipeflo/php-moodle-buildpack",
"id": "05065e6ed7907fae349b63c05c9a31e9c4e5a9fa",
"size": "11405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_newrelic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "2528"
},
{
"name": "Python",
"bytes": "242690"
},
{
"name": "Ruby",
"bytes": "600"
},
{
"name": "Shell",
"bytes": "4168"
}
],
"symlink_target": ""
} |
from importlib import import_module
# Django
from django.conf import settings
from django.db.models.signals import post_migrate
# External
try:
from south.signals import post_migrate
South = True
except ImportError:
South = False
# User
import notifier
from notifier.models import Backend
from notifier import settings as notifier_settings
###############################################################################
## Code
###############################################################################
def create_backends(app, **kwargs):
    """
    Creates/Updates Backend objects based on NOTIFIER_BACKENDS settings.

    All values except `enabled` are derived from the Backend class and
    not supposed to be modified by user. They will be over-written on restart.
    """
    # Under South, post_migrate fires once per migrated app; react only to ours.
    if South and app != 'notifier':
        return
    for klass in notifier_settings.BACKEND_CLASSES:
        try:
            backend = Backend.objects.get(name=klass.name)
        except Backend.DoesNotExist:
            # First time this backend is seen: create it, enabled by default.
            backend = Backend()
            backend.enabled = True
        # Refresh the derived fields on every run.  This used to live in a
        # ``finally`` clause, which also ran when ``get()`` failed with any
        # *other* exception (e.g. MultipleObjectsReturned), referencing the
        # unbound ``backend`` and masking the real error with a NameError.
        backend.display_name = klass.display_name
        backend.name = klass.name
        backend.description = klass.description
        backend.klass = '.'.join([klass.__module__, klass.__name__])
        backend.save()
def create_notifications(app, **kwargs):
    """
    Import the ``notifications`` module of every app in INSTALLED_APPS so
    that the notifications they declare get registered.
    """
    if South and app != 'notifier':
        return
    for app_name in settings.INSTALLED_APPS:
        module_path = app_name + '.notifications'
        try:
            import_module(module_path)
        except ImportError:
            # The app simply does not define any notifications.
            pass
# Wire the handlers to the post_migrate signal (South's version when South is
# installed, otherwise Django's).  dispatch_uid prevents duplicate
# registration when this module is imported more than once.
# NOTE(review): under plain Django the receivers are registered with
# ``sender=notifier.models``; confirm that the signal used actually passes an
# ``app`` argument matching the handlers' signatures.
if South:
    post_migrate.connect(
        create_backends,
        dispatch_uid="notifier.management.create_backends"
    )
    post_migrate.connect(
        create_notifications,
        dispatch_uid="notifier.management.create_notifications",
    )
else:
    post_migrate.connect(
        create_backends,
        dispatch_uid="notifier.management.create_backends",
        sender=notifier.models
    )
    post_migrate.connect(
        create_notifications,
        dispatch_uid="notifier.management.create_notifications",
        sender=notifier.models
    )
| {
"content_hash": "438295a3ca94360250a4ed38e0e3d7f7",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 27.662650602409638,
"alnum_prop": 0.60801393728223,
"repo_name": "iberben/django-notifier",
"id": "e77c01a01402e4575621c81b74ea7296844b445e",
"size": "2477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifier/management/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "45708"
}
],
"symlink_target": ""
} |
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from moments.extensions import mail
def send_async_email(app, msg):
    """Deliver *msg* from a worker thread, inside *app*'s application context.

    Flask-Mail reads its configuration from the active app context, which a
    background thread does not inherit, so one is pushed explicitly here.
    """
    ctx = app.app_context()
    with ctx:
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render *template* (``.txt`` and ``.html`` variants) and send it to
    *to* asynchronously.

    Returns the worker thread so callers may ``join()`` it if needed.
    """
    full_subject = current_app.config['MOMENTS_MAIL_SUBJECT_PREFIX'] + subject
    msg = Message(full_subject, recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    # current_app is only a proxy bound to this thread; the worker thread
    # needs the real application object to push its own context.
    flask_app = current_app._get_current_object()
    thr = Thread(target=send_async_email, args=[flask_app, msg])
    thr.start()
    return thr
def send_confirm_account_email(user, token):
    """Send the account-confirmation email to *user* (subject in Chinese)."""
    # A stray debug statement (``print user.email``) was left here; removed.
    send_email(subject=u'确认你的账户', to=user.email, template='emails/confirm', user=user, token=token)
def send_reset_password_email(user, token):
    """Send the password-reset confirmation email to *user* (subject in Chinese)."""
    send_email(
        subject=u'密码重置确认',
        to=user.email,
        template='emails/reset_password',
        user=user,
        token=token,
    )
| {
"content_hash": "f16214044a6a94e03297ff8e0a37b7b6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 106,
"avg_line_length": 29.84375,
"alnum_prop": 0.7047120418848167,
"repo_name": "greyli/moments-dev",
"id": "09ca47915e5d6b5faf0afdb0e56fdd8fc236deab",
"size": "1003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moments/emails.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2488"
},
{
"name": "HTML",
"bytes": "45617"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "81758"
}
],
"symlink_target": ""
} |
""" test the scalar Timestamp """
import calendar
from datetime import datetime, timedelta
import locale
import unicodedata
import dateutil
from dateutil.tz import tzutc
import numpy as np
import pytest
import pytz
from pytz import timezone, utc
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz, get_timezone
from pandas.compat import PY2, PY3, long
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas import NaT, Period, Timedelta, Timestamp
import pandas.util.testing as tm
from pandas.tseries import offsets
class TestTimestampProperties(object):
    """Tests for the scalar attribute properties of ``Timestamp``."""

    def test_properties_business(self):
        """Business ('B') frequency suppresses weekend month/quarter
        starts/ends; a freq-less control stamp still reports them."""
        ts = Timestamp('2017-10-01', freq='B')
        control = Timestamp('2017-10-01')
        assert ts.dayofweek == 6
        assert not ts.is_month_start    # not a weekday
        assert not ts.is_quarter_start  # not a weekday
        # Control case: non-business is month/qtr start
        assert control.is_month_start
        assert control.is_quarter_start

        ts = Timestamp('2017-09-30', freq='B')
        control = Timestamp('2017-09-30')
        assert ts.dayofweek == 5
        assert not ts.is_month_end    # not a weekday
        assert not ts.is_quarter_end  # not a weekday
        # Control case: non-business is month/qtr end
        assert control.is_month_end
        assert control.is_quarter_end

    def test_fields(self):
        """Datetime field accessors return plain ints (GH 10050, GH 13303)."""
        def check(value, equal):
            # that we are int/long like
            assert isinstance(value, (int, long))
            assert value == equal

        # GH 10050
        ts = Timestamp('2015-05-10 09:06:03.000100001')
        check(ts.year, 2015)
        check(ts.month, 5)
        check(ts.day, 10)
        check(ts.hour, 9)
        check(ts.minute, 6)
        check(ts.second, 3)
        # ``millisecond`` intentionally does not exist on Timestamp.
        msg = "'Timestamp' object has no attribute 'millisecond'"
        with pytest.raises(AttributeError, match=msg):
            ts.millisecond
        check(ts.microsecond, 100)
        check(ts.nanosecond, 1)
        check(ts.dayofweek, 6)
        check(ts.quarter, 2)
        check(ts.dayofyear, 130)
        check(ts.week, 19)
        check(ts.daysinmonth, 31)  # was asserted twice; duplicate removed

        # GH 13303
        ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
        check(ts.year, 2014)
        check(ts.month, 12)
        check(ts.day, 31)
        check(ts.hour, 23)
        check(ts.minute, 59)
        check(ts.second, 0)
        msg = "'Timestamp' object has no attribute 'millisecond'"
        with pytest.raises(AttributeError, match=msg):
            ts.millisecond
        check(ts.microsecond, 0)
        check(ts.nanosecond, 0)
        check(ts.dayofweek, 2)
        check(ts.quarter, 4)
        check(ts.dayofyear, 365)
        check(ts.week, 1)
        check(ts.daysinmonth, 31)

        # All *_start flags are True on Jan 1st regardless of offset.
        ts = Timestamp('2014-01-01 00:00:00+01:00')
        starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
        for start in starts:
            assert getattr(ts, start)
        ts = Timestamp('2014-12-31 23:59:59+01:00')
        ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
        for end in ends:
            assert getattr(ts, end)

    # GH 12806
    @pytest.mark.parametrize('data',
                             [Timestamp('2017-08-28 23:00:00'),
                              Timestamp('2017-08-28 23:00:00', tz='EST')])
    @pytest.mark.parametrize('time_locale', [
        None] if tm.get_locales() is None else [None] + tm.get_locales())
    def test_names(self, data, time_locale):
        """day_name()/month_name() honour the requested locale (GH 17354);
        the deprecated ``weekday_name`` still works but warns."""
        # Test .weekday_name, .day_name(), .month_name
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            assert data.weekday_name == 'Monday'
        if time_locale is None:
            expected_day = 'Monday'
            expected_month = 'August'
        else:
            with tm.set_locale(time_locale, locale.LC_TIME):
                expected_day = calendar.day_name[0].capitalize()
                expected_month = calendar.month_name[8].capitalize()

        result_day = data.day_name(time_locale)
        result_month = data.month_name(time_locale)

        # Work around https://github.com/pandas-dev/pandas/issues/22342
        # different normalizations
        if not PY2:
            expected_day = unicodedata.normalize("NFD", expected_day)
            expected_month = unicodedata.normalize("NFD", expected_month)
            result_day = unicodedata.normalize("NFD", result_day)
            result_month = unicodedata.normalize("NFD", result_month)

        assert result_day == expected_day
        assert result_month == expected_month

        # Test NaT
        nan_ts = Timestamp(NaT)
        assert np.isnan(nan_ts.day_name(time_locale))
        assert np.isnan(nan_ts.month_name(time_locale))

    def test_is_leap_year(self, tz_naive_fixture):
        """is_leap_year handles the century rule (2100 is NOT a leap year)."""
        tz = tz_naive_fixture
        # GH 13727
        dt = Timestamp('2000-01-01 00:00:00', tz=tz)
        assert dt.is_leap_year
        assert isinstance(dt.is_leap_year, bool)

        dt = Timestamp('1999-01-01 00:00:00', tz=tz)
        assert not dt.is_leap_year

        dt = Timestamp('2004-01-01 00:00:00', tz=tz)
        assert dt.is_leap_year

        dt = Timestamp('2100-01-01 00:00:00', tz=tz)
        assert not dt.is_leap_year

    def test_woy_boundary(self):
        """Week-of-year follows the ISO 8601 standard at year boundaries."""
        # make sure weeks at year boundaries are correct
        d = datetime(2013, 12, 31)
        result = Timestamp(d).week
        expected = 1  # ISO standard
        assert result == expected

        d = datetime(2008, 12, 28)
        result = Timestamp(d).week
        expected = 52  # ISO standard
        assert result == expected

        d = datetime(2009, 12, 31)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected

        d = datetime(2010, 1, 1)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected

        d = datetime(2010, 1, 3)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected

        result = np.array([Timestamp(datetime(*args)).week
                           for args in [(2000, 1, 1), (2000, 1, 2), (
                               2005, 1, 1), (2005, 1, 2)]])
        assert (result == [52, 52, 53, 53]).all()

    def test_resolution(self):
        """Timestamp.resolution is one nanosecond (GH#21336, GH#21365)."""
        dt = Timestamp('2100-01-01 00:00:00')
        assert dt.resolution == Timedelta(nanoseconds=1)
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
if tz is not None:
result = Timestamp(result).tz_convert('UTC')
else:
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result).tz_convert('UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with pytest.raises(TypeError, match='Cannot convert input'):
Timestamp(slice(2))
with pytest.raises(ValueError, match='Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with pytest.raises(TypeError, match='must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with pytest.raises(ValueError, match='at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with pytest.raises(ValueError, match="Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_strptime(self):
# GH25016
# Test support for Timestamp.strptime
fmt = '%Y%m%d-%H%M%S-%f%z'
ts = '20190129-235348-000001+0000'
with pytest.raises(NotImplementedError):
Timestamp.strptime(ts, fmt)
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('z', ['Z0', 'Z00'])
def test_constructor_invalid_Z0_isostring(self, z):
# GH 8910
with pytest.raises(ValueError):
Timestamp('2014-11-02 01:00{}'.format(z))
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
Timestamp(dt64)
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
    def test_today(self):
        """Timestamp('today') / Timestamp.today() track datetime.today(),
        naive and tz-aware, within a one-second tolerance."""
        ts_from_string = Timestamp('today')
        ts_from_method = Timestamp.today()
        ts_datetime = datetime.today()
        ts_from_string_tz = Timestamp('today', tz='US/Eastern')
        ts_from_method_tz = Timestamp.today(tz='US/Eastern')
        # Check that the delta between the times is less than 1s (arbitrarily
        # small)
        delta = Timedelta(seconds=1)
        assert abs(ts_from_method - ts_from_string) < delta
        assert abs(ts_datetime - ts_from_method) < delta
        assert abs(ts_from_method_tz - ts_from_string_tz) < delta
        assert (abs(ts_from_string_tz.tz_localize(None) -
                    ts_from_method_tz.tz_localize(None)) < delta)
    @pytest.mark.parametrize('tz', [None, pytz.timezone('US/Pacific')])
    def test_disallow_setting_tz(self, tz):
        """The tz attribute is read-only; assignment must raise."""
        # GH 3746
        ts = Timestamp('2010')
        with pytest.raises(AttributeError):
            ts.tz = tz
    @pytest.mark.parametrize('offset', ['+0300', '+0200'])
    def test_construct_timestamp_near_dst(self, offset):
        """Round-tripping a tz-aware Timestamp near a DST fold is lossless."""
        # GH 20854
        expected = Timestamp('2016-10-30 03:00:00{}'.format(offset),
                             tz='Europe/Helsinki')
        result = Timestamp(expected).tz_convert('Europe/Helsinki')
        assert result == expected
    @pytest.mark.parametrize('arg', [
        '2013/01/01 00:00:00+09:00', '2013-01-01 00:00:00+09:00'])
    def test_construct_with_different_string_format(self, arg):
        """Slash- and dash-separated datetime strings parse identically."""
        # GH 12064
        result = Timestamp(arg)
        expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))
        assert result == expected
    def test_construct_timestamp_preserve_original_frequency(self):
        """Copy-constructing from a Timestamp keeps its freq attribute."""
        # GH 22311
        result = Timestamp(Timestamp('2010-08-08', freq='D')).freq
        expected = offsets.Day()
        assert result == expected
    def test_constructor_invalid_frequency(self):
        """A non-frequency freq argument raises a descriptive ValueError."""
        # GH 22311
        with pytest.raises(ValueError, match="Invalid frequency:"):
            Timestamp('2012-01-01', freq=[])
    @pytest.mark.parametrize('box', [datetime, Timestamp])
    def test_depreciate_tz_and_tzinfo_in_datetime_input(self, box):
        """Passing both a tzinfo-carrying input and tz= warns (deprecated)."""
        # NOTE(review): "depreciate" is a typo for "deprecate"; kept because
        # renaming a test method changes the suite's public test ids.
        # GH 23579
        kwargs = {'year': 2018, 'month': 1, 'day': 1, 'tzinfo': utc}
        with tm.assert_produces_warning(FutureWarning):
            Timestamp(box(**kwargs), tz='US/Pacific')
    def test_dont_convert_dateutil_utc_to_pytz_utc(self):
        """A dateutil tzutc must be kept as-is, not coerced to pytz.utc."""
        result = Timestamp(datetime(2018, 1, 1), tz=tzutc())
        expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc())
        assert result == expected
class TestTimestamp(object):
    """Core Timestamp behaviour: tz handling, numpy interop, classmethod
    constructors, sub-second unit handling and value round-trips."""
    def test_tz(self):
        """tz_localize/tz_convert round-trips, including nanosecond fields."""
        tstr = '2014-02-01 09:00'
        ts = Timestamp(tstr)
        local = ts.tz_localize('Asia/Tokyo')
        assert local.hour == 9
        assert local == Timestamp(tstr, tz='Asia/Tokyo')
        conv = local.tz_convert('US/Eastern')
        assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
        assert conv.hour == 19
        # preserves nanosecond
        ts = Timestamp(tstr) + offsets.Nano(5)
        local = ts.tz_localize('Asia/Tokyo')
        assert local.hour == 9
        assert local.nanosecond == 5
        conv = local.tz_convert('US/Eastern')
        assert conv.nanosecond == 5
        assert conv.hour == 19
    def test_utc_z_designator(self):
        """The ISO-8601 'Z' suffix maps to UTC."""
        assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) is utc
    def test_asm8(self):
        """.asm8 mirrors the raw i8 value for extremes, small ints and NaT."""
        np.random.seed(7960929)
        ns = [Timestamp.min.value, Timestamp.max.value, 1000]
        for n in ns:
            assert (Timestamp(n).asm8.view('i8') ==
                    np.datetime64(n, 'ns').view('i8') == n)
        assert (Timestamp('nat').asm8.view('i8') ==
                np.datetime64('nat', 'ns').view('i8'))
    def test_class_ops_pytz(self):
        """Classmethod constructors agree with datetime's (pytz zones);
        compared at whole-second resolution to tolerate call latency."""
        def compare(x, y):
            assert (int(Timestamp(x).value / 1e9) ==
                    int(Timestamp(y).value / 1e9))
        compare(Timestamp.now(), datetime.now())
        compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
        compare(Timestamp.utcnow(), datetime.utcnow())
        compare(Timestamp.today(), datetime.today())
        current_time = calendar.timegm(datetime.now().utctimetuple())
        compare(Timestamp.utcfromtimestamp(current_time),
                datetime.utcfromtimestamp(current_time))
        compare(Timestamp.fromtimestamp(current_time),
                datetime.fromtimestamp(current_time))
        date_component = datetime.utcnow()
        time_component = (date_component + timedelta(minutes=10)).time()
        compare(Timestamp.combine(date_component, time_component),
                datetime.combine(date_component, time_component))
    def test_class_ops_dateutil(self):
        """Same as test_class_ops_pytz but with dateutil's tzutc; np.round
        guards against sub-second drift between the paired calls."""
        def compare(x, y):
            assert (int(np.round(Timestamp(x).value / 1e9)) ==
                    int(np.round(Timestamp(y).value / 1e9)))
        compare(Timestamp.now(), datetime.now())
        compare(Timestamp.now('UTC'), datetime.now(tzutc()))
        compare(Timestamp.utcnow(), datetime.utcnow())
        compare(Timestamp.today(), datetime.today())
        current_time = calendar.timegm(datetime.now().utctimetuple())
        compare(Timestamp.utcfromtimestamp(current_time),
                datetime.utcfromtimestamp(current_time))
        compare(Timestamp.fromtimestamp(current_time),
                datetime.fromtimestamp(current_time))
        date_component = datetime.utcnow()
        time_component = (date_component + timedelta(minutes=10)).time()
        compare(Timestamp.combine(date_component, time_component),
                datetime.combine(date_component, time_component))
    def test_basics_nanos(self):
        """Nanosecond remainders survive construction from raw i8 values."""
        val = np.int64(946684800000000000).view('M8[ns]')
        stamp = Timestamp(val.view('i8') + 500)
        assert stamp.year == 2000
        assert stamp.month == 1
        assert stamp.microsecond == 0
        assert stamp.nanosecond == 500
        # GH 14415
        val = np.iinfo(np.int64).min + 80000000000000
        stamp = Timestamp(val)
        assert stamp.year == 1677
        assert stamp.month == 9
        assert stamp.day == 21
        assert stamp.microsecond == 145224
        assert stamp.nanosecond == 192
    @pytest.mark.parametrize('value, check_kwargs', [
        [946688461000000000, {}],
        [946688461000000000 / long(1000), dict(unit='us')],
        [946688461000000000 / long(1000000), dict(unit='ms')],
        [946688461000000000 / long(1000000000), dict(unit='s')],
        [10957, dict(unit='D', h=0)],
        pytest.param((946688461000000000 + 500000) / long(1000000000),
                     dict(unit='s', us=499, ns=964),
                     marks=pytest.mark.skipif(not PY3,
                                              reason='using truediv, so these'
                                                     ' are like floats')),
        pytest.param((946688461000000000 + 500000000) / long(1000000000),
                     dict(unit='s', us=500000),
                     marks=pytest.mark.skipif(not PY3,
                                              reason='using truediv, so these'
                                                     ' are like floats')),
        pytest.param((946688461000000000 + 500000) / long(1000000),
                     dict(unit='ms', us=500),
                     marks=pytest.mark.skipif(not PY3,
                                              reason='using truediv, so these'
                                                     ' are like floats')),
        pytest.param((946688461000000000 + 500000) / long(1000000000),
                     dict(unit='s'),
                     marks=pytest.mark.skipif(PY3,
                                              reason='get chopped in py2')),
        pytest.param((946688461000000000 + 500000000) / long(1000000000),
                     dict(unit='s'),
                     marks=pytest.mark.skipif(PY3,
                                              reason='get chopped in py2')),
        pytest.param((946688461000000000 + 500000) / long(1000000),
                     dict(unit='ms'),
                     marks=pytest.mark.skipif(PY3,
                                              reason='get chopped in py2')),
        [(946688461000000000 + 500000) / long(1000), dict(unit='us', us=500)],
        [(946688461000000000 + 500000000) / long(1000000),
         dict(unit='ms', us=500000)],
        [946688461000000000 / 1000.0 + 5, dict(unit='us', us=5)],
        [946688461000000000 / 1000.0 + 5000, dict(unit='us', us=5000)],
        [946688461000000000 / 1000000.0 + 0.5, dict(unit='ms', us=500)],
        [946688461000000000 / 1000000.0 + 0.005, dict(unit='ms', us=5, ns=5)],
        [946688461000000000 / 1000000000.0 + 0.5, dict(unit='s', us=500000)],
        [10957 + 0.5, dict(unit='D', h=12)]])
    def test_unit(self, value, check_kwargs):
        """Numeric inputs scaled to various units all land on the same
        instant (2000-01-01, hour/second/us/ns given by check_kwargs)."""
        def check(value, unit=None, h=1, s=1, us=0, ns=0):
            stamp = Timestamp(value, unit=unit)
            assert stamp.year == 2000
            assert stamp.month == 1
            assert stamp.day == 1
            assert stamp.hour == h
            if unit != 'D':
                assert stamp.minute == 1
                assert stamp.second == s
                assert stamp.microsecond == us
            else:
                assert stamp.minute == 0
                assert stamp.second == 0
                assert stamp.microsecond == 0
            assert stamp.nanosecond == ns
        check(value, **check_kwargs)
    def test_roundtrip(self):
        """Adding Timedelta values to .value matches string-built Timestamps."""
        # test value to string and back conversions
        # further test accessors
        base = Timestamp('20140101 00:00:00')
        result = Timestamp(base.value + Timedelta('5ms').value)
        assert result == Timestamp(str(base) + ".005000")
        assert result.microsecond == 5000
        result = Timestamp(base.value + Timedelta('5us').value)
        assert result == Timestamp(str(base) + ".000005")
        assert result.microsecond == 5
        result = Timestamp(base.value + Timedelta('5ns').value)
        assert result == Timestamp(str(base) + ".000000005")
        assert result.nanosecond == 5
        assert result.microsecond == 0
        result = Timestamp(base.value + Timedelta('6ms 5us').value)
        assert result == Timestamp(str(base) + ".006005")
        assert result.microsecond == 5 + 6 * 1000
        result = Timestamp(base.value + Timedelta('200ms 5us').value)
        assert result == Timestamp(str(base) + ".200005")
        assert result.microsecond == 5 + 200 * 1000
    def test_hash_equivalent(self):
        """A Timestamp hashes equal to the datetime it wraps."""
        d = {datetime(2011, 1, 1): 5}
        stamp = Timestamp(datetime(2011, 1, 1))
        assert d[stamp] == 5
    def test_tz_conversion_freq(self, tz_naive_fixture):
        """tz_localize and tz_convert preserve the freq attribute."""
        # GH25241
        t1 = Timestamp('2019-01-01 10:00', freq='H')
        assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
        t2 = Timestamp('2019-01-02 12:00', tz='UTC', freq='T')
        assert t2.tz_convert(tz='UTC').freq == t2.freq
class TestTimestampNsOperations(object):
    """Nanosecond-resolution arithmetic and nanosecond string parsing."""
    def setup_method(self, method):
        # Fresh reference instant for each test.
        self.timestamp = Timestamp(datetime.utcnow())
    def assert_ns_timedelta(self, modified_timestamp, expected_value):
        """Assert the i8 delta (in ns) between self.timestamp and the arg."""
        value = self.timestamp.value
        modified_value = modified_timestamp.value
        assert modified_value - value == expected_value
    def test_timedelta_ns_arithmetic(self):
        self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
                                 -123)
    def test_timedelta_ns_based_arithmetic(self):
        self.assert_ns_timedelta(self.timestamp + np.timedelta64(
            1234567898, 'ns'), 1234567898)
    def test_timedelta_us_arithmetic(self):
        self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
                                 -123000)
    def test_timedelta_ms_arithmetic(self):
        time = self.timestamp + np.timedelta64(-123, 'ms')
        self.assert_ns_timedelta(time, -123000000)
    def test_nanosecond_string_parsing(self):
        """Nanosecond strings parse exactly, with and without time zones."""
        ts = Timestamp('2013-05-01 07:15:45.123456789')
        # GH 7878
        expected_repr = '2013-05-01 07:15:45.123456789'
        expected_value = 1367392545123456789
        assert ts.value == expected_value
        assert expected_repr in repr(ts)
        ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
        assert ts.value == expected_value - 9 * 3600 * 1000000000
        assert expected_repr in repr(ts)
        ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
        assert ts.value == expected_value
        assert expected_repr in repr(ts)
        ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
        assert ts.value == expected_value + 4 * 3600 * 1000000000
        assert expected_repr in repr(ts)
        # GH 10041
        ts = Timestamp('20130501T071545.123456789')
        assert ts.value == expected_value
        assert expected_repr in repr(ts)
    def test_nanosecond_timestamp(self):
        """Nano offsets survive construction, copying and datetime64 input."""
        # GH 7610
        expected = 1293840000000000005
        t = Timestamp('2011-01-01') + offsets.Nano(5)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
        assert t.value == expected
        assert t.nanosecond == 5
        t = Timestamp(t)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
        assert t.value == expected
        assert t.nanosecond == 5
        t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000005Z'))
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
        assert t.value == expected
        assert t.nanosecond == 5
        expected = 1293840000000000010
        t = t + offsets.Nano(5)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
        assert t.value == expected
        assert t.nanosecond == 10
        t = Timestamp(t)
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
        assert t.value == expected
        assert t.nanosecond == 10
        t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000010Z'))
        assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
        assert t.value == expected
        assert t.nanosecond == 10
class TestTimestampToJulianDate(object):
    """Spot-check Timestamp.to_julian_date against known Julian day numbers."""
    def test_compare_1700(self):
        assert Timestamp('1700-06-23').to_julian_date() == 2342145.5
    def test_compare_2000(self):
        assert Timestamp('2000-04-12').to_julian_date() == 2451646.5
    def test_compare_2100(self):
        assert Timestamp('2100-08-12').to_julian_date() == 2488292.5
    def test_compare_hour01(self):
        assert (Timestamp('2000-08-12T01:00:00').to_julian_date() ==
                2451768.5416666666666666)
    def test_compare_hour13(self):
        assert (Timestamp('2000-08-12T13:00:00').to_julian_date() ==
                2451769.0416666666666666)
class TestTimestampConversion(object):
    """Conversions to datetime, datetime64 and numpy scalars."""
    def test_conversion(self):
        # GH#9255
        ts = Timestamp('2000-01-01')
        result = ts.to_pydatetime()
        expected = datetime(2000, 1, 1)
        assert result == expected
        assert type(result) == type(expected)
        result = ts.to_datetime64()
        expected = np.datetime64(ts.value, 'ns')
        assert result == expected
        assert type(result) == type(expected)
        assert result.dtype == expected.dtype
    def test_to_pydatetime_nonzero_nano(self):
        ts = Timestamp('2011-01-01 9:00:00.123456789')
        # Warn the user of data loss (nanoseconds).
        with tm.assert_produces_warning(UserWarning,
                                        check_stacklevel=False):
            expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
            result = ts.to_pydatetime()
            assert result == expected
    def test_timestamp_to_datetime(self):
        # tz-aware round-trip preserves instant and tzinfo.
        stamp = Timestamp('20090415', tz='US/Eastern', freq='D')
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo
    def test_timestamp_to_datetime_dateutil(self):
        stamp = Timestamp('20090415', tz='dateutil/US/Eastern', freq='D')
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo
    def test_timestamp_to_datetime_explicit_pytz(self):
        stamp = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D')
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo
    @td.skip_if_windows_python_3
    def test_timestamp_to_datetime_explicit_dateutil(self):
        stamp = Timestamp('20090415', tz=gettz('US/Eastern'), freq='D')
        dtval = stamp.to_pydatetime()
        assert stamp == dtval
        assert stamp.tzinfo == dtval.tzinfo
    def test_to_datetime_bijective(self):
        # Ensure that converting to datetime and back only loses precision
        # by going from nanoseconds to microseconds.
        exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
        with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
            assert (Timestamp(Timestamp.max.to_pydatetime()).value / 1000 ==
                    Timestamp.max.value / 1000)
        exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
        with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
            assert (Timestamp(Timestamp.min.to_pydatetime()).value / 1000 ==
                    Timestamp.min.value / 1000)
    def test_to_period_tz_warning(self):
        # GH#21333 make sure a warning is issued when timezone
        # info is lost
        ts = Timestamp('2009-04-15 16:17:18', tz='US/Eastern')
        with tm.assert_produces_warning(UserWarning):
            # warning that timezone info will be lost
            ts.to_period('D')
    def test_to_numpy_alias(self):
        # GH 24653: alias .to_numpy() for scalars
        ts = Timestamp(datetime.now())
        assert ts.to_datetime64() == ts.to_numpy()
| {
"content_hash": "092d10d2c86701643989c7ddbd8fe4b0",
"timestamp": "",
"source": "github",
"line_count": 988,
"max_line_length": 88,
"avg_line_length": 39.70040485829959,
"alnum_prop": 0.575769936773404,
"repo_name": "MJuddBooth/pandas",
"id": "b55d00b44fd6736d43003d6031d7d5b201ef5314",
"size": "39224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/scalar/timestamp/test_timestamp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406766"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14858932"
},
{
"name": "Shell",
"bytes": "29575"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``Book.copies`` (integer count of owned copies, default 1)."""
    dependencies = [
        ('books', '0005_auto_20170423_0859'),
    ]
    operations = [
        migrations.AddField(
            model_name='book',
            name='copies',
            field=models.IntegerField(default=1),
        ),
    ]
| {
"content_hash": "1fa055583faca4cda00cbe819aca94f2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 20.61111111111111,
"alnum_prop": 0.5849056603773585,
"repo_name": "AthmanZiri/vitabu",
"id": "650622c839e60ec5d06c14effd47b5c0126c892e",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitabu/books/migrations/0006_book_copies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124414"
},
{
"name": "HTML",
"bytes": "9099"
},
{
"name": "JavaScript",
"bytes": "1311"
},
{
"name": "Python",
"bytes": "14126"
}
],
"symlink_target": ""
} |
import os, pprint
from os import removedirs
def underscore_to_camelcase(name):
    """Convert a snake_case name to camelCase (first letter lower-cased)."""
    camel = "".join(part.title() for part in name.split("_"))
    return camel[0].lower() + camel[1:]
def underscore_to_camelcase_upper(name):
    """Convert a snake_case name to PascalCase (first letter upper-cased)."""
    camel = "".join(part.title() for part in name.split("_"))
    return camel[0].upper() + camel[1:]
def remove_folder(folder):
    """ Remove folder with all its files """
    # os.walk with topdown=False yields leaf directories first, so files and
    # the now-empty subdirectories can be removed bottom-up.  The original
    # version deleted only files, which made removedirs() fail with
    # "Directory not empty" for any tree containing subdirectories.
    for root, dirs, files in os.walk(folder, topdown=False):
        for name in files:
            os.remove(os.path.join(root, name))
        for name in dirs:
            os.rmdir(os.path.join(root, name))
    # removedirs also prunes empty parent directories (stops silently at the
    # first non-empty one), preserving the original call's side effect.
    removedirs(folder)
# Known reply-style message suffixes; index 2 is the l2_fib special case.
reply_suffixes = ("reply", "details", "l2fibtableentry")


def is_reply(name):
    """True if the message name ends with any reply-style suffix."""
    return name.lower().endswith(reply_suffixes)


def is_details(name):
    """True for dump replies: 'details' or the l2fibtableentry special case."""
    return name.lower().endswith(reply_suffixes[1:])


def is_retval_field(name):
    """True for the conventional return-value field of a reply."""
    return name == 'retval'


dump_suffix = "dump"


def is_dump(name):
    """True if the message is a dump request."""
    return name.lower().endswith(dump_suffix)


def get_reply_suffix(name):
    """Return the matching reply suffix, or None when no suffix matches.

    The l2fibtableentry workaround maps to 'entry'.
    """
    lowered = name.lower()
    for reply_suffix in reply_suffixes:
        if lowered.endswith(reply_suffix):
            # FIXME workaround for l2_fib_table_entry
            return 'entry' if reply_suffix == reply_suffixes[2] else reply_suffix
# Mapping according to:
# http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/types.html
#
# Unsigned types are converted to signed java types that have the same size.
# It is the API user responsibility to interpret them correctly.
# Keys are VPP API wire types ('u8', 'i32[]', ...); values are Java types.
jni_2_java_type_mapping = {'u8': 'byte',
                           'u8[]': 'byte[]',
                           'i8': 'byte',
                           'i8[]': 'byte[]',
                           'u16': 'short',
                           'u16[]': 'short[]',
                           'i16': 'short',
                           'i16[]': 'short[]',
                           'u32': 'int',
                           'u32[]': 'int[]',
                           'i32': 'int',
                           'i32[]': 'int[]',
                           'u64': 'long',
                           'u64[]': 'long[]',
                           'i64': 'long',
                           'i64[]': 'long[]',
                           'f64': 'double',
                           'f64[]': 'double[]'
                           }
# VPP API wire type -> JNI C type used by the generated native code.
# NOTE: the original literal repeated the 'u8[]' key where 'i8[]' was
# intended, so 'i8[]' was missing from the mapping (its three sibling
# mappings in this file all carry an 'i8[]' entry).
vpp_2_jni_type_mapping = {'u8': 'jbyte',
                          'u8[]': 'jbyteArray',
                          'i8': 'jbyte',
                          'i8[]': 'jbyteArray',
                          'u16': 'jshort',
                          'u16[]': 'jshortArray',
                          'i16': 'jshort',
                          'i16[]': 'jshortArray',
                          'u32': 'jint',
                          'u32[]': 'jintArray',
                          'i32': 'jint',
                          'i32[]': 'jintArray',
                          'u64': 'jlong',
                          'u64[]': 'jlongArray',
                          'i64': 'jlong',
                          'i64[]': 'jlongArray',
                          'f64': 'jdouble',
                          'f64[]': 'jdoubleArray'
                          }
# https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/types.html#type_signatures
# VPP API wire type -> JNI type-signature character(s) ('[' prefixes arrays).
jni_2_signature_mapping = {'u8': 'B',
                           'u8[]': '[B',
                           'i8': 'B',
                           'i8[]': '[B',
                           'u16': 'S',
                           'u16[]': '[S',
                           'i16': 'S',
                           'i16[]': '[S',
                           'u32': 'I',
                           'u32[]': '[I',
                           'i32': 'I',
                           'i32[]': '[I',
                           'u64': 'J',
                           'u64[]': '[J',
                           'i64': 'J',
                           'i64[]': '[J',
                           'f64': 'D',
                           'f64[]': '[D'
                           }
# https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/functions.html#Get_type_Field_routines
# VPP API wire type -> suffix of the JNI Get/Set<type>Field accessor to use.
jni_field_accessors = {'u8': 'ByteField',
                       'u8[]': 'ObjectField',
                       'i8': 'ByteField',
                       'i8[]': 'ObjectField',
                       'u16': 'ShortField',
                       'u16[]': 'ObjectField',
                       'i16': 'ShortField',
                       'i16[]': 'ObjectField',
                       'u32': 'IntField',
                       'u32[]': 'ObjectField',
                       'i32': 'IntField',
                       'i32[]': 'ObjectField',
                       'u64': 'LongField',
                       'u64[]': 'ObjectField',
                       'i64': 'LongField',
                       'i64[]': 'ObjectField',
                       'f64': 'DoubleField',
                       'f64[]': 'ObjectField'
                       }
# vpe.api calls that do not follow naming conventions and have to be handled
# exceptionally when finding reply -> request mapping
# FIXME in vpe.api
unconventional_naming_rep_req = {
}

#
# FIXME no convention in the naming of events (notifications) in vpe.api
notifications_message_suffixes = ("event", "counters")
notification_messages_reused = ["sw_interface_set_flags"]

# messages that must be ignored. These messages are INSUFFICIENTLY marked as disabled in vpe.api
# FIXME
ignored_messages = []


def is_notification(name):
    """Returns true if the structure is a notification regardless of its no other use"""
    lowered = name.lower()
    return is_just_notification(lowered) or lowered in notification_messages_reused


def is_just_notification(name):
    """Returns true if the structure is just a notification and has no other use"""
    return name.lower().endswith(notifications_message_suffixes)


def is_ignored(param):
    """Returns true if the message is on the explicit ignore list."""
    return param.lower() in ignored_messages
def remove_reply_suffix(camel_case_name_with_suffix):
    """Strip the detected reply suffix from a camel-case message name."""
    detected = get_reply_suffix(camel_case_name_with_suffix)
    return remove_suffix(camel_case_name_with_suffix, detected)


def remove_suffix(camel_case_name_with_suffix, suffix):
    """Strip *suffix* from the end of the name (no-op for an empty suffix)."""
    length = len(suffix)
    return camel_case_name_with_suffix[:-length] if length else camel_case_name_with_suffix


def is_control_ping(camel_case_name_with_suffix):
    """True for the control-ping request/reply message pair."""
    return camel_case_name_with_suffix.lower().startswith("controlping")
def api_message_to_javadoc(api_message):
    """ Converts vpe.api message description to javadoc """
    # The original bound the formatted text to a local named ``str``,
    # shadowing the builtin; use a descriptive local name instead.
    formatted = pprint.pformat(api_message, indent=4, width=120, depth=None)
    return " * " + formatted.replace("\n", "\n * ")
# Suffix appended to DTO class names generated for notifications.
notification_dto_suffix = "Notification"


def add_notification_suffix(camel_case_dto_name):
    """Append the notification suffix to a camel-case DTO name."""
    return camel_case_dto_name + notification_dto_suffix
def is_array(java_type_as_string):
    """True if the java type string denotes an array type."""
    return java_type_as_string[-2:] == "[]"
| {
"content_hash": "7fd27f5f4a7cfe8b72aa89f756d1b12d",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 128,
"avg_line_length": 34.86363636363637,
"alnum_prop": 0.4667535853976532,
"repo_name": "wfnex/openbras",
"id": "947fc31d48aef63b6605dfdcf486adc9c060ad2a",
"size": "7523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/VPP/src/vpp-api/java/jvpp/gen/jvppgen/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "14862"
},
{
"name": "Batchfile",
"bytes": "4261"
},
{
"name": "C",
"bytes": "13613148"
},
{
"name": "C++",
"bytes": "20488678"
},
{
"name": "CSS",
"bytes": "40942"
},
{
"name": "Emacs Lisp",
"bytes": "104244"
},
{
"name": "Gnuplot",
"bytes": "48840"
},
{
"name": "HTML",
"bytes": "2553584"
},
{
"name": "Java",
"bytes": "120574"
},
{
"name": "LLVM",
"bytes": "4067"
},
{
"name": "Logos",
"bytes": "162"
},
{
"name": "Lua",
"bytes": "79957"
},
{
"name": "M4",
"bytes": "8638"
},
{
"name": "Makefile",
"bytes": "5632150"
},
{
"name": "Max",
"bytes": "84390"
},
{
"name": "Objective-C",
"bytes": "32394"
},
{
"name": "Perl",
"bytes": "1406609"
},
{
"name": "Perl 6",
"bytes": "2356"
},
{
"name": "PostScript",
"bytes": "152076"
},
{
"name": "Python",
"bytes": "1450179"
},
{
"name": "Roff",
"bytes": "740"
},
{
"name": "Ruby",
"bytes": "4020"
},
{
"name": "Shell",
"bytes": "187908"
},
{
"name": "Tcl",
"bytes": "397"
},
{
"name": "Yacc",
"bytes": "17850"
}
],
"symlink_target": ""
} |
import unittest
import json
from lock import RPiLock
from user import User
from requests import Response
try:
from unittest.mock import patch, MagicMock, mock_open
except ImportError:
from mock import patch, MagicMock, mock_open
# Canned server payload: one registered lock (pk 1) whose serial matches the
# value mocked out of the serial file in LockTestCase.setUp.
FAKE_LOCKS = [
    {
        'pk': 1,
        'serial': 'randomserial',
    },
]
class LockTestCase(unittest.TestCase):
    """Test the lock class.

    All external collaborators (pigpio, socketIO_client, the serial file and
    the lock-registry HTTP API) are patched out in setUp, so the tests run
    without hardware or network access.
    """
    def setUp(self):
        class Pi(object):
            # Minimal stand-in for a pigpio.pi handle that records the last
            # pulsewidth written to the servo pin.
            def __init__(self):
                self.pulsewidth = 0
            def set_servo_pulsewidth(self, pin_num, pulsewidth):
                self.pulsewidth = pulsewidth
            def get_servo_pulsewidth(self, pin_num):
                return self.pulsewidth
        modules = {
            'pigpio': MagicMock(),
            'socketIO_client': MagicMock(),
        }
        self.fake_imports = patch.dict('sys.modules', modules)
        # RPiLock reads the device serial from a file; feed it a known value.
        self.fake_serial = patch(
            'io.open',
            new=mock_open(read_data="Serial: randomserial"),
            create=True
        )
        mock_response = MagicMock(
            spec=Response,
            response=json.dumps(FAKE_LOCKS)
        )
        mock_response.json.return_value = FAKE_LOCKS
        self.fake_lock_id = patch(
            'requests.get',
            return_value=mock_response,
        )
        self.fake_imports.start()
        self.fake_serial.start()
        self.fake_lock_id.start()
        self.server = 'localhost'
        self.user = User('test_user', 'password', self.server)
        self.lock = RPiLock(self.user, self.server)
        self.lock.pi = Pi()
    def tearDown(self):
        self.fake_imports.stop()
        self.fake_serial.stop()
        self.fake_lock_id.stop()
    def test_init_lock(self):
        """Test instantiate a RPiLock object."""
        # The original asserted `'lock' and 'unlock' in ...`, which only
        # checked 'unlock' because of operator precedence; check both keys.
        self.assertIn('lock', self.lock.avail_actions)
        self.assertIn('unlock', self.lock.avail_actions)
        self.assertEqual(self.lock.model, 'motorized')
        self.assertEqual(self.lock.server, 'localhost')
    def test_get_serial(self):
        self.assertEqual(self.lock.serial, 'randomserial')
    def test_get_serverside_lock_id(self):
        self.assertEqual(self.lock.lock_id, 1)
    def test_action_not_permitted(self):
        self.assertRaises(ValueError, self.lock.control_motorized, 'action')
    def test_action_motorized_unlock(self):
        self.lock.control_motorized('unlock')
        self.assertEqual(self.lock.pi.get_servo_pulsewidth(18), 600)
    def test_action_motorized_lock(self):
        self.lock.control_motorized('lock')
        self.assertEqual(self.lock.pi.get_servo_pulsewidth(18), 2400)
    @patch(
        'requests.patch',
        return_value=MagicMock(
            spec=Response,
            response=json.dumps({'res': 'res'})
        ),
    )
    def test_update_serverside_status(self, req):
        data = {
            'event_id': 1,
            'action': 'lock'
        }
        res = self.lock.update_serverside_status(data)
        # Same precedence fix as in test_init_lock: assert each key.
        self.assertIn('lock_res', res)
        self.assertIn('event_res', res)
    @patch(
        'requests.post',
        return_value=MagicMock(
            spec=Response,
            response=json.dumps({'pk': 2})
        )
    )
    def test_self_register(self, req):
        # NOTE(review): the original called assertTrue(result, 2); the 2 was
        # silently consumed as the failure *message*, not compared.
        # assertEqual(result, 2) may have been intended -- confirm before
        # tightening further.
        self.assertTrue(self.lock.self_register())
    def test_lock_user_init(self):
        # Precedence fix: check both attribute names on the user object.
        self.assertIn('username', dir(self.user))
        self.assertIn('password', dir(self.user))
    @patch(
        'requests.head',
        return_value=MagicMock(
            spec=Response,
            status_code=200,
            response=json.dumps({})
        )
    )
    def test_lock_user_login(self, req):
        self.assertEqual(self.user.login().status_code, 200)
# Allow running this test module directly: python test_lock.py
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "8e5d76c38e640368da3bbaeb03d9c682",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 28.584615384615386,
"alnum_prop": 0.5756189451022605,
"repo_name": "Secured-Pi/raspberry-pi-client",
"id": "a053b51fcf5b80d6be199dc018fa7908ba601d0f",
"size": "3763",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "src/test_lock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28183"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import re
import requests
import pprint
from orionsdk import SwisClient
def main():
    """Toggle an NTA alert off and back on through the SolarWinds SWIS API."""
    # Connect to SWIS
    server = 'localhost'
    username = 'admin'
    password = ''
    swis = SwisClient(server, username, password)
    alert_name = 'NTA Alert on machine-hostname'
    query_results = swis.query(
        'Select Uri FROM Orion.AlertConfigurations WHERE Name = @alertname_par',
        alertname_par=alert_name)
    uri = query_results['results'][0]['Uri']
    # Disable the alert, then re-enable it.
    for enabled in (False, True):
        swis.update(uri, Enabled=enabled)
# Run the sample when executed as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "e88bf7bc512f5a0a9be84e8e0e9db0d1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 129,
"avg_line_length": 21.78787878787879,
"alnum_prop": 0.6244784422809457,
"repo_name": "solarwinds/orionsdk-python",
"id": "2f3ce5d5e04ad7893560e31274a30a3800087d9e",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/nta_enable_disable_alert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41555"
}
],
"symlink_target": ""
} |
from main import BaseHandler
from models.blog_post import blog_key
from google.appengine.ext import ndb
import time
class DeletePostHandler(BaseHandler):
    """Delete post if authored by user"""
    def get(self):
        # Render a delete-confirmation page, but only for the post's author;
        # everyone else is redirected with an explanatory error message.
        if self.user:
            post_id = self.request.get("post")
            key = ndb.Key('BlogPost', int(post_id), parent=blog_key())
            post = key.get()
            if not post:
                self.error(404)
                return
            else:
                if post.author.username == self.user.username:
                    self.render("deletepost.html", post=post)
                else:
                    error_message = "You are not permitted to delete a post"\
                        " that you have not created."
                    self.redirect('/unauthorized?error=%s' % error_message)
        else:
            # render login page with message that you have been redirected
            error_message = "You can't delete a post without logging in."
            self.redirect('/login?error=' + error_message)
    def post(self):
        # Perform the deletion; silently no-ops unless the requester owns it.
        post_id = self.request.get("post")
        key = ndb.Key('BlogPost', int(post_id), parent=blog_key())
        post = key.get()
        if post and self.user and post.author.username == self.user.username:
            key.delete()
            # Datastore writes are eventually consistent; brief pause so the
            # blog list rendered after the redirect no longer shows the post.
            time.sleep(0.1)
        self.redirect("/blog")
| {
"content_hash": "c55db52191f1cc17bea4cf4735c4299a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 37.2972972972973,
"alnum_prop": 0.5557971014492754,
"repo_name": "ashutoshpurushottam/wishper-blog",
"id": "c4775ddcb8fa53a5a1e29821b4ced65f4673c64f",
"size": "1380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/delete_post.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157470"
},
{
"name": "HTML",
"bytes": "18131"
},
{
"name": "JavaScript",
"bytes": "299031"
},
{
"name": "Python",
"bytes": "23863"
}
],
"symlink_target": ""
} |
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
# Repository slug used when --repo is not given on the command line.
GITHUB_REPO = 'kdheepak/ewspy'

# The .travis.yml to rewrite lives next to this script.
TRAVIS_CONFIG_FILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
    """Load public RSA key, with work-around for keys using
    incorrect header/footer format.

    Read more about RSA encryption with cryptography:
    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/

    *pubkey* is a PEM string; returns a cryptography public-key object.
    """
    try:
        return load_pem_public_key(pubkey.encode(), default_backend())
    except ValueError:
        # workaround for https://github.com/travis-ci/travis-api/issues/196
        pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
        return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
    """Encrypt password using given RSA public key and encode it with base64.

    The encrypted password can only be decrypted by someone with the
    private key (in this case, only Travis).

    *password* must be bytes; the return value is base64-encoded bytes.
    """
    key = load_key(pubkey)
    encrypted_password = key.encrypt(password, PKCS1v15())
    return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
    """Download RSA public key Travis will use for this repo.

    Travis API docs: http://docs.travis-ci.com/api/#repository-keys
    Raises ValueError when Travis has no key for the repo (not yet enabled).
    """
    keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
    data = json.loads(urlopen(keyurl).read().decode())
    if 'key' not in data:
        errmsg = "Could not find public key for repo: {}.\n".format(repo)
        errmsg += "Have you already added your GitHub repo to Travis?"
        raise ValueError(errmsg)
    return data['key']
def prepend_line(filepath, line):
    """Rewrite a file adding a line to its beginning.
    """
    with open(filepath) as f:
        original = f.read()
    with open(filepath, 'w') as f:
        f.write(line + original)
def load_yaml_config(filepath):
    """Parse *filepath* as YAML and return the resulting object.

    Uses ``yaml.safe_load``: a bare ``yaml.load`` without an explicit Loader
    is deprecated (PyYAML >= 5.1) and can instantiate arbitrary Python
    objects from tagged input.  A Travis config contains only plain
    mappings/scalars, so ``safe_load`` is a drop-in replacement.
    """
    with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
    """Serialize *config* to *filepath* as block-style YAML."""
    with open(filepath, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
    """Update the deploy section of the .travis.yml file
    to use the given encrypted password.
    """
    config = load_yaml_config(TRAVIS_CONFIG_FILE)
    config['deploy']['password'] = dict(secure=encrypted_password)
    save_yaml_config(TRAVIS_CONFIG_FILE, config)
    # Warn future readers that the file is generated and will be overwritten.
    line = ('# This file was autogenerated and will overwrite'
            ' each time you run travis_pypi_setup.py\n')
    prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
    """Fetch the repo's Travis public key, encrypt the PyPI password
    (prompting when not supplied), and write it into .travis.yml."""
    public_key = fetch_public_key(args.repo)
    password = args.password or getpass('PyPI password: ')
    update_travis_deploy_password(encrypt(public_key, password.encode()))
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
    import argparse
    # CLI: --repo overrides the GitHub slug; --password skips the prompt.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--repo', default=GITHUB_REPO,
                        help='GitHub repo (default: %s)' % GITHUB_REPO)
    parser.add_argument('--password',
                        help='PyPI password (will prompt if not provided)')
    args = parser.parse_args()
    main(args)
| {
"content_hash": "26d7f2370b40530ebdb02790bb28cf8a",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 30.883333333333333,
"alnum_prop": 0.6807879114948732,
"repo_name": "kdheepak/ewspy",
"id": "be35d63ec0639542e4a31ff773672addf7e52688",
"size": "3752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "travis_pypi_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2264"
},
{
"name": "Python",
"bytes": "13275"
}
],
"symlink_target": ""
} |
from selenium import webdriver
class Clicker:
    """Selenium demo: looks up a staff member in the PXL personnel register."""

    def start_clicking(self):
        """Search for "willekens" and open the first result link.

        Uses the ``find_element(By...., ...)`` locator API instead of the
        ``find_element_by_*`` convenience helpers, which were deprecated in
        Selenium 4.0 and removed in 4.3; the By-based API works on all
        Selenium versions.
        """
        from selenium.webdriver.common.by import By

        driver = webdriver.Chrome()
        driver.get("https://pep.pxl.be/Personeelsregister/PersoneelZoeken.aspx")
        name_field = driver.find_element(By.ID, "ctl00_ContentPlaceHolder1_txtZoekenNaam")
        name_field.click()
        name_field.send_keys("willekens")
        driver.find_element(By.ID, "ctl00_ContentPlaceHolder1_btnZoeken").click()
        driver.find_element(
            By.CSS_SELECTOR,
            "#ctl00_ContentPlaceHolder1_gridZoekResultaat_ctl02_hlnkNaam").click()
| {
"content_hash": "7b74b54a30da9470a32b29b4816fbe40",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 114,
"avg_line_length": 39.42857142857143,
"alnum_prop": 0.7137681159420289,
"repo_name": "wgroeneveld/productivity-course",
"id": "445294b7ae9e9447ced3a4b5a06baf0ce44841ff",
"size": "552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/selenium/clicker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7931"
},
{
"name": "HTML",
"bytes": "3107"
},
{
"name": "Shell",
"bytes": "23"
}
],
"symlink_target": ""
} |
from st2actions.runners.pythonrunner import Action
import requests
class MmonitBaseAction(Action):
    """Base class for M/Monit actions: holds config and an HTTP session."""

    def __init__(self, config):
        super(MmonitBaseAction, self).__init__(config=config)
        # Connection settings come from the pack configuration.
        self.user = config['username']
        self.password = config['password']
        self.url = config['host']
        self.session = requests.session()

    def login(self):
        """Authenticate against the M/Monit web interface.

        Raises:
            Exception: if the login POST does not return HTTP 200.
        """
        # Initial GET establishes the session cookie before posting credentials.
        self.session.get(self.url)
        data = {"z_csrf_protection": "off",
                "z_username": self.user,
                "z_password": self.password}
        login = self.session.post("{}/z_security_check".format(self.url), data=data)
        if login.status_code != 200:
            raise Exception("Could not login to mmonit {}.".format(login.reason))

    def logout(self):
        """End the M/Monit session and close the underlying connection pool."""
        self.session.get("{}/login/logout.csp".format(self.url))
        self.session.close()

    def run(self, **kwargs):
        # Bug fix: the original raised ``NotImplemented(...)``, but
        # NotImplemented is a non-callable singleton, so that line actually
        # raised TypeError.  NotImplementedError is the exception intended
        # for abstract methods.
        raise NotImplementedError("You need to override this in your class.")
| {
"content_hash": "6db965ca84d82813b36f7b04e6c2f994",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 84,
"avg_line_length": 34.93103448275862,
"alnum_prop": 0.6169792694965449,
"repo_name": "pidah/st2contrib",
"id": "d8fdc1615c8eae2fdf54a013704699d18119463d",
"size": "1013",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "packs/mmonit/actions/lib/mmonit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "4592"
},
{
"name": "Python",
"bytes": "665076"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "15738"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
import oslo_messaging
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.services.trunk.rpc import agent
from neutron.tests import base
class TrunkSkeletonTest(base.BaseTestCase):
    # TODO(fitoduarte): add more test to improve coverage of module
    @mock.patch("neutron.api.rpc.callbacks.resource_manager."
                "ConsumerResourceCallbacksManager.register")
    @mock.patch("neutron.common.rpc.get_server")
    def test___init__(self, mocked_get_server, mocked_register):
        """TrunkSkeleton wires up resource handlers and fanout RPC servers."""
        skeleton = agent.TrunkSkeleton()

        # Both the trunk and subport handlers must be registered with the
        # consumer callbacks manager.
        self.assertEqual(2, mocked_register.call_count)
        expected_registrations = [
            mock.call(skeleton.handle_trunks, resources.TRUNK),
            mock.call(skeleton.handle_subports, resources.SUBPORT),
        ]
        mocked_register.assert_has_calls(expected_registrations, any_order=True)

        # rpc.get_server must be invoked with a fanout target for each
        # versioned resource topic, serving on this host.
        subport_topic = resources_rpc.resource_type_versioned_topic(
            resources.SUBPORT)
        subport_target = oslo_messaging.Target(
            topic=subport_topic, server=cfg.CONF.host, fanout=True)
        trunk_topic = resources_rpc.resource_type_versioned_topic(
            resources.TRUNK)
        trunk_target = oslo_messaging.Target(
            topic=trunk_topic, server=cfg.CONF.host, fanout=True)
        expected_servers = [mock.call(subport_target, mock.ANY),
                            mock.call(trunk_target, mock.ANY)]
        mocked_get_server.assert_has_calls(expected_servers, any_order=True)
        # The endpoints passed to get_server must include the push callback.
        self.assertIn("ResourcesPushRpcCallback",
                      str(mocked_get_server.call_args_list))
| {
"content_hash": "0f5d6b9245689ab0ce9ae684ee2934ac",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 47.34285714285714,
"alnum_prop": 0.6928183464091732,
"repo_name": "eayunstack/neutron",
"id": "8ed87c2ce65473fb7f3b8d0da661bcc60ed5e9e9",
"size": "2257",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/services/trunk/rpc/test_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
} |
"""User subscription table.
A subscription is any time-limited modification to a user's privileges,
such as increased storage quota. Subscriptions may be paid (initially
supporting iOS in-app purchases) or granted for other reasons such as
referring new users.
"""
__author__ = 'ben@emailscrubbed.com (Ben Darnell)'
from copy import deepcopy
import time
from viewfinder.backend.base import util
from viewfinder.backend.db import vf_schema
from viewfinder.backend.db.base import DBObject
from viewfinder.backend.db.range_base import DBRangeObject
from viewfinder.backend.op.notification_manager import NotificationManager
from viewfinder.backend.services import itunes_store
kITunesPrefix = 'itunes:'
@DBObject.map_table_attributes
class Subscription(DBRangeObject):
  """User subscription data object.

  Each row records one payment transaction (e.g. an iTunes purchase or
  renewal) together with the product, quantity and expiration it grants.
  """
  __slots__ = []
  _table = DBObject._schema.GetTable(vf_schema.SUBSCRIPTION)
  # Since our subscriptions are a combination of storage quotas and
  # feature access, give each one its own product type for now.
  _ITUNES_PRODUCTS = {
    # vf_sub1 = "Viewfinder Plus" - cloud storage option and 5GB
    'vf_sub1': dict(product_type='vf_sub1', quantity=5),
    # vf_sub2 = "Viewfinder Pro" - cloud storage, store originals, and 50GB
    'vf_sub2': dict(product_type='vf_sub2', quantity=50),
  }
  _JSON_ATTRIBUTES = set(['transaction_id', 'subscription_id', 'timestamp', 'expiration_ts', 'product_type',
                          'quantity', 'payment_type'])
  """Subset of subscription attributes that are returned to the owning user in query_users."""
  @classmethod
  def _GetITunesProductInfo(cls, verify_response):
    """Maps iTunes product names to Subscription attributes.

    An iTunes "product" also includes information about the billing
    cycle; by convention we name our products with a suffix of "_month"
    or "_year" (etc).
    """
    product_id = verify_response.GetProductId()
    # e.g. 'vf_sub1_month' -> ('vf_sub1', 'month')
    base_product, billing_cycle = product_id.rsplit('_', 1)
    assert billing_cycle in ('month', 'year'), billing_cycle
    return Subscription._ITUNES_PRODUCTS[base_product]
  @classmethod
  def GetITunesTransactionId(cls, verify_response):
    """Returns the transaction id for an iTunes transaction.

    The returned id is usable as a range key for Subscription.Query.
    """
    return kITunesPrefix + verify_response.GetRenewalTransactionId()
  @classmethod
  def GetITunesSubscriptionId(cls, verify_response):
    """Returns the subscription id for an iTunes transaction.

    The returned id will be the same for all transactions in a series of renewals.
    """
    return kITunesPrefix + verify_response.GetOriginalTransactionId()
  @classmethod
  def CreateFromITunes(cls, user_id, verify_response):
    """Creates a subscription object for an iTunes transaction.

    The verify_response argument is a response from
    viewfinder.backend.services.itunes_store.ITunesStoreClient.VerifyReceipt.
    The new object is returned but not saved to the database.
    """
    assert verify_response.IsValid()
    sub_dict = dict(
      user_id=user_id,
      transaction_id=Subscription.GetITunesTransactionId(verify_response),
      subscription_id=Subscription.GetITunesSubscriptionId(verify_response),
      timestamp=verify_response.GetTransactionTime(),
      expiration_ts=verify_response.GetExpirationTime(),
      payment_type='itunes',
      extra_info=verify_response.GetLatestReceiptInfo(),
      renewal_data=verify_response.GetRenewalData(),
    )
    sub_dict.update(**Subscription._GetITunesProductInfo(verify_response))
    sub = Subscription.CreateFromKeywords(**sub_dict)
    return sub
  @classmethod
  def RecordITunesTransaction(cls, client, callback, user_id, verify_response):
    """Creates a subscription record for an iTunes transaction and saves it to the database.

    The verify_response argument is a response from
    viewfinder.backend.services.itunes_store.ITunesStoreClient.VerifyReceipt.
    """
    sub = Subscription.CreateFromITunes(user_id, verify_response)
    sub.Update(client, callback)
  @classmethod
  def RecordITunesTransactionOperation(cls, client, callback, user_id, verify_response_str):
    """Parses a serialized verify response, records it, then notifies the user."""
    def _OnRecord():
      NotificationManager.NotifyRecordSubscription(client, user_id, callback=callback)
    verify_response = itunes_store.VerifyResponse.FromString(verify_response_str)
    assert verify_response.IsValid()
    Subscription.RecordITunesTransaction(client, _OnRecord, user_id, verify_response)
  @classmethod
  def QueryByUser(cls, client, callback, user_id, include_expired=False,
                  include_history=False):
    """Returns a list of Subscription objects for the given user.

    By default only includes currently-active subscriptions, and only
    one transaction per subscription. To return expired subscriptions,
    pass include_expired=True. To return all transactions (even those
    superseded by a renewal transaction for the same subscription),
    pass include_history=True (which implies include_expired=True).
    """
    history_results = []
    # Maps subscription_id -> most recent transaction seen for it so far.
    latest = {}
    def _VisitSub(sub, callback):
      if include_history:
        history_results.append(sub)
      else:
        if sub.expiration_ts < time.time() and not include_expired:
          callback()
          return
        # Only one transaction per subscription.
        if (sub.subscription_id in latest and
            latest[sub.subscription_id].timestamp > sub.timestamp):
          callback()
          return
        latest[sub.subscription_id] = sub
      callback()
    def _OnVisitDone():
      # Exactly one of the two accumulators is populated, depending on mode.
      if include_history:
        assert not latest
        callback(history_results)
      else:
        assert not history_results
        callback(latest.values())
    Subscription.VisitRange(client, user_id, None, None, _VisitSub, _OnVisitDone)
  def MakeMetadataDict(self):
    """Project a subset of subscription attributes that can be provided to the user."""
    sub_dict = {}
    for attr_name in Subscription._JSON_ATTRIBUTES:
      util.SetIfNotNone(sub_dict, attr_name, getattr(self, attr_name, None))
    if self.extra_info:
      # Deep-copy so callers cannot mutate the stored attribute.
      sub_dict['extra_info'] = deepcopy(self.extra_info)
    return sub_dict
| {
"content_hash": "594195afad2d70c1ee97b688dbae8023",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 108,
"avg_line_length": 38.484472049689444,
"alnum_prop": 0.7198192382182053,
"repo_name": "peixiaobin/viewfinder",
"id": "34021a5b7c0c409ae4233f5fa913562f945fbcb7",
"size": "6249",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "backend/db/subscription.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "2490"
},
{
"name": "C",
"bytes": "10803"
},
{
"name": "C++",
"bytes": "1631234"
},
{
"name": "CSS",
"bytes": "235868"
},
{
"name": "Emacs Lisp",
"bytes": "17259"
},
{
"name": "HTML",
"bytes": "350953"
},
{
"name": "Java",
"bytes": "462356"
},
{
"name": "JavaScript",
"bytes": "872817"
},
{
"name": "Makefile",
"bytes": "2384"
},
{
"name": "Objective-C",
"bytes": "251484"
},
{
"name": "Objective-C++",
"bytes": "2744208"
},
{
"name": "Protocol Buffer",
"bytes": "41461"
},
{
"name": "Python",
"bytes": "4463930"
},
{
"name": "Ruby",
"bytes": "415"
},
{
"name": "Shell",
"bytes": "48002"
}
],
"symlink_target": ""
} |
import markov, time, random
from blick import BlickLoader
from google import search
from random import randrange
# Phonotactic grammar scorer; b.assessWord() below rates how pronounceable
# a candidate transcription is (lower scores = more word-like).
b = BlickLoader(grammarType="default")
# Table mapping an English spelling fragment to the ARPAbet-style phone
# strings it may correspond to (each row: [fragment, phone1, phone2, ...]).
# Multi-letter digraphs come first so they are substituted before their
# component letters when the table is applied in order.
phonetics = [
    ["th", " TH", " DH"],
    ["sh", " SH"],
    ["ee", " IY1"],
    ["ai", " EY1"],
    ["oo", " UW1", " UH1"],
    ["ou", " AW1", " AW2", " UW2"],
    ["oi", " OY2"],
    ["oy", " OY1"],
    ["oa", " OW1"],
    ["ng", " NG"],
    ["e", " IY2", " EH1", " EH2", " EY2", " ER1", " ER2", " ER0"],
    ["i", " IH1", " IH2", " IH0", " AY1", " AY2"],
    ["a", " AE1", " AE2", " AO2", " AH0"],
    ["o", " AO1", " AA1", " AA2", " OW2", " OW0"],
    ["u", " UW0", " UH2", " AH1", " AH2"],
    ["p", " P"],
    ["b", " B"],
    ["f", " F"],
    ["v", " V"],
    ["m", " M"],
    ["w", " W"],
    ["t", " T"],
    ["d", " D"],
    ["s", " S"],
    ["z", " Z", " ZH"],
    ["n", " N"],
    ["l", " L"],
    ["r", " R"],
    ["y", " Y", " IY0"],
    ["k", " K"],
    ["g", " G"],
    ["j", " JH"],
    ["h", " HH"],
    ["c", " K", " S"],
    ["x", " K S", " EH1 K S"],
    ["q", " K"],
]
def phonetify(word, table=None):
    """Expand *word* into the set of candidate phonetic transcriptions.

    Each spelling fragment in *table* is replaced, in table order, by its
    candidate phone strings; when a fragment has several candidates the
    current partial results are forked, one copy per candidate.  Note that
    all occurrences of a fragment within one candidate string receive the
    same phone (per-occurrence combinations are not generated).

    Args:
        word: the spelled word to transcribe.
        table: optional replacement table, a list of
            ``[fragment, phone1, phone2, ...]`` rows.  Defaults to the
            module-level ``phonetics`` table; parameterizing it makes the
            function reusable and testable in isolation.

    Returns:
        A set of whitespace-stripped transcription strings in the format
        blick expects.
    """
    if table is None:
        table = phonetics
    results = [word]
    for row in table:
        fragment, replacements = row[0], row[1:]
        i = 0
        while i < len(results):
            if fragment in results[i]:
                # Fork one new result per extra alternative, then rewrite
                # the current entry in place with the first alternative.
                for alt in replacements[1:]:
                    results.append(results[i].replace(fragment, alt))
                results[i] = results[i].replace(fragment, replacements[0])
            i += 1
    return set(r.strip() for r in results)
# Load the dictionary into the markov chain
chain = markov.MarkovChain()
dictionary = "morewords"
# Use a context manager so the dictionary file is closed deterministically
# (the original iterated an open() result and never closed it).
with open(dictionary) as word_file:
    for word in word_file:
        word = word.strip()
        # Skip blank lines and possessive forms ("dog's" etc.).
        if word and not word.endswith("'s"):
            chain.add(word.lower())
#Make a word, check if it is within set range, search google for it, save it. Up to 5000 words
words = 0
while words < 5000:
word = "".join(chain.random_output())
if len(word) > 4 and len(word) < 10:
score = 100
blickified = phonetify(word)
for blicked in blickified:
try:
#sometimes this bails out, instead of tracking it down each time, this was an easy out
thisscore = b.assessWord(blicked)
except:
score = 100
if thisscore < score:
score=thisscore
if score > 0 and score < 18:
try:
first_url = search('"' + word + '"',num=10, stop=1)
for x in range(1,10):
this_url = first_url.next()
print str(score) + ',' + word + " - bad " + this_url
except StopIteration:
with open('proop.out', 'a') as outfile:
outfile.write(str(score) + ',' + word + '\n')
print str(score) + ',' + word + " - good"
words += 1
time.sleep(randrange(10,120))
| {
"content_hash": "c4eb1d668231f32bd78fb3c6a82d3642",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 102,
"avg_line_length": 35.96,
"alnum_prop": 0.5525583982202447,
"repo_name": "jordanrinke/proop",
"id": "18683cfc0854cd8787a68266ff616e02f70e39ef",
"size": "3596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5626"
}
],
"symlink_target": ""
} |
"""An integration test for datastore_write_it_pipeline
This test creates entities and writes them to Cloud Datastore. Subsequently,
these entities are read from Cloud Datastore, compared to the expected value
for the entity, and deleted.
There is no output; instead, we use `assert_that` transform to verify the
results in the pipeline.
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import random
import unittest
from datetime import datetime
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.io.gcp.datastore.v1new import datastore_write_it_pipeline
except ImportError:
datastore_write_it_pipeline = None # type: ignore
class DatastoreWriteIT(unittest.TestCase):
  """Integration test driver for the Datastore write/read/delete pipeline."""

  NUM_ENTITIES = 1001
  LIMIT = 500

  def run_datastore_write(self, limit=None):
    """Run the pipeline against a uniquely-named kind, verifying its state."""
    test_pipeline = TestPipeline(is_integration_test=True)
    # Unique kind name per run so concurrent test runs cannot collide.
    kind = 'testkind%s%d' % (datetime.now().strftime("%m%d%H%M%S"),
                             random.randint(0, 100000))
    extra_opts = {
        'kind': kind,
        'num_entities': self.NUM_ENTITIES,
        'on_success_matcher': all_of(PipelineStateMatcher()),
    }
    if limit is not None:
      extra_opts['limit'] = limit
    datastore_write_it_pipeline.run(
        test_pipeline.get_full_options_as_args(**extra_opts))

  @attr('IT')
  @unittest.skipIf(
      datastore_write_it_pipeline is None, 'GCP dependencies are not installed')
  def test_datastore_write_limit(self):
    self.run_datastore_write(limit=self.LIMIT)
if __name__ == '__main__':
  # Surface pipeline progress logging when the test module is run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| {
"content_hash": "b44e0d22ce33dd73b9a05a77e76f1120",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 29.095238095238095,
"alnum_prop": 0.7266775777414075,
"repo_name": "iemejia/incubator-beam",
"id": "7b3c01b2b72c25a786ec7e5ad81df10b0f44ee70",
"size": "2618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/gcp/datastore/v1new/datastore_write_it_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
import json
import logging
import socket
from django.utils.translation import ugettext as _
from sqoop import client, conf
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from exception import handle_rest_exception
from django.views.decorators.cache import never_cache
# Public API of this module.
__all__ = ['driver']
LOG = logging.getLogger(__name__)
@never_cache
def driver(request):
response = {
'status': 0,
'errors': None,
'driver': None
}
if request.method == 'GET':
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, conf.SECURITY_ENABLED.get(), conf.MECHANISM.get(), request.LANGUAGE_CODE)
response['driver'] = c.get_driver().to_dict()
except RestException, e:
response.update(handle_rest_exception(e, _('Could not get driver.')))
return JsonResponse(response)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)
| {
"content_hash": "7142561966a2579aa19f3d8e76f9755d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 148,
"avg_line_length": 30.647058823529413,
"alnum_prop": 0.7264875239923224,
"repo_name": "mapr/hue",
"id": "102f94e3713cb7f4d54a92cb7577fb87cf2d6886",
"size": "1834",
"binary": false,
"copies": "1",
"ref": "refs/heads/hue-3.9.0-mapr",
"path": "apps/sqoop/src/sqoop/api/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9984620"
},
{
"name": "C++",
"bytes": "196076"
},
{
"name": "CSS",
"bytes": "374307"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3682996"
},
{
"name": "JavaScript",
"bytes": "963632"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "21427931"
},
{
"name": "Shell",
"bytes": "33699"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "XSLT",
"bytes": "190688"
}
],
"symlink_target": ""
} |
# Print each demo line in turn.
for message in ("Hello World", "Second Line"):
    print(message)
| {
"content_hash": "3a7735d526d34becc71ce8c41ff79806",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 20,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.6976744186046512,
"repo_name": "fsoustra/myPiLapse",
"id": "26847cb0d9f2837b24ec8221882b968b6891e052",
"size": "62",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/test.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pkgutil
from pex.pex import PEX
from pants.backend.python.python_chroot import PythonChroot
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.tasks.python_task import PythonTask
from pants.base.exceptions import TaskError
from pants.base.generator import Generator, TemplateData
from pants.base.workunit import WorkUnit
from pants.util.contextutil import temporary_file
class PythonEval(PythonTask):
  """Checks that each python target's modules can be imported in isolation.

  "Evaluating" a target builds a chroot containing the target's sources and
  transitive dependencies and imports each module (or the binary's entry
  point) inside it, revealing missing BUILD dependencies.
  """
  class Error(TaskError):
    """A richer failure exception type useful for tests."""
    def __init__(self, *args, **kwargs):
      # Targets that evaluated successfully / unsuccessfully before failure.
      compiled = kwargs.pop('compiled')
      failed = kwargs.pop('failed')
      super(PythonEval.Error, self).__init__(*args, **kwargs)
      self.compiled = compiled
      self.failed = failed
  # Mustache template rendering the synthetic "main" of import statements.
  _EVAL_TEMPLATE_PATH = os.path.join('templates', 'python_eval', 'eval.py.mustache')
  @staticmethod
  def _is_evalable(target):
    # Only python libraries and binaries can be evaluated.
    return isinstance(target, (PythonLibrary, PythonBinary))
  @classmethod
  def register_options(cls, register):
    super(PythonEval, cls).register_options(register)
    register('--fail-slow', action='store_true', default=False,
             help='Compile all targets and present the full list of errors.')
    register('--closure', action='store_true', default=False,
             help='Eval all targets in the closure individually instead of just the targets '
                  'specified on the command line.')
  def execute(self):
    targets = self.context.targets() if self.get_options().closure else self.context.target_roots
    with self.invalidated(filter(self._is_evalable, targets),
                          topological_order=True) as invalidation_check:
      compiled = self._compile_targets(invalidation_check.invalid_vts)
      return compiled  # Collected and returned for tests
  def _compile_targets(self, invalid_vts):
    """Evaluates each invalid target, honoring --fail-slow.

    Returns the list of successfully evaluated targets; raises
    PythonEval.Error if any target failed.
    """
    with self.context.new_workunit(name='eval-targets', labels=[WorkUnit.MULTITOOL]):
      compiled = []
      failed = []
      for vt in invalid_vts:
        target = vt.target
        return_code = self._compile_target(target)
        if return_code == 0:
          vt.update()  # Ensure partial progress is marked valid
          compiled.append(target)
        else:
          if self.get_options().fail_slow:
            failed.append(target)
          else:
            # Fail fast: report the single failing target immediately.
            raise self.Error('Failed to eval {}'.format(target.address.spec),
                             compiled=compiled,
                             failed=[target])
      if failed:
        msg = 'Failed to evaluate {} targets:\n  {}'.format(
            len(failed),
            '\n  '.join(t.address.spec for t in failed))
        raise self.Error(msg, compiled=compiled, failed=failed)
      return compiled
  def _compile_target(self, target):
    # "Compiles" a target by forming an isolated chroot of its sources and transitive deps and then
    # attempting to import each of the target's sources in the case of a python library or else the
    # entry point in the case of a python binary.
    #
    # For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like:
    #
    #   if __name__ == '__main__':
    #     import lib.core
    #     import lib.util
    #
    # For a binary with entry point lib.bin:main the "compiler" main file would look like:
    #
    #   if __name__ == '__main__':
    #     from lib.bin import main
    #
    # In either case the main file is executed within the target chroot to reveal missing BUILD
    # dependencies.
    with self.context.new_workunit(name=target.address.spec):
      # Build the list of import statements to render into the template.
      modules = []
      if isinstance(target, PythonBinary):
        source = 'entry_point {}'.format(target.entry_point)
        components = target.entry_point.rsplit(':', 1)
        module = components[0]
        if len(components) == 2:
          function = components[1]
          data = TemplateData(source=source,
                              import_statement='from {} import {}'.format(module, function))
        else:
          data = TemplateData(source=source, import_statement='import {}'.format(module))
        modules.append(data)
      else:
        for path in target.sources_relative_to_source_root():
          if path.endswith('.py'):
            if os.path.basename(path) == '__init__.py':
              module_path = os.path.dirname(path)
            else:
              module_path, _ = os.path.splitext(path)
            source = 'file {}'.format(os.path.join(target.target_base, path))
            # Convert a source path into its dotted module name.
            module = module_path.replace(os.path.sep, '.')
            data = TemplateData(source=source, import_statement='import {}'.format(module))
            modules.append(data)
      if not modules:
        # Nothing to eval, so a trivial compile success.
        return 0
      interpreter = self.select_interpreter_for_targets([target])
      if isinstance(target, PythonBinary):
        pexinfo, platforms = target.pexinfo, target.platforms
      else:
        pexinfo, platforms = None, None
      with self.temporary_pex_builder(interpreter=interpreter, pex_info=pexinfo) as builder:
        with self.context.new_workunit(name='resolve'):
          chroot = PythonChroot(
              context=self.context,
              targets=[target],
              builder=builder,
              platforms=platforms,
              interpreter=interpreter)
          chroot.dump()
        # Render the synthetic main module and make it the pex entry point.
        with temporary_file() as imports_file:
          generator = Generator(pkgutil.get_data(__name__, self._EVAL_TEMPLATE_PATH),
                                chroot=chroot.path(),
                                modules=modules)
          generator.write(imports_file)
          imports_file.close()
          builder.set_executable(imports_file.name, '__pants_python_eval__.py')
          builder.freeze()
          pex = PEX(builder.path(), interpreter=interpreter)
          # Execute the pex; its exit code is the "compile" result.
          with self.context.new_workunit(name='eval',
                                         labels=[WorkUnit.COMPILER, WorkUnit.RUN, WorkUnit.TOOL],
                                         cmd=' '.join(pex.cmdline())) as workunit:
            returncode = pex.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
            workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
            if returncode != 0:
              self.context.log.error('Failed to eval {}'.format(target.address.spec))
            return returncode
| {
"content_hash": "03c7df97eb73beaf7edbec45ed9cd14f",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 100,
"avg_line_length": 40.969325153374236,
"alnum_prop": 0.6227912548667266,
"repo_name": "tejal29/pants",
"id": "602376f746c1bace0e1bcff1e037f8fbbf2fb01c",
"size": "6825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/python/tasks/python_eval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10977"
},
{
"name": "GAP",
"bytes": "4810"
},
{
"name": "HTML",
"bytes": "75563"
},
{
"name": "Java",
"bytes": "47798"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "5348"
},
{
"name": "Python",
"bytes": "2364916"
},
{
"name": "Scala",
"bytes": "5556"
},
{
"name": "Shell",
"bytes": "39930"
},
{
"name": "Thrift",
"bytes": "1841"
},
{
"name": "XML",
"bytes": "8658"
}
],
"symlink_target": ""
} |
import uuid
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.db import migrations, models, transaction
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils import translation
from django.utils.encoding import force_str
from modelcluster.fields import ParentalKey
from wagtail.core.signals import pre_validate_delete
from wagtail.core.utils import get_content_languages, get_supported_content_language_variant
from .copying import _copy
def pk(obj):
    """Return the primary key when *obj* is a model instance, else *obj* itself.

    Lets callers pass either a model instance or a raw primary key value.
    """
    return obj.pk if isinstance(obj, models.Model) else obj
class LocaleManager(models.Manager):
    def get_for_language(self, language_code):
        """
        Gets a Locale from a language code.
        """
        # Normalize to a language variant the site actually supports.
        supported_variant = get_supported_content_language_variant(language_code)
        return self.get(language_code=supported_variant)
class Locale(models.Model):
    """A content language (optionally with a region) available on the site."""
    #: The language code that represents this locale
    #:
    #: The language code can either be a language code on its own (such as ``en``, ``fr``),
    #: or it can include a region code (such as ``en-gb``, ``fr-fr``).
    language_code = models.CharField(max_length=100, unique=True)
    # `objects` excludes any Locales that have been removed from LANGUAGES; this effectively disables them.
    # The Locale management UI needs to be able to see these so we provide a separate manager `all_objects`.
    objects = LocaleManager()
    all_objects = models.Manager()
    class Meta:
        ordering = [
            "language_code",
        ]
    @classmethod
    def get_default(cls):
        """
        Returns the default Locale based on the site's LANGUAGE_CODE setting
        """
        return cls.objects.get_for_language(settings.LANGUAGE_CODE)
    @classmethod
    def get_active(cls):
        """
        Returns the Locale that corresponds to the currently activated language in Django.
        """
        try:
            return cls.objects.get_for_language(translation.get_language())
        except (cls.DoesNotExist, LookupError):
            # Fall back to the site default when the active language has no Locale.
            return cls.get_default()
    @transaction.atomic
    def delete(self, *args, **kwargs):
        # Provide a signal like pre_delete, but sent before on_delete validation.
        # This allows us to use the signal to fix up references to the locale to be deleted
        # that would otherwise fail validation.
        # Workaround for https://code.djangoproject.com/ticket/6870
        pre_validate_delete.send(sender=Locale, instance=self)
        return super().delete(*args, **kwargs)
    def language_code_is_valid(self):
        """Returns True if this locale's language code is still configured in LANGUAGES."""
        return self.language_code in get_content_languages()
    def get_display_name(self):
        """Returns the human-readable name for this locale, or None if not configured."""
        return get_content_languages().get(self.language_code)
    def __str__(self):
        # Fall back to the raw language code when no display name is configured.
        return force_str(self.get_display_name() or self.language_code)
class TranslatableMixin(models.Model):
translation_key = models.UUIDField(default=uuid.uuid4, editable=False)
locale = models.ForeignKey(Locale, on_delete=models.PROTECT, related_name="+", editable=False)
class Meta:
abstract = True
unique_together = [("translation_key", "locale")]
@classmethod
def check(cls, **kwargs):
errors = super(TranslatableMixin, cls).check(**kwargs)
is_translation_model = cls.get_translation_model() is cls
# Raise error if subclass has removed the unique_together constraint
# No need to check this on multi-table-inheritance children though as it only needs to be applied to
# the table that has the translation_key/locale fields
if is_translation_model and ("translation_key", "locale") not in cls._meta.unique_together:
errors.append(
checks.Error(
"{0}.{1} is missing a unique_together constraint for the translation key and locale fields"
.format(cls._meta.app_label, cls.__name__),
hint="Add ('translation_key', 'locale') to {}.Meta.unique_together".format(cls.__name__),
obj=cls,
id='wagtailcore.E003',
)
)
return errors
@property
def localized(self):
"""
Finds the translation in the current active language.
If there is no translation in the active language, self is returned.
"""
try:
locale = Locale.get_active()
except (LookupError, Locale.DoesNotExist):
return self
if locale.id == self.locale_id:
return self
return self.get_translation_or_none(locale) or self
def get_translations(self, inclusive=False):
"""
Returns a queryset containing the translations of this instance.
"""
translations = self.__class__.objects.filter(
translation_key=self.translation_key
)
if inclusive is False:
translations = translations.exclude(id=self.id)
return translations
def get_translation(self, locale):
"""
Finds the translation in the specified locale.
If there is no translation in that locale, this raises a ``model.DoesNotExist`` exception.
"""
return self.get_translations(inclusive=True).get(locale_id=pk(locale))
def get_translation_or_none(self, locale):
"""
Finds the translation in the specified locale.
If there is no translation in that locale, this returns None.
"""
try:
return self.get_translation(locale)
except self.__class__.DoesNotExist:
return None
def has_translation(self, locale):
"""
Returns True if a translation exists in the specified locale.
"""
return self.get_translations(inclusive=True).filter(locale_id=pk(locale)).exists()
def copy_for_translation(self, locale):
    """
    Return an unsaved copy of this instance retargeted at *locale*.

    Translatable child objects are retargeted as well; for models that are
    not ClusterableModel subclasses the child map is always empty.
    """
    copied, child_map = _copy(self)
    copied.locale = locale

    # Propagate the new locale to any translatable child objects.
    for (_child_relation, _old_pk), child in child_map.items():
        if isinstance(child, TranslatableMixin):
            child.locale = locale

    return copied
def get_default_locale(self):
    """
    Work out the locale a new instance should default to.

    Called just before the initial save. If the model has a ParentalKey to
    another translatable model, the locale is inherited from the object
    referenced by the first such key; otherwise the site-wide default
    locale is used.
    """
    parental_key = next(
        (
            field
            for field in self._meta.get_fields()
            if isinstance(field, ParentalKey)
            and issubclass(field.related_model, TranslatableMixin)
        ),
        None,
    )
    if parental_key is not None:
        parent_pk = parental_key.value_from_object(self)
        parent = (
            parental_key.related_model.objects.defer()
            .select_related("locale")
            .get(id=parent_pk)
        )
        return parent.locale

    return Locale.get_default()
@classmethod
def get_translation_model(cls):
    """
    Return this model's "Translation model".

    The "Translation model" is the model that owns the ``locale`` and
    ``translation_key`` fields. Usually that is ``cls`` itself, but with
    multi-table inheritance (as used by ``wagtailcore.Page``) it can be a
    super-class.
    """
    locale_field = cls._meta.get_field("locale")
    return locale_field.model
def bootstrap_translatable_model(model, locale):
    """
    Backfill ``translation_key`` and ``locale`` on rows of *model* that
    predate wagtail-localize being added to the site.

    Call this from a data migration, or use the
    "boostrap_translatable_models" management command instead.
    """
    pending = model.objects.filter(translation_key__isnull=True).defer()
    for obj in pending.iterator():
        obj.translation_key = uuid.uuid4()
        obj.locale = locale
        obj.save(update_fields=["translation_key", "locale"])
class BootstrapTranslatableModel(migrations.RunPython):
    """
    A ``RunPython`` migration operation that backfills the
    ``translation_key``/``locale`` fields on every instance of one model.

    ``model_string`` is an "app_label.ModelName" reference resolved at
    migration time; ``language_code`` defaults to the supported variant of
    the site's configured ``LANGUAGE_CODE``.
    """

    def __init__(self, model_string, language_code=None):
        if language_code is None:
            language_code = get_supported_content_language_variant(settings.LANGUAGE_CODE)

        def forwards(apps, schema_editor):
            # Use the historical models supplied by the migration framework,
            # not the live ones.
            model = apps.get_model(model_string)
            Locale = apps.get_model("wagtailcore.Locale")

            locale = Locale.objects.get(language_code=language_code)
            bootstrap_translatable_model(model, locale)

        def backwards(apps, schema_editor):
            # Nothing to undo: the fields are simply left populated.
            pass

        super().__init__(forwards, backwards)
class BootstrapTranslatableMixin(TranslatableMixin):
    """
    A version of TranslatableMixin without uniqueness constraints.

    This is to make it easy to transition existing models to being translatable.
    The process is as follows:
     - Add BootstrapTranslatableMixin to the model
     - Run makemigrations
     - Create a data migration for each app, then use the BootstrapTranslatableModel operation in
       wagtail.core.models on each model in that app
     - Change BootstrapTranslatableMixin to TranslatableMixin
     - Run makemigrations again
     - Migrate!
    """

    # Nullable counterparts of TranslatableMixin's fields, so existing rows
    # can be migrated before the values are backfilled.
    translation_key = models.UUIDField(null=True, editable=False)
    locale = models.ForeignKey(
        Locale, on_delete=models.PROTECT, null=True, related_name="+", editable=False
    )

    @classmethod
    def check(cls, **kwargs):
        # skip the check in TranslatableMixin that enforces the unique-together constraint
        return super(TranslatableMixin, cls).check(**kwargs)

    class Meta:
        abstract = True
def get_translatable_models(include_subclasses=False):
    """
    Return every concrete model that inherits from TranslatableMixin.

    By default only "root" translatable models are returned — those that
    own the translation fields themselves. Pass ``include_subclasses=True``
    to also include models that inherit translatability from a parent.
    """
    candidates = [
        model
        for model in apps.get_models()
        if issubclass(model, TranslatableMixin) and not model._meta.abstract
    ]

    if include_subclasses is False:
        # Keep only models that are their own translation model.
        roots = {model.get_translation_model() for model in candidates}
        candidates = [model for model in candidates if model in roots]

    return candidates
@receiver(pre_save)
def set_locale_on_new_instance(sender, instance, **kwargs):
    """
    ``pre_save`` handler that fills in a default locale on translatable
    instances that do not have one yet.
    """
    if not isinstance(instance, TranslatableMixin) or instance.locale_id is not None:
        return

    if kwargs["raw"]:
        # Fixture load: the page tree is probably in flux, so use the
        # global default locale rather than inspecting parents.
        instance.locale = Locale.get_default()
        return

    instance.locale = instance.get_default_locale()
| {
"content_hash": "a3d2db43355aaa32497fe5895c94617b",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 111,
"avg_line_length": 34.6797583081571,
"alnum_prop": 0.650317971948776,
"repo_name": "jnns/wagtail",
"id": "2c289264df996a238bbc92e0aa99a0d854b72890",
"size": "11479",
"binary": false,
"copies": "5",
"ref": "refs/heads/patch-2",
"path": "wagtail/core/models/i18n.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "150882"
},
{
"name": "HTML",
"bytes": "243839"
},
{
"name": "JavaScript",
"bytes": "87980"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1528233"
},
{
"name": "Shell",
"bytes": "7241"
}
],
"symlink_target": ""
} |
from ..cw_model import CWModel
class SurveyQuestion(CWModel):
    """
    ConnectWise SurveyQuestion entity.

    Attributes mirror the API schema; types are noted in the inline
    comments, and a leading ``*`` marks a field the API requires.
    """

    def __init__(self, json_dict=None):
        self.id = None  # (Integer)
        self.sequenceNumber = None  # (Integer)
        self.type = None  # *(Enum)
        self.question = None  # *(String(1000))
        self.options = None  # (SurveyQuestionOption[])
        self.includeFlag = None  # (Boolean)
        self.requiredFlag = None  # (Boolean)
        self.noAnswerPoints = None  # (Integer)
        self.surveyId = None  # (Integer)
        self._info = None  # (Metadata)

        # initialize object with json dict
        super().__init__(json_dict)
| {
"content_hash": "5e67e0b82e9aded206fe4d0a6cd46890",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 34.1578947368421,
"alnum_prop": 0.5639445300462249,
"repo_name": "joshuamsmith/ConnectPyse",
"id": "749f99d3d9b255c4ee3518914e546f802bfc7490",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service/survey_question.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158372"
}
],
"symlink_target": ""
} |
import random
from tests.ggrc import TestCase
from freezegun import freeze_time
from datetime import date, datetime
import os
from ggrc import db
from ggrc_workflows.models import Workflow, Cycle, TaskGroup
from ggrc_workflows import start_recurring_cycles
from ggrc_workflows.services.workflow_cycle_calculator import get_cycle_calculator
from tests.ggrc_workflows.generator import WorkflowsGenerator
from tests.ggrc.api_helper import Api
from tests.ggrc.generator import ObjectGenerator
from tests.ggrc_workflows.workflow_cycle_calculator.base_workflow_test_case import BaseWorkflowTestCase
# On CI, pin the RNG so failing runs can be reproduced locally.
if os.environ.get('TRAVIS', False):
    random.seed(1)  # so we can reproduce the tests if needed
class TestAnnuallyWorkflow(BaseWorkflowTestCase):
    """
    Integration tests for annually-recurring workflows.

    Each test freezes time, creates a workflow with annual tasks defined by
    relative start/end day+month, and asserts the computed cycle start/end
    dates and the workflow's next_cycle_start_date. Dates that fall on
    weekends are expected to be adjusted to the nearest workday.
    """

    def test_annually_workflow(self):
        """Basic annual workflow test.
        """
        annually_wf = {
            "title": "annually thingy",
            "description": "start this many a time",
            "frequency": "annually",
            "task_groups": [{
                "title": "task group",
                "task_group_tasks": [
                    {
                        'title': 'annual task 1',
                        "relative_start_day": 10,  # 6/10/2015 Wed
                        "relative_start_month": 6,
                        "relative_end_day": 25,  # 6/25/2015 Thu
                        "relative_end_month": 6,
                    },
                    {
                        'title': 'annual task 2',
                        "relative_start_day": 15,  # 6/15/2015 Mon
                        "relative_start_month": 6,
                        "relative_end_day": 9,  # 8/9/2015 Sun
                        "relative_end_month": 8,
                    }],
                "task_group_objects": self.random_objects
            },
            ]
        }

        with freeze_time("2015-6-8 13:00:00"):  # Mon, 6/8/2015
            _, wf = self.generator.generate_workflow(annually_wf)
            _, awf = self.generator.activate_workflow(wf)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.status, "Active")
            self.assertEqual(active_wf.next_cycle_start_date, date(2015, 6, 10))

        with freeze_time("2015-6-10 13:00:00"):  # Wed, 6/10/2015
            start_recurring_cycles()

            cycle = db.session.query(Cycle).filter(
                Cycle.workflow_id == wf.id).one()

            self.assertEqual(cycle.start_date, date(2015, 6, 10))
            # Because end date is on Sunday, relative start day will have to be adjusted
            self.assertEqual(cycle.end_date, date(2015, 8, 7))

            _, cycle = self.generator.generate_cycle(wf)  # 2016
            _, cycle = self.generator.generate_cycle(wf)  # 2017
            _, cycle = self.generator.generate_cycle(wf)  # 2018

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2019, 6, 10))

            self.assertEqual(cycle.start_date, date(2018, 6, 8))
            self.assertEqual(cycle.end_date, date(2018, 8, 9))

    def test_type_casting(self):
        """Verify type casting for string input

        Test if string values get converted correctly to integers
        and arithmetic works"""
        annually_wf = {
            "title": "annually thingy",
            "description": "start this many a time",
            "frequency": "annually",
            "task_groups": [{
                "title": "task group",
                "task_group_tasks": [],
                "task_group_objects": self.random_objects
            },
            ]
        }
        # Relative day/month values deliberately given as strings.
        task = {
            'title': 'annual task 1',
            "relative_start_day": "10",  # 6/10/2015 Wed
            "relative_start_month": "6",
            "relative_end_day": "25",  # 6/25/2015 Thu
            "relative_end_month": "6",
        }

        with freeze_time("2015-7-1 13:00"):
            _, wf = self.generator.generate_workflow(annually_wf)

            task_group = db.session.query(TaskGroup).filter(TaskGroup.workflow_id == wf.id).one()
            _, tgt = self.generator.generate_task_group_task(task_group, data=task)

            _, awf = self.generator.activate_workflow(wf)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.status, "Active")
            self.assertEqual(active_wf.next_cycle_start_date, date(2016, 6, 10))

        with freeze_time("2016-6-10 13:00"):
            start_recurring_cycles()

            cycle = db.session.query(Cycle).filter(
                Cycle.workflow_id == wf.id).one()

            self.assertEqual(cycle.start_date, date(2016, 6, 10))
            self.assertEqual(cycle.end_date, date(2016, 6, 24))  # 6/25/2015 is Sat
            self.assertEqual(active_wf.next_cycle_start_date, date(2017, 6, 9))  # 6/10/2017 is Sat

    def test_task_order(self):
        # Tasks are listed out of order; the calculator is expected to sort
        # them by (start month, start day).
        annually_wf = {
            "title": "annually thingy",
            "description": "start this many a time",
            "frequency": "annually",
            "task_groups": [{
                "title": "task group",
                "task_group_tasks": [
                    {
                        'title': 'annual task 1',
                        "relative_start_day": 21,  # 6/21/2015
                        "relative_start_month": 6,
                        "relative_end_day": 25,  # 6/25/2015 Thu
                        "relative_end_month": 6,
                    },
                    {
                        'title': 'annual task 2',
                        "relative_start_day": 11,  # 6/11/2015 Thu
                        "relative_start_month": 6,
                        "relative_end_day": 16,  # 6/16/2015 Tue
                        "relative_end_month": 6,
                    },
                    {
                        'title': 'annual task 6',
                        "relative_start_day": 2,  # 7/2/2015 Thu
                        "relative_start_month": 7,
                        "relative_end_day": 15,  # 7/15/2015 Wed
                        "relative_end_month": 7,
                    },
                    {
                        'title': 'annual task 3',
                        "relative_start_day": 3,  # 6/3/2015 Wed
                        "relative_start_month": 6,
                        "relative_end_day": 15,  # 6/15/2015 Mon
                        "relative_end_month": 6,
                    },
                    {
                        'title': 'annual task 4',
                        "relative_start_day": 8,  # 6/8/2015 Mon
                        "relative_start_month": 6,
                        "relative_end_day": 15,  # 6/15/2015 Mon
                        "relative_end_month": 6,
                    },
                    {
                        'title': 'annual task 5',
                        "relative_start_day": 2,  # 7/2/2015 Thu
                        "relative_start_month": 6,
                        "relative_end_day": 15,  # 6/15/2015 Mon
                        "relative_end_month": 6,
                    }],
                "task_group_objects": self.random_objects
            },
            ]
        }

        with freeze_time("2015-06-01 13:00"):
            _, wf = self.generator.generate_workflow(annually_wf)
            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            calculator = get_cycle_calculator(active_wf)
            self.assertEqual([2, 3, 8, 11, 21, 2], [task.relative_start_day for task in calculator.tasks])

    def test_adding_task_with_lesser_start_day_after_activating_workflow(self):
        """Test if NCSD gets updated correctly if user adds new task with lesser
        relative start day after workflow has already been activated."""
        annually_wf = {
            "title": "annually thingy",
            "description": "start this many a time",
            "frequency": "annually",
            "task_groups": [{
                "title": "task group",
                "task_group_tasks": [
                    {
                        'title': 'annually task 1',
                        "relative_start_day": 30,
                        "relative_start_month": 7,
                        "relative_end_day": 7,
                        "relative_end_month": 8,
                    }],
                "task_group_objects": []
            },
            ]
        }

        task = {
            'title': 'annually task 2',
            "relative_start_day": 20,
            "relative_start_month": 7,
            "relative_end_day": 22,
            "relative_end_month": 7,
        }

        with freeze_time("2015-07-27 13:00"):
            _, wf = self.generator.generate_workflow(annually_wf)
            _, awf = self.generator.activate_workflow(wf)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.status, "Active")
            self.assertEqual(active_wf.next_cycle_start_date, date(2015, 7, 30))

            _, cycle = self.generator.generate_cycle(wf)
            self.assertEqual(cycle.start_date, date(2015, 7, 30))
            self.assertEqual(cycle.end_date, date(2015, 8, 7))

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2016, 7, 29))

            # We add another task that starts on 20th
            task_group = db.session.query(TaskGroup).filter(
                TaskGroup.workflow_id == wf.id).one()
            _, tgt = self.generator.generate_task_group_task(task_group, data=task)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2016, 7, 20))

    def test_start_workflow_mid_cycle_with_task_before_and_after(self):
        # Activating mid-cycle should create the current cycle immediately
        # and schedule the next one for the following year.
        annually_wf = {
            "title": "annually thingy",
            "description": "start this many a time",
            "frequency": "annually",
            "task_groups": [{
                "title": "task group",
                "task_group_tasks": [
                    {
                        'title': 'annually task 1',
                        "relative_start_day": 1,
                        "relative_start_month": 7,
                        "relative_end_day": 1,
                        "relative_end_month": 7,
                    }, {
                        'title': 'annually task 2',
                        "relative_start_day": 2,
                        "relative_start_month": 7,
                        "relative_end_day": 2,
                        "relative_end_month": 7,
                    }, {
                        'title': 'annually task 3',
                        "relative_start_day": 3,
                        "relative_start_month": 7,
                        "relative_end_day": 3,
                        "relative_end_month": 7,
                    }, {
                        'title': 'annually task 4',
                        "relative_start_day": 4,
                        "relative_start_month": 7,
                        "relative_end_day": 4,
                        "relative_end_month": 7,
                    }, {
                        'title': 'annually task 5',
                        "relative_start_day": 6,
                        "relative_start_month": 7,
                        "relative_end_day": 6,
                        "relative_end_month": 7,
                    }],
                "task_group_objects": []
            },
            ]
        }

        with freeze_time("2015-07-03 13:00"):
            _, wf = self.generator.generate_workflow(annually_wf)
            _, awf = self.generator.activate_workflow(wf)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.status, "Active")
            self.assertEqual(active_wf.next_cycle_start_date, date(2016, 7, 1))

            cycle = db.session.query(Cycle).filter(
                Cycle.workflow_id == wf.id).one()
            self.assertEqual(cycle.start_date, date(2015, 7, 1))
            self.assertEqual(cycle.end_date, date(2015, 7, 6))

            _, cycle = self.generator.generate_cycle(wf)
            self.assertEqual(cycle.start_date, date(2016, 7, 1))
            self.assertEqual(cycle.end_date, date(2016, 7, 6))

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2017, 6, 30))

    def test_delete_all_tasks_after_cycles_were_already_created_and_create_new_task_group(self):
        """Check that workflow doesn't reset next cycle start date when all tasks are deleted after cycles were already created"""
        annually_wf = {
            "title": "annually thingy",
            "description": "start this many a time",
            "frequency": "annually",
            "task_groups": [{
                "title": "task group",
                "task_group_tasks": [
                    {
                        'title': 'annually task 1',
                        "relative_start_day": 15,
                        "relative_start_month": 7,
                        "relative_end_day": 19,
                        "relative_end_month": 8,
                    }],
                "task_group_objects": []
            },
            ]
        }

        new_task_group = {
            "title": "task group 2",
            'task_group_tasks': [
                {
                    'title': 'annually task 1',
                    "relative_start_day": 13,
                    "relative_start_month": 7,
                    "relative_end_day": 17,
                    "relative_end_month": 7,
                }],
            "task_group_objects": []
        }

        with freeze_time("2015-6-9 13:00:00"):  # Tuesday, 6/9/2015
            _, wf = self.generator.generate_workflow(annually_wf)
            _, awf = self.generator.activate_workflow(wf)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.status, "Active")
            self.assertEqual(active_wf.next_cycle_start_date, date(2015, 7, 15))

            _, cycle = self.generator.generate_cycle(wf)
            self.assertEqual(cycle.start_date, date(2015, 7, 15))
            self.assertEqual(cycle.end_date, date(2015, 8, 19))

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2016, 7, 15))

            _, cycle = self.generator.generate_cycle(wf)  # 2016
            _, cycle = self.generator.generate_cycle(wf)  # 2017

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2018, 7, 13))

            # Deleting the only task group clears the schedule...
            tg = db.session.query(TaskGroup).filter(
                TaskGroup.workflow_id == wf.id).one()
            response = self.generator.api.delete(tg, tg.id)
            self.assert200(response)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.status, "Active")
            self.assertEqual(active_wf.next_cycle_start_date, None)

            # ...and adding a new task group restores it without resetting
            # past the cycles already generated.
            _, tg = self.generator.generate_task_group(wf, data=new_task_group)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2018, 7, 13))

            _, cycle = self.generator.generate_cycle(wf)
            self.assertEqual(cycle.start_date, date(2018, 7, 13))
            self.assertEqual(cycle.end_date, date(2018, 7, 17))

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.next_cycle_start_date, date(2019, 7, 12))

    def test_workflow_mid_cycle_verify(self):
        # Activation on 8/10, mid-way through the 8/1-8/23 task span, should
        # backdate the first cycle to the (adjusted) cycle start.
        annually_wf = {
            "title": "annually thingy",
            "description": "start this many a time",
            "frequency": "annually",
            "task_groups": [{
                "title": "task group",
                "task_group_tasks": [
                    {
                        'title': 'annual task 1',
                        "relative_start_day": 1,
                        "relative_start_month": 8,
                        "relative_end_day": 4,
                        "relative_end_month": 8,
                    },
                    {
                        'title': 'annual task 2',
                        "relative_start_day": 5,
                        "relative_start_month": 8,
                        "relative_end_day": 8,
                        "relative_end_month": 8,
                    },
                    {
                        'title': 'annual task 3',
                        "relative_start_day": 9,
                        "relative_start_month": 8,
                        "relative_end_day": 15,
                        "relative_end_month": 8,
                    },
                    {
                        'title': 'annual task 4',
                        "relative_start_day": 16,
                        "relative_start_month": 8,
                        "relative_end_day": 19,
                        "relative_end_month": 8,
                    },
                    {
                        'title': 'annual task 5',
                        "relative_start_day": 20,
                        "relative_start_month": 8,
                        "relative_end_day": 23,
                        "relative_end_month": 8,
                    }],
                "task_group_objects": []
            },
            ]
        }

        with freeze_time("2015-8-10 13:00"):
            _, wf = self.generator.generate_workflow(annually_wf)
            _, awf = self.generator.activate_workflow(wf)

            active_wf = db.session.query(Workflow).filter(Workflow.id == wf.id).one()
            self.assertEqual(active_wf.status, "Active")
            self.assertEqual(active_wf.next_cycle_start_date, date(2016, 8, 1))

            cycle = db.session.query(Cycle).filter(
                Cycle.workflow_id == wf.id).one()
            self.assertEqual(cycle.start_date, date(2015, 7, 31))
            self.assertEqual(cycle.end_date, date(2015, 8, 21))
| {
"content_hash": "640141615bfb5f96dc259bf360ef648a",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 126,
"avg_line_length": 35.87614678899082,
"alnum_prop": 0.5723692622426799,
"repo_name": "hasanalom/ggrc-core",
"id": "d90213f7656d450604fa057cb1612234f677fd23",
"size": "15884",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/tests/ggrc_workflows/workflow_cycle_calculator/test_annually_workflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235548"
},
{
"name": "Cucumber",
"bytes": "140478"
},
{
"name": "HTML",
"bytes": "943449"
},
{
"name": "JavaScript",
"bytes": "1205686"
},
{
"name": "Makefile",
"bytes": "5936"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1874549"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11719"
}
],
"symlink_target": ""
} |
from uuid import uuid4
from cassandra.cqlengine import ValidationError
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine import columns
from tests.integration.cqlengine import is_prepend_reversed
from tests.integration.cqlengine.base import BaseCassEngTestCase
class TestQueryUpdateModel(Model):
    """cqlengine model used to exercise queryset update operations.

    Covers a scalar column plus one of each collection type (set, list,
    map) so the collection-specific update operators can be tested.
    """

    partition = columns.UUID(primary_key=True, default=uuid4)
    cluster = columns.Integer(primary_key=True)
    count = columns.Integer(required=False)
    text = columns.Text(required=False, index=True)
    text_set = columns.Set(columns.Text, required=False)
    text_list = columns.List(columns.Text, required=False)
    text_map = columns.Map(columns.Text, columns.Text, required=False)
class QueryUpdateTests(BaseCassEngTestCase):
    """Integration tests for ``QuerySet.update`` against Cassandra.

    Each test seeds rows into TestQueryUpdateModel, performs an update
    through the queryset API and reads the rows back to verify the effect.
    """

    @classmethod
    def setUpClass(cls):
        super(QueryUpdateTests, cls).setUpClass()
        sync_table(TestQueryUpdateModel)

    @classmethod
    def tearDownClass(cls):
        super(QueryUpdateTests, cls).tearDownClass()
        drop_table(TestQueryUpdateModel)

    def test_update_values(self):
        """ tests calling update on a queryset """
        partition = uuid4()
        for i in range(5):
            TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

        # sanity check
        for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
            assert row.cluster == i
            assert row.count == i
            assert row.text == str(i)

        # perform update
        TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)

        for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
            assert row.cluster == i
            assert row.count == (6 if i == 3 else i)
            assert row.text == str(i)

    def test_update_values_validation(self):
        """ tests calling update on models with values passed in """
        partition = uuid4()
        for i in range(5):
            TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

        # sanity check
        for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
            assert row.cluster == i
            assert row.count == i
            assert row.text == str(i)

        # perform update with a non-integer count: must be rejected
        with self.assertRaises(ValidationError):
            TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf')

    def test_invalid_update_kwarg(self):
        """ tests that passing in a kwarg to the update method that isn't a column will fail """
        with self.assertRaises(ValidationError):
            TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)

    def test_primary_key_update_failure(self):
        """ tests that attempting to update the value of a primary key will fail """
        with self.assertRaises(ValidationError):
            TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)

    def test_null_update_deletes_column(self):
        """ setting a field to null in the update should issue a delete statement """
        partition = uuid4()
        for i in range(5):
            TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

        # sanity check
        for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
            assert row.cluster == i
            assert row.count == i
            assert row.text == str(i)

        # perform update
        TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)

        for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
            assert row.cluster == i
            assert row.count == i
            assert row.text == (None if i == 3 else str(i))

    def test_mixed_value_and_null_update(self):
        """ tests that updating a columns value, and removing another works properly """
        partition = uuid4()
        for i in range(5):
            TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

        # sanity check
        for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
            assert row.cluster == i
            assert row.count == i
            assert row.text == str(i)

        # perform update
        TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)

        for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
            assert row.cluster == i
            assert row.count == (6 if i == 3 else i)
            assert row.text == (None if i == 3 else str(i))

    def test_counter_updates(self):
        # TODO: counter column updates are not covered yet
        pass

    def test_set_add_updates(self):
        partition = uuid4()
        cluster = 1
        TestQueryUpdateModel.objects.create(
            partition=partition, cluster=cluster, text_set=set(("foo",)))
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(text_set__add=set(('bar',)))
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        self.assertEqual(obj.text_set, set(("foo", "bar")))

    def test_set_add_updates_new_record(self):
        """ If the key doesn't exist yet, an update creates the record
        """
        partition = uuid4()
        cluster = 1
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(text_set__add=set(('bar',)))
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        self.assertEqual(obj.text_set, set(("bar",)))

    def test_set_remove_updates(self):
        partition = uuid4()
        cluster = 1
        TestQueryUpdateModel.objects.create(
            partition=partition, cluster=cluster, text_set=set(("foo", "baz")))
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(
                text_set__remove=set(('foo',)))
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        self.assertEqual(obj.text_set, set(("baz",)))

    def test_set_remove_new_record(self):
        """ Removing something not in the set should silently do nothing
        """
        partition = uuid4()
        cluster = 1
        TestQueryUpdateModel.objects.create(
            partition=partition, cluster=cluster, text_set=set(("foo",)))
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(
                text_set__remove=set(('afsd',)))
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        self.assertEqual(obj.text_set, set(("foo",)))

    def test_list_append_updates(self):
        partition = uuid4()
        cluster = 1
        TestQueryUpdateModel.objects.create(
            partition=partition, cluster=cluster, text_list=["foo"])
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(
                text_list__append=['bar'])
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        self.assertEqual(obj.text_list, ["foo", "bar"])

    def test_list_prepend_updates(self):
        """ Prepend two things since order is reversed by default by CQL """
        partition = uuid4()
        cluster = 1
        original = ["foo"]
        TestQueryUpdateModel.objects.create(
            partition=partition, cluster=cluster, text_list=original)
        prepended = ['bar', 'baz']
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(
                text_list__prepend=prepended)
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        # Older Cassandra versions prepend the elements in reverse order.
        expected = (prepended[::-1] if is_prepend_reversed() else prepended) + original
        self.assertEqual(obj.text_list, expected)

    def test_map_update_updates(self):
        """ Merge a dictionary into existing value """
        partition = uuid4()
        cluster = 1
        TestQueryUpdateModel.objects.create(
            partition=partition, cluster=cluster,
            text_map={"foo": '1', "bar": '2'})
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(
                text_map__update={"bar": '3', "baz": '4'})
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        self.assertEqual(obj.text_map, {"foo": '1', "bar": '3', "baz": '4'})

    def test_map_update_none_deletes_key(self):
        """ The CQL behavior is if you set a key in a map to null it deletes
        that key from the map.  Test that this works with __update.
        """
        partition = uuid4()
        cluster = 1
        TestQueryUpdateModel.objects.create(
            partition=partition, cluster=cluster,
            text_map={"foo": '1', "bar": '2'})
        TestQueryUpdateModel.objects(
            partition=partition, cluster=cluster).update(
                text_map__update={"bar": None})
        obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
        self.assertEqual(obj.text_map, {"foo": '1'})
| {
"content_hash": "ab072c7029702078cdd7f7ff94fa9aef",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 96,
"avg_line_length": 42.96788990825688,
"alnum_prop": 0.6340343760008541,
"repo_name": "mike-tr-adamson/python-driver",
"id": "a3b80f15f54b8318a4b6748b7eb39b8a41634dcd",
"size": "9942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/cqlengine/query/test_updates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "24192"
},
{
"name": "Python",
"bytes": "1569415"
}
],
"symlink_target": ""
} |
'''
Created on Sep 30, 2014
@author: Valentina
'''
import sys
import processvideo as pv
import cv2
import matplotlib.pyplot as plt
import numpy as np
import util
def track(video, cursorimg):
    """Track *cursorimg* through *video*; returns one position per frame."""
    return video.tracktemplate(cursorimg, debug=True)
def write(pos, cursorpostxt="cursorpos.txt"):
print "Writing to", cursorpostxt
cursorpos = open(cursorpostxt, "w")
for p in pos:
if (p == None):
cursorpos.write("%i\t%i\n" % (-1, -1))
else:
cursorpos.write("%i\t%i\n" % (int(p[0]), int(p[1])))
cursorpos.close()
def readpos(txtfilename):
    """Load previously written positions from *txtfilename*."""
    return util.list_of_vecs_from_txt(txtfilename)
def main_track_cursor():
    """CLI entry: track a cursor template (argv[2]) through a video (argv[1])."""
    videoname = sys.argv[1]
    cursorfile = sys.argv[2]

    vid = pv.ProcessVideo(videoname)
    template = cv2.imread(cursorfile)

    positions = track(vid, template)
    write(positions, vid.videoname + "_cursorpos.txt")
def main_track_frame():
    """CLI entry: track frame positions against a panorama image (argv[2])."""
    videoname = sys.argv[1]
    panoramapath = sys.argv[2]

    vid = pv.ProcessVideo(videoname)
    panorama = cv2.imread(panoramapath)

    positions = vid.trackframepos(panorama, False)
    write(positions, vid.videoname + "_framepos.txt")
def plot_ty(pos, outfile="cursor_ty.png"):
    """
    Plot cursor y-position against frame number and save the figure to
    *outfile*.

    ``pos`` is a sequence of (x, y) pairs, one per frame.
    """
    t = np.linspace(0, len(pos)-1, len(pos))
    y = [p[1] for p in pos]
    # Debug output (Python 2 print statements).
    print 'len(t)', len(t)
    print 'len(pos)', len(pos)
    print 'len(y)', len(y)
    plt.plot(t, y,'b.')
    plt.xlabel("Frame Number")
    plt.ylabel("Cursor y-position")
    plt.xlim(0, len(pos))
    plt.savefig(outfile)
    plt.show()  # blocks until the window is closed
    plt.close()
# Script entry point: frame tracking is the default action.
if __name__ == "__main__":
    main_track_frame()
"content_hash": "e48258761b77058eeebb7dee205f80e7",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 64,
"avg_line_length": 26,
"alnum_prop": 0.6095571095571095,
"repo_name": "adobe-research/video-lecture-summaries",
"id": "fcb24121956f7e82c8047e73d7f75866e9449894",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/trackcursor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6400"
},
{
"name": "BlitzBasic",
"bytes": "63"
},
{
"name": "CSS",
"bytes": "43297"
},
{
"name": "HTML",
"bytes": "15459294"
},
{
"name": "JavaScript",
"bytes": "239670"
},
{
"name": "PostScript",
"bytes": "3330579"
},
{
"name": "Python",
"bytes": "738196"
},
{
"name": "Ruby",
"bytes": "573"
},
{
"name": "TeX",
"bytes": "10314"
}
],
"symlink_target": ""
} |
import word
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(fname):
    """Return the full text content of the file *fname*."""
    with open(fname) as fp:
        return fp.read()
# Package metadata for wordgrapher.
setup(
    name='wordgrapher',
    version=word.__version__,
    description='Word Graph utility built with NLTK and TextBlob',
    long_description=(read("README.md")),
    keywords='tf-idf nlp graph machine learning',
    # NOTE(review): `license` is conventionally a short identifier ("MIT"),
    # not the entire LICENSE file contents — confirm this is intentional.
    license=read("LICENSE"),
    author='Batista Harahap',
    author_email='batista@bango29.com',
    url='https://github.com/tistaharahap/WordGraph',
    # NOTE(review): these look like runtime dependencies; `install_requires`
    # is probably intended rather than `setup_requires` — verify.
    setup_requires=['nltk', 'textblob>=0.5.0', 'greenlet', 'gevent'],
    packages=['word', 'mmirman'],
    classifiers=(
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        "Topic :: Text Processing :: Linguistic",
    )
)
"content_hash": "c363a851219beb047cc187c6dc8b750a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 27.151515151515152,
"alnum_prop": 0.6417410714285714,
"repo_name": "tistaharahap/WordGraph",
"id": "6445493cebe6a544910bc11cbd98a796d87d5e8f",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22303"
}
],
"symlink_target": ""
} |
import re
import time
import requests
from typing import Union, List
from ..exceptions import APIError, RaceCardError, InvalidResponse
from ..utils import check_status_code
from .baseendpoint import BaseEndpoint
from .. import resources
from ..compat import json
class RaceCard(BaseEndpoint):
"""
RaceCard operations.
"""
app_key = None
def login(self, session: requests.Session = None) -> None:
"""
Parses app key from betfair exchange site.
:param requests.session session: Requests session object
"""
session = session or self.client.session
try:
response = session.get(self.login_url)
except requests.ConnectionError as e:
raise APIError(None, self.login_url, None, e)
except Exception as e:
raise APIError(None, self.login_url, None, e)
app_key = re.findall(
r'''"appKey":\s"(.*?)"''', response.content.decode("utf-8")
)
if app_key:
self.app_key = app_key[0]
else:
raise RaceCardError("Unable to find appKey")
def get_race_card(
self,
market_ids: list,
data_entries: str = None,
session: requests.Session = None,
lightweight: bool = False,
) -> Union[list, List[resources.RaceCard]]:
"""
Returns a list of race cards based on market ids provided.
:param list market_ids: The filter to select desired markets
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceCard]
"""
if not self.app_key:
raise RaceCardError(
"You need to login before requesting a race_card\n"
"APIClient.race_card.login()"
)
params = self.create_race_card_req(market_ids, data_entries)
(response, response_json, elapsed_time) = self.request(
"raceCard", params=params, session=session
)
return self.process_response(
response_json, resources.RaceCard, elapsed_time, lightweight
)
def get_race_result(
self,
market_ids: list,
data_entries: str = None,
session: requests.Session = None,
lightweight: bool = True,
) -> list:
"""
Returns a list of race results based on event ids provided.
:param list market_ids: The filter to select desired events
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceResult]
"""
if not self.app_key:
raise RaceCardError(
"You need to login before requesting a race_card\n"
"APIClient.race_card.login()"
)
params = self.create_race_result_req(market_ids, data_entries)
(response, response_json, elapsed_time) = self.request(
"raceResults", params=params, session=session
)
return self.process_response(response_json, None, elapsed_time, lightweight)
def request(
self, method: str = None, params: dict = None, session: requests.Session = None
) -> (dict, float):
session = session or self.client.session
time_sent = time.time()
url = "%s%s" % (self.url, method)
try:
response = session.get(
url,
params=params,
headers=self.headers,
timeout=(self.connect_timeout, self.read_timeout),
)
except requests.ConnectionError as e:
raise APIError(None, method, params, e)
except Exception as e:
raise APIError(None, method, params, e)
elapsed_time = time.time() - time_sent
check_status_code(response)
try:
response_json = json.loads(response.content.decode("utf-8"))
except ValueError:
raise InvalidResponse(response.text)
return response, response_json, elapsed_time
@staticmethod
def create_race_card_req(market_ids: list, data_entries: str) -> dict:
if not data_entries:
data_entries = "RACE, TIMEFORM_DATA, RUNNERS, RUNNER_DETAILS"
return {"dataEntries": data_entries, "marketId": ",".join(market_ids)}
@staticmethod
def create_race_result_req(market_ids: list, data_entries: str) -> dict:
if not data_entries:
data_entries = "RUNNERS, MARKETS, PRICES, RACE, COURSE"
return {
"dataEntries": data_entries,
"marketId": ",".join(market_ids),
"sortBy": "DATE_DESC",
}
@property
def headers(self) -> dict:
return {
"Connection": "keep-alive",
"Content-Type": "application/json",
"X-Application": self.app_key,
}
@property
def login_url(self) -> str:
return "https://www.betfair.com/exchange/plus/"
@property
def url(self) -> str:
return "https://www.betfair.com/rest/v2/"
| {
"content_hash": "40caf56db644cb9ab15b2e5014c5388d",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 87,
"avg_line_length": 33.782051282051285,
"alnum_prop": 0.5910815939278937,
"repo_name": "liampauling/betfairlightweight",
"id": "a2f1a96ffeaff4aa96f86d3415a929e378c94ea0",
"size": "5270",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "betfairlightweight/endpoints/racecard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "305902"
}
],
"symlink_target": ""
} |
from collections.abc import Mapping
from flask import g
from marshmallow import EXCLUDE, Schema
from webargs.flaskparser import FlaskParser
from webargs.multidictproxy import MultiDictProxy
from werkzeug.datastructures import MultiDict
def _strip_whitespace(value):
if isinstance(value, str):
value = value.strip()
elif isinstance(value, MultiDict):
return type(value)((k, _strip_whitespace(v)) for k, vals in value.lists() for v in vals)
elif isinstance(value, dict):
return {k: _strip_whitespace(v) for k, v in value.items()}
elif isinstance(value, (list, set)):
return type(value)(map(_strip_whitespace, value))
return value
class IndicoFlaskParser(FlaskParser):
    """
    A custom webargs flask parser that strips surrounding whitespace.
    """

    DEFAULT_LOCATION = 'json_or_form'

    def load_querystring(self, req, schema):
        # rebuild as a mutable MultiDict since `schema_pre_load` may want to
        # modify the data later
        cleaned = _strip_whitespace(MultiDict(req.args))
        return MultiDictProxy(cleaned, schema)

    def load_form(self, req, schema):
        # rebuild as a mutable MultiDict since `schema_pre_load` may want to
        # modify the data later
        cleaned = _strip_whitespace(MultiDict(req.form))
        return MultiDictProxy(cleaned, schema)

    def load_json(self, req, schema):
        payload = super().load_json(req, schema)
        return _strip_whitespace(payload)
# Shared parser instance used by all the use_args/use_kwargs decorators below.
parser = IndicoFlaskParser()


@parser.error_handler
def handle_error(error, req, schema, *, error_status_code, error_headers):
    """Flatten webargs' location-namespaced validation errors.

    Registered as the parser's error callback; after un-namespacing the
    messages it delegates to the inherited ``FlaskParser.handle_error``
    (``IndicoFlaskParser`` does not override it) to emit the HTTP response.
    """
    # since 6.0.0b7 errors are namespaced by their source. this is nice for APIs taking
    # data from different locations to serve very specific errors, but in a typical web app
    # where you usually have only one source and certainly not the same field name in different
    # locations, it just makes handling errors in JS harder since we suddenly have to care if
    # it's form data or json data.
    # this gets even worse when using the `json_or_form_or_query` meta location where we don't
    # have detailed location information anyway.
    namespaced = error.messages  # mutating this below is safe
    error.messages = namespaced.popitem()[1]
    assert not namespaced  # we never expect to have more than one location
    parser.handle_error(error, req, schema, error_status_code=error_status_code, error_headers=error_headers)
def _split_kwargs(kwargs):
schema_kwargs = kwargs.copy()
context = schema_kwargs.pop('context', {})
webargs_kwargs = {
a: schema_kwargs.pop(a)
for a in ('location', 'as_kwargs', 'validate', 'error_status_code', 'error_headers', 'req', 'unknown')
if a in schema_kwargs
}
return schema_kwargs, context, webargs_kwargs
def use_args(schema_cls, **kwargs):
    """Similar to webargs' ``use_args`` but allows passing schema kwargs.

    This makes it much easier to use ``partial=True`` for PATCH endpoints.

    :param schema_cls: A marshmallow Schema or an argmap dict.
    :param kwargs: Any keyword arguments that are supported by ``use_args`` or the
                   Schema constructor.
    """
    schema_kwargs, context, webargs_kwargs = _split_kwargs(kwargs)
    webargs_kwargs.setdefault('unknown', EXCLUDE)
    if isinstance(schema_cls, Mapping):
        # argmap dicts get turned into a schema class on the fly
        schema_cls = parser.schema_class.from_dict(schema_cls)
    elif isinstance(schema_cls, Schema):
        raise TypeError('Pass a schema or an argmap instead of a schema instance to use_args/use_kwargs')

    def build_schema(req):
        return schema_cls(**schema_kwargs, context=context)

    return parser.use_args(build_schema, **webargs_kwargs)
def use_kwargs(schema_cls, **kwargs):
    """Like ``use_args``, but using kwargs when calling the decorated function."""
    # force as_kwargs regardless of what the caller passed
    return use_args(schema_cls, **{**kwargs, 'as_kwargs': True})
def use_rh_args(schema_cls, **kwargs):
    """Similar to ``use_args`` but populates the context from RH attributes.

    The Schema needs a Meta class with an ``rh_context`` attribute specifying
    which attributes should be taken from the current RH.

    :param schema_cls: A marshmallow Schema or an argmap dict.
    :param rh_context: When using an argmap, this argument is required and behaves
                       exactly like the ``rh_context`` Meta attribute mentioned above.
    :param kwargs: Any keyword arguments that are supported by ``use_args`` or the
                   Schema constructor.
    """
    schema_kwargs, default_context, webargs_kwargs = _split_kwargs(kwargs)
    webargs_kwargs.setdefault('unknown', EXCLUDE)
    if isinstance(schema_cls, Mapping):
        # argmap: build a schema class on the fly; the attribute list comes
        # from the explicit `rh_context` kwarg
        schema_cls = parser.schema_class.from_dict(schema_cls)
        rh_context_attrs = schema_kwargs.pop('rh_context')
    elif isinstance(schema_cls, Schema):
        raise TypeError('Pass a schema or an argmap instead of a schema instance to use_rh_args/use_rh_kwargs')
    else:
        if 'rh_context' in schema_kwargs:
            raise TypeError('The `rh_context` kwarg is only supported when passing an argmap')
        rh_context_attrs = schema_cls.Meta.rh_context

    def build_schema(req):
        # merge the static context with attributes pulled from the current RH
        ctx = dict(default_context)
        for attr in rh_context_attrs:
            ctx[attr] = getattr(g.rh, attr, None)
        return schema_cls(context=ctx, **schema_kwargs)

    return parser.use_args(build_schema, **webargs_kwargs)
def use_rh_kwargs(schema_cls, **kwargs):
    """Like ``use_rh_args``, but using kwargs when calling the decorated function."""
    # force as_kwargs regardless of what the caller passed
    return use_rh_args(schema_cls, **{**kwargs, 'as_kwargs': True})
| {
"content_hash": "667c50e17b788a78c48c3e64454ef644",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 111,
"avg_line_length": 40.56617647058823,
"alnum_prop": 0.6920427768714881,
"repo_name": "ThiefMaster/indico",
"id": "a52d56b07d7dbd81719688bf7a617866172d83de",
"size": "5731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/web/args.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import time
from apns import APNs
import simplejson as json
class FeedbackProxy(object):
    """Thin wrapper around the APNs feedback service.

    Connects with the configured certificate/key pair and exposes the
    feedback entries as a JSON document mapping device token to a unix
    timestamp.
    """

    def __init__(self, use_sandbox, cert_file, key_file):
        self.use_sandbox = use_sandbox
        self.cert_file = cert_file
        self.key_file = key_file
        self._apns = APNs(
            use_sandbox=use_sandbox,
            cert_file=cert_file,
            key_file=key_file,
        )

    def get(self):
        """Return the feedback entries as a JSON string: {token: epoch}."""
        feedback = self._apns.feedback_server
        entries = {}
        for token, when in feedback.items():
            # `when` looks like a datetime; converted to epoch seconds
            entries[token] = time.mktime(when.timetuple())
        return json.dumps(entries)
| {
"content_hash": "51713d8feea5c4896ca686d29a20cb46",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 61,
"avg_line_length": 24.444444444444443,
"alnum_prop": 0.5787878787878787,
"repo_name": "voyagegroup/apns-proxy-server",
"id": "a4772ff1cf113a740973f2eec75a6ef0c3b28dfd",
"size": "685",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apns_proxy_server/feedback.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1268"
},
{
"name": "Python",
"bytes": "41456"
},
{
"name": "Shell",
"bytes": "832"
}
],
"symlink_target": ""
} |
import unittest
from pivotal import Pivotal, BASE_URL, PROTO_SWITCH
class PivotalTest(unittest.TestCase):
    """Tests for URL construction in the Pivotal API wrapper."""

    def test_protocol_switch(self):
        # PROTO_SWITCH maps the use_https flag to the URL scheme prefix.
        self.assertEqual(PROTO_SWITCH[True], 'https://')
        self.assertEqual(PROTO_SWITCH[False], 'http://')

    def _test_url_strings(self, use_https):
        # Shared helper: verifies that chained resource calls build the
        # expected URLs under the given protocol ('ABCDEF' is a dummy token).
        pv = Pivotal('ABCDEF', use_https=use_https)
        url = PROTO_SWITCH[use_https] + BASE_URL
        self.assertEqual(pv.projects().url, url + 'projects')
        # int and str ids must produce the same path
        self.assertEqual(pv.projects(123).url, url + 'projects/123')
        self.assertEqual(pv.projects('123').url, url + 'projects/123')
        self.assertEqual(pv.projects('123').stories().url,
                         url + 'projects/123/stories')
        # query parameters must be urlencoded (':' -> %3A)
        self.assertEqual(pv.projects('123').stories(filter='state:unstarted').url,
                         url + 'projects/123/stories?filter=state%3Aunstarted')

    def test_https_urls(self):
        self._test_url_strings(use_https=True)

    def test_http_urls(self):
        self._test_url_strings(use_https=False)


if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
| {
"content_hash": "483076f6bfc6adf447ed2686131ef161",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 33.24242424242424,
"alnum_prop": 0.6226071103008204,
"repo_name": "robhudson/pivotal-py",
"id": "76f3b39a97433c40e66209544c1d247ccfc42a12",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pivotal/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7788"
}
],
"symlink_target": ""
} |
import distutils.version as dist_version
import os
from oslo.config import cfg
from manila.db import migration
from manila.db.sqlalchemy.session import get_engine
from manila import exception
from manila.openstack.common import log as logging
import migrate
from migrate.versioning import util as migrate_util
import sqlalchemy
LOG = logging.getLogger(__name__)
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
    """Replacement for migrate's ``with_engine`` wrapper (see module note
    about migrate 0.7.3).

    Builds an engine from the url (first positional argument), injects it
    into the wrapped call as ``engine`` and disposes of engines constructed
    here once the call finishes.
    """
    # NOTE(review): migrate_util.decorator supplies the wrapped function as
    # ``f`` and the original call's arguments in ``a``/``kw`` — confirm
    # against the installed migrate version.
    url = a[0]
    engine = migrate_util.construct_engine(url, **kw)
    try:
        kw['engine'] = engine
        return f(*a, **kw)
    finally:
        # only dispose engines this wrapper created (not caller-supplied ones)
        if isinstance(engine, migrate_util.Engine) and engine is not url:
            migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
            engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and manila depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
# Monkeypatch older migrate releases (or builds without __version__) with
# the engine-disposing wrapper defined above.
if (not hasattr(migrate, '__version__') or
        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
    migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository

CONF = cfg.CONF

# module-level cache for the migrate Repository (see _find_migrate_repo)
_REPOSITORY = None
def db_sync(version=None):
    """Migrate the database schema to *version* (latest when None).

    Upgrades when the target revision is newer than the current one (or
    when no target is given); downgrades otherwise.
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.Error(_("version should be an integer"))
    current = db_version()
    repository = _find_migrate_repo()
    if version is None or version > current:
        return versioning_api.upgrade(get_engine(), repository, version)
    return versioning_api.downgrade(get_engine(), repository, version)
def db_version():
    """Return the current schema version of the database.

    If the database is not yet under version control, bootstrap it: an
    empty database is stamped with the initial version so that all
    migrations will run, while a non-empty pre-versioning database is
    rejected with an error.
    """
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        # If we aren't version controlled we may already have the database
        # in the state from before we started version control, check for that
        # and set up version_control appropriately
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            # brand new database: stamp it with the pre-history version
            db_version_control(migration.INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            raise exception.Error(_("Upgrade DB using Essex release first."))
def db_version_control(version=None):
    """Place the database under version control at *version* and return it."""
    repo = _find_migrate_repo()
    engine = get_engine()
    versioning_api.version_control(engine, repo, version)
    return version
def _find_migrate_repo():
    """Return the migrate Repository for this package, caching the instance."""
    global _REPOSITORY
    repo_path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'migrate_repo')
    # NOTE: ``assert`` is stripped under ``python -O``; the check is best-effort
    assert os.path.exists(repo_path)
    if _REPOSITORY is None:
        _REPOSITORY = Repository(repo_path)
    return _REPOSITORY
| {
"content_hash": "79de715b5cad8f69f9fc4c3238d8be51",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 77,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.67316766278704,
"repo_name": "aostapenko/manila",
"id": "a0fa5aaddc4576d890457a57a47ede0088a59a1f",
"size": "3956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manila/db/sqlalchemy/migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15168"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1589353"
},
{
"name": "Shell",
"bytes": "7282"
}
],
"symlink_target": ""
} |
import jwt
import structlog
from datetime import datetime, timedelta
from sqlalchemy.orm import exc
from opentaxii.auth import OpenTAXIIAuthAPI
from opentaxii.entities import Account as AccountEntity
from opentaxii.sqldb_helper import SQLAlchemyDB
from .models import Base, Account
__all__ = ['SQLDatabaseAPI']
log = structlog.getLogger(__name__)
class SQLDatabaseAPI(OpenTAXIIAuthAPI):
    """Naive SQL database implementation of OpenTAXII Auth API.

    Implementation will work with any DB supported by SQLAlchemy package.

    :param str db_connection: a string that indicates database dialect and
                          connection arguments that will be passed directly
                          to :func:`~sqlalchemy.engine.create_engine` method.

    :param bool create_tables=False: if True, tables will be created in the DB.
    :param str secret: secret string used for token generation
    :param int token_ttl_secs: TTL for JWT token, in seconds.
    :param engine_parameters=None: if defined, these arguments would be passed to sqlalchemy.create_engine
    """

    def __init__(
            self,
            db_connection,
            create_tables=False,
            secret=None,
            token_ttl_secs=None,
            **engine_parameters):
        self.db = SQLAlchemyDB(
            db_connection, Base, session_options={
                'autocommit': False, 'autoflush': True},
            **engine_parameters)
        if create_tables:
            self.db.create_all_tables()
        if not secret:
            raise ValueError('Secret is not defined for %s.%s' % (
                self.__module__, self.__class__.__name__))
        self.secret = secret
        self.token_ttl_secs = token_ttl_secs or 60 * 60  # 60min

    def init_app(self, app):
        """Attach the database to the Flask app."""
        self.db.init_app(app)

    def authenticate(self, username, password):
        """Return a JWT for *username* if the password matches, else None."""
        try:
            account = Account.query.filter_by(username=username).one()
        except exc.NoResultFound:
            return
        if not account.is_password_valid(password):
            return
        return self._generate_token(account.id, ttl=self.token_ttl_secs)

    def create_account(self, username, password, is_admin=False):
        """Create and persist a new account; return it as an entity."""
        account = Account(username=username, is_admin=is_admin)
        account.set_password(password)
        self.db.session.add(account)
        self.db.session.commit()
        return account_to_account_entity(account)

    def get_account(self, token):
        """Resolve *token* to an account entity, or None if invalid/expired."""
        account_id = self._get_account_id(token)
        if not account_id:
            return
        account = Account.query.get(account_id)
        if not account:
            return
        return account_to_account_entity(account)

    def delete_account(self, username):
        """Delete the account named *username* if it exists (no-op otherwise)."""
        account = Account.query.filter_by(username=username).one_or_none()
        if account:
            self.db.session.delete(account)
            self.db.session.commit()

    def get_accounts(self):
        """Return all accounts as entities."""
        return [
            account_to_account_entity(account)
            for account in Account.query.all()]

    def update_account(self, obj, password=None):
        """Create or update the account described by entity *obj*."""
        account = Account.query.filter_by(username=obj.username).one_or_none()
        if not account:
            account = Account(username=obj.username)
            self.db.session.add(account)
        if password is not None:
            account.set_password(password)
        account.permissions = obj.permissions
        account.is_admin = obj.is_admin
        self.db.session.commit()
        return account_to_account_entity(account)

    def _generate_token(self, account_id, ttl=None):
        """Return a signed JWT for *account_id* expiring in *ttl* seconds."""
        ttl = ttl or self.token_ttl_secs
        # BUG FIX: ttl is expressed in seconds (see token_ttl_secs), but it
        # was previously passed as ``timedelta(minutes=ttl)``, inflating the
        # token lifetime 60-fold.
        exp = datetime.utcnow() + timedelta(seconds=ttl)
        return jwt.encode(
            {'account_id': account_id, 'exp': exp},
            self.secret)

    def _get_account_id(self, token):
        """Decode *token* and return its account_id, or None when invalid."""
        try:
            payload = jwt.decode(token, self.secret)
        except jwt.ExpiredSignatureError:
            log.warning('Invalid token used', token=token)
            return
        except jwt.DecodeError:
            log.warning('Can not decode a token', token=token)
            return
        return payload.get('account_id')
def account_to_account_entity(account):
    """Convert an ORM ``Account`` row into an ``AccountEntity``."""
    fields = {attr: getattr(account, attr)
              for attr in ('id', 'username', 'is_admin', 'permissions')}
    return AccountEntity(**fields)
| {
"content_hash": "ce33d033920ca8ae04be3e0fa831b8fa",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 106,
"avg_line_length": 34.08527131782946,
"alnum_prop": 0.625653854901069,
"repo_name": "Intelworks/OpenTAXII",
"id": "95598def8757695fe13c51cad4074b5d27b8a3eb",
"size": "4397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opentaxii/auth/sqldb/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "203686"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
} |
import urllib
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest import config
from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
class TenantUsagesClientXML(RestClientXML):
    """XML client for the Nova os-simple-tenant-usage extension."""

    def __init__(self, auth_provider):
        super(TenantUsagesClientXML, self).__init__(auth_provider)
        self.service = CONF.compute.catalog_type

    def _parse_array(self, node):
        """Convert an XML element tree node into its dict representation."""
        return xml_to_json(node)

    def list_tenant_usages(self, params=None):
        """List usage records for all tenants, optionally filtered by *params*."""
        url = 'os-simple-tenant-usage'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, body = self.get(url, self.headers)
        parsed = self._parse_array(etree.fromstring(body))
        return resp, parsed['tenant_usage']

    def get_tenant_usage(self, tenant_id, params=None):
        """Fetch usage records for one tenant, optionally filtered by *params*."""
        url = 'os-simple-tenant-usage/%s' % tenant_id
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, body = self.get(url, self.headers)
        return resp, self._parse_array(etree.fromstring(body))
| {
"content_hash": "ccbed8e16b7cf1062eb5be8ad90136fc",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 66,
"avg_line_length": 30.07894736842105,
"alnum_prop": 0.6500437445319335,
"repo_name": "ntymtsiv/tempest",
"id": "93eeb00492501e701be5ff7121aa82ff35b5ab69",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/services/compute/xml/tenant_usages_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2312198"
},
{
"name": "Shell",
"bytes": "9160"
}
],
"symlink_target": ""
} |
import inspect
import uuid
from sys import version_info
if version_info[0] < 3:
str_buffer_types = [str]
else:
str_buffer_types = [bytes]
def add_tuples(a, b):
    """Element-wise sum of two sequences, returned as a list."""
    return [sum(pair) for pair in zip(a, b)]
def throw(x):
    # Function wrapper around the ``raise`` statement — presumably so callers
    # can raise from expression contexts (e.g. lambdas); verify against usage.
    raise x
def random_name_suffix():
    """Return a random 32-character lowercase hex string (uuid4-based)."""
    # ``uuid4().hex`` is already a str; the previous ``format(...)`` wrapper
    # was a redundant no-op conversion.
    return uuid.uuid4().hex
def get_padding_size(cur_offset, pragma_pack):
    """Bytes of padding needed to advance *cur_offset* to the next
    multiple of *pragma_pack* (zero when already aligned or at offset 0)."""
    if cur_offset == 0:
        return 0
    if cur_offset <= pragma_pack:
        return pragma_pack - cur_offset
    remainder = cur_offset % pragma_pack
    if remainder == 0:
        return 0
    return pragma_pack - remainder
def get_caller_name():
    # Walk two frames up the stack: f_back of this frame is the direct
    # caller, and its f_back is the caller's caller. The stack depth is
    # significant here, so this code must not be refactored into helpers.
    frame = inspect.currentframe().f_back.f_back
    # Prefer the package name; fall back to the module name when that
    # frame's globals carry no truthy __package__ (e.g. top-level scripts).
    mod_name = frame.f_globals['__package__'] or frame.f_globals['__name__']
    return mod_name
def paddify(size, machine_arch, pragma_pack, t):
    """Round *size* up by the required padding; the alignment comes from
    *pragma_pack* when given, otherwise from ``t.__align__[machine_arch]``."""
    alignment = pragma_pack or t.__align__[machine_arch]
    return size + get_padding_size(size, alignment)
| {
"content_hash": "0a22feb8932429f42137c58e0067159b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 113,
"avg_line_length": 22.82051282051282,
"alnum_prop": 0.648314606741573,
"repo_name": "bdr00/typedef",
"id": "a0194ba57bef830923b54c7540ecb1cacf951572",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "typedef/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "76535"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnvserver_authenticationcertpolicy_binding(base_resource) :
""" Binding class showing the authenticationcertpolicy that can be bound to vpnvserver.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._acttype = 0
self._secondary = False
self._name = ""
self._groupextraction = False
self._gotopriorityexpression = ""
self._bindpoint = ""
self.___count = 0
@property
def priority(self) :
ur"""The priority, if any, of the VPN virtual server policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority, if any, of the VPN virtual server policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression or other value specifying the next policy to evaluate if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax or classic expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression or other value specifying the next policy to evaluate if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax or classic expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policy(self) :
ur"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
ur"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def groupextraction(self) :
ur"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
def groupextraction(self, groupextraction) :
ur"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the virtual server.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the virtual server.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def secondary(self) :
ur"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
ur"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""Bind point to which to bind the policy. Applies only to rewrite and cache policies. If you do not set this parameter, the policy is bound to REQ_DEFAULT or RES_DEFAULT, depending on whether the policy rule is a response-time or a request-time expression.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""Bind point to which to bind the policy. Applies only to rewrite and cache policies. If you do not set this parameter, the policy is bound to REQ_DEFAULT or RES_DEFAULT, depending on whether the policy rule is a response-time or a request-time expression.
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(vpnvserver_authenticationcertpolicy_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# NOTE(review): errorcode 444 appears to denote an expired
				# session (the session is cleared) - confirm against the
				# NITRO protocol documentation.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					# No severity reported: treat any non-zero errorcode as fatal.
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.vpnvserver_authenticationcertpolicy_binding
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			# 'name' (the vserver name) is the identifying attribute of this binding.
			if self.name is not None :
				return str(self.name)
			return None
		except Exception as e :
			raise e
	@classmethod
	def add(cls, client, resource) :
		ur""" Use this API to add (bind) vpnvserver_authenticationcertpolicy_binding resources.
		Accepts either a single resource or a list of resources; only the
		bindable attributes are copied onto the update payload.
		"""
		try :
			if resource and type(resource) is not list :
				# Single resource: copy the bindable fields and issue one update.
				updateresource = vpnvserver_authenticationcertpolicy_binding()
				updateresource.name = resource.name
				updateresource.policy = resource.policy
				updateresource.priority = resource.priority
				updateresource.secondary = resource.secondary
				updateresource.groupextraction = resource.groupextraction
				updateresource.gotopriorityexpression = resource.gotopriorityexpression
				updateresource.bindpoint = resource.bindpoint
				return updateresource.update_resource(client)
			else :
				# List of resources: build a parallel payload list and send one bulk request.
				if resource and len(resource) > 0 :
					updateresources = [vpnvserver_authenticationcertpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].name = resource[i].name
						updateresources[i].policy = resource[i].policy
						updateresources[i].priority = resource[i].priority
						updateresources[i].secondary = resource[i].secondary
						updateresources[i].groupextraction = resource[i].groupextraction
						updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
						updateresources[i].bindpoint = resource[i].bindpoint
				return cls.update_bulk_request(client, updateresources)
		except Exception as e :
			raise e
	@classmethod
	def delete(cls, client, resource) :
		ur""" Use this API to delete (unbind) vpnvserver_authenticationcertpolicy_binding resources.
		Accepts either a single resource or a list of resources; note that,
		unlike add(), only the key fields are copied (no priority/gotopriorityexpression).
		"""
		try :
			if resource and type(resource) is not list :
				deleteresource = vpnvserver_authenticationcertpolicy_binding()
				deleteresource.name = resource.name
				deleteresource.policy = resource.policy
				deleteresource.secondary = resource.secondary
				deleteresource.groupextraction = resource.groupextraction
				deleteresource.bindpoint = resource.bindpoint
				return deleteresource.delete_resource(client)
			else :
				# Bulk delete: one payload object per input resource.
				if resource and len(resource) > 0 :
					deleteresources = [vpnvserver_authenticationcertpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i].name = resource[i].name
						deleteresources[i].policy = resource[i].policy
						deleteresources[i].secondary = resource[i].secondary
						deleteresources[i].groupextraction = resource[i].groupextraction
						deleteresources[i].bindpoint = resource[i].bindpoint
				return cls.delete_bulk_request(client, deleteresources)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, service, name) :
		ur""" Use this API to fetch vpnvserver_authenticationcertpolicy_binding resources.
		"""
		try :
			# Fetch all bindings for the given vserver name.
			obj = vpnvserver_authenticationcertpolicy_binding()
			obj.name = name
			response = obj.get_resources(service)
			return response
		except Exception as e:
			raise e
	@classmethod
	def get_filtered(cls, service, name, filter_) :
		ur""" Use this API to fetch filtered set of vpnvserver_authenticationcertpolicy_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = vpnvserver_authenticationcertpolicy_binding()
			obj.name = name
			# The filter expression is forwarded verbatim to the NITRO API.
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			return response
		except Exception as e:
			raise e
	@classmethod
	def count(cls, service, name) :
		ur""" Use this API to count vpnvserver_authenticationcertpolicy_binding resources configued on NetScaler.
		"""
		try :
			obj = vpnvserver_authenticationcertpolicy_binding()
			obj.name = name
			option_ = options()
			option_.count = True
			response = obj.get_resources(service, option_)
			if response :
				# With count=True the API returns a single pseudo-object whose
				# '___count' attribute holds the total.
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	@classmethod
	def count_filtered(cls, service, name, filter_) :
		ur""" Use this API to count the filtered set of vpnvserver_authenticationcertpolicy_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = vpnvserver_authenticationcertpolicy_binding()
			obj.name = name
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			if response :
				# Same '___count' convention as count() above.
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	class Staaddresstype:
		# Allowed values for the STA server address type.
		IPV4 = "IPV4"
		IPV6 = "IPV6"
	class Bindpoint:
		# Allowed values for the 'bindpoint' attribute documented above.
		REQUEST = "REQUEST"
		RESPONSE = "RESPONSE"
		ICA_REQUEST = "ICA_REQUEST"
		OTHERTCP_REQUEST = "OTHERTCP_REQUEST"
class vpnvserver_authenticationcertpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnvserver_authenticationcertpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnvserver_authenticationcertpolicy_binding = [vpnvserver_authenticationcertpolicy_binding() for _ in range(length)]
| {
"content_hash": "167d58f0a460bcffa08dbe8d57400ccf",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 430,
"avg_line_length": 41.05604719764012,
"alnum_prop": 0.7424198879149303,
"repo_name": "atopuzov/nitro-python",
"id": "dc193a0116f8ff7acf99470ebf092878fb8a31c7",
"size": "14532",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_authenticationcertpolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10881939"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
import fnmatch
import os
from flask import Flask, redirect, request, send_from_directory
from flask_compress import Compress
from loguru import logger
from flexget.webserver import register_app, register_home
# Module-level configuration for the v1 web UI.
logger = logger.bind(name='webui')
# Populated by register_web_ui(); 'app_base' is the directory the UI is served from.
manager = None
debug = False
app_base = None
# Directory layout relative to this file: sources, built app, bower packages.
ui_base = os.path.dirname(os.path.realpath(__file__))
ui_src = os.path.join(ui_base, 'src')
ui_dist = os.path.join(ui_base, 'app')
bower_components = os.path.join(ui_base, 'bower_components')
# The UI is its own Flask app, mounted under /v1/ with gzip compression.
webui_app = Flask(__name__)
Compress(webui_app)
webui_app.url_path = '/v1/'
HTTP_METHODS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH']
@webui_app.route('/<path:path>')
def serve_app(path):
    """Serve a static web-ui asset.

    In debug mode, bower packages and unbuilt sources are served from the
    source tree; otherwise assets come from the compiled app directory, with
    a failure page when no build is available.
    """
    if debug:
        if path.startswith('bower_components'):
            # BUG FIX: str.lstrip strips *characters from a set*, not a
            # prefix, so lstrip('bower_components') could also eat leading
            # letters of the remaining path. Slice the prefix off instead.
            return send_from_directory(
                bower_components, path[len('bower_components'):].lstrip('/')
            )
        if os.path.exists(os.path.join(ui_src, path)):
            return send_from_directory(ui_src, path)
    if not app_base:
        return send_from_directory(ui_base, 'load.failure.html')
    return send_from_directory(app_base, path)
@webui_app.route('/api/')
@webui_app.route('/api/<path:path>', methods=HTTP_METHODS)
def api_redirect(path='/'):
    """Forward any /v1/api/* request to the top-level /api/* endpoint.

    A 307 redirect preserves the HTTP method and request body.
    """
    target = request.full_path.replace('/v1', '', 1)
    return redirect(target, 307)
@webui_app.route('/')
def root():
    """Serve the single-page app shell, or a failure page when no build exists."""
    if app_base:
        return send_from_directory(app_base, 'app.html')
    return send_from_directory(ui_base, 'load.failure.html')
def _find(path, f):
    """Recursively collect files under *path* whose basename matches pattern *f*.

    *f* is an fnmatch-style glob (e.g. '*.js'). Returns full paths.
    """
    found = []
    for dirpath, _, names in os.walk(path):
        found.extend(os.path.join(dirpath, name) for name in fnmatch.filter(names, f))
    return found
def register_web_ui(mgr):
    """Register the v1 web UI with the Flexget web server.

    Decides which directory to serve the UI from (debug build, compiled
    build, or none) and registers the Flask app plus the home route.
    """
    global manager, app_base, debug
    manager = mgr
    if 'debug' in manager.args:
        debug = True
    if debug:
        serve_dir = os.path.join(ui_base, '.tmp', 'serve')
        if os.path.exists(serve_dir):
            app_base = serve_dir
        else:
            logger.warning(
                'Unable to start web ui in debug mode. To enable debug mode please run the debug build, '
                'see http://flexget.com/wiki/Web-UI for instructions'
            )
            logger.warning('Attempting to serve web ui from complied directory')
            app_base = None
    if not app_base:
        # Fall back to the compiled bundle shipped with the package.
        app_base = ui_dist
        if not os.path.exists(app_base):
            logger.critical(
                'Failed to start web ui,'
                ' this can happen if you are running from GitHub version and forgot to run the web ui build, '
                'see http://flexget.com/wiki/Web-UI for instructions'
            )
            app_base = None
    register_app(webui_app.url_path, webui_app, 'WebUI (v1)')
    register_home('%s/' % webui_app.url_path)
| {
"content_hash": "e9ca5f57117ba1035814a9eeb36fe4f5",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 110,
"avg_line_length": 29.526315789473685,
"alnum_prop": 0.6213903743315508,
"repo_name": "malkavi/Flexget",
"id": "77d7225cfaa6e4cbc464bf0a8dc232842c1fcbf5",
"size": "2805",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/ui/v1/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "623"
},
{
"name": "HTML",
"bytes": "84425"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3514392"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1530"
}
],
"symlink_target": ""
} |
""" Merges 3 CSV files into 1.
The two first columns identify the records.
First file: numerators
Columns: product, template, nc, na
Second file: denominators
Columns: product, template, denominator
Third file: strengths
Columns: product, template, strength
**Example of usage**
``python3 -m barbante.scripts.merge_product_product_collections num.csv denom.csv strengths.csv output_file.csv``
**Output**
It saves a CSV file with the following columns: product, template, nc, na, denominator, strength.
"""
import json
import sys
import traceback
from time import time
import barbante.utils.logging as barbante_logging
log = barbante_logging.get_logger(__name__)
def merge_collections(numerators_file, denominators_file, strengths_file, output_file):
    """Merge the numerators, denominators and strengths CSV files into one.

    All three inputs must be sorted by their (product, template) key columns:
    the merge is a single linear pass over the three files (a k-way merge).
    A record missing from one of the inputs yields empty fields in the output.
    """
    log.info("----------")
    log.info("Start.")
    start = time()
    # 'with' guarantees the handles are closed even if the merge fails
    # half-way. Mode 'rU' was deprecated since Python 3.4 and removed in
    # 3.11; plain 'r' already performs universal-newline translation.
    with open(numerators_file, 'r') as f_numerators, \
            open(denominators_file, 'r') as f_denominators, \
            open(strengths_file, 'r') as f_strengths, \
            open(output_file, 'w') as f_output:
        # skips the headers
        next(f_numerators)
        next(f_denominators)
        next(f_strengths)
        f_output.write("product,template_product,nc,na,denominator,strength\n")
        numerator_key, nc, na = yield_numerator(f_numerators)
        denominator_key, denominator = yield_denominator(f_denominators)
        strength_key, strength = yield_strength(f_strengths)
        done = 0
        while True:
            # The smallest pending key across the three inputs is the next
            # output record.
            keys = []
            if numerator_key is not None:
                keys += [numerator_key]
            if denominator_key is not None:
                keys += [denominator_key]
            if strength_key is not None:
                keys += [strength_key]
            if len(keys) == 0:
                break  # exhausted all files
            min_key = min(keys)
            merged_doc = {"product": min_key[0],
                          "template_product": min_key[1]}
            if numerator_key == min_key:
                merged_doc["nc"] = nc
                merged_doc["na"] = na
                numerator_key, nc, na = yield_numerator(f_numerators)
            else:
                merged_doc["nc"] = ""
                merged_doc["na"] = ""
            if denominator_key == min_key:
                merged_doc["denominator"] = denominator
                denominator_key, denominator = yield_denominator(f_denominators)
            else:
                merged_doc["denominator"] = ""
            if strength_key == min_key:
                merged_doc["strength"] = strength
                strength_key, strength = yield_strength(f_strengths)
            else:
                merged_doc["strength"] = ""
            write_to_file(merged_doc, f_output)
            done += 1
            if done % 100000 == 0:
                log.info("Done writing %d lines." % done)
    log.info("End. Took %d seconds." % (time() - start))
def yield_numerator(numerators_handler):
    """Read the next numerators line and return ((product, template), nc, na).

    Returns (None, None, None) when the input is exhausted or the line is
    malformed (missing columns / non-integer counts).
    """
    try:
        numerator_line = next(numerators_handler).split(",")
        numerator_key = (numerator_line[0], numerator_line[1])
        nc = int(numerator_line[2])
        na = int(numerator_line[3])
    except (StopIteration, IndexError, ValueError):
        # A bare 'except' here would also swallow KeyboardInterrupt and
        # SystemExit; only the expected end-of-file / bad-line errors are handled.
        numerator_key, nc, na = None, None, None
    return numerator_key, nc, na
def yield_denominator(denominators_handler):
    """Read the next denominators line and return ((product, template), denominator).

    Returns (None, None) when the input is exhausted or the line is malformed.
    """
    try:
        denominator_line = next(denominators_handler).split(",")
        denominator_key = (denominator_line[0], denominator_line[1])
        denominator = int(denominator_line[2])
    except (StopIteration, IndexError, ValueError):
        # Narrowed from a bare 'except': only expected parse/EOF errors are swallowed.
        denominator_key, denominator = None, None
    return denominator_key, denominator
def yield_strength(strengths_handler):
    """Read the next strengths line and return ((product, template), strength).

    Returns (None, None) when the input is exhausted or the line is malformed.
    """
    try:
        strength_line = next(strengths_handler).split(",")
        strength_key = (strength_line[0], strength_line[1])
        strength = float(strength_line[2])
    except (StopIteration, IndexError, ValueError):
        # Narrowed from a bare 'except': only expected parse/EOF errors are swallowed.
        strength_key, strength = None, None
    return strength_key, strength
def write_to_file(document, output_handler):
    """Serialize one merged record as a comma-separated line and write it out."""
    fields = ("product", "template_product", "nc", "na", "denominator", "strength")
    output_handler.write(','.join(str(document[field]) for field in fields) + '\n')
def main(argv):
    """Command-line entry point.

    Expects four positional arguments (numerators, denominators, strengths,
    output file) and returns a JSON string describing success or failure.
    """
    if len(argv) < 4:
        msg = "You must specify the numerators file, the denominators file, " \
              "the strengths file and the output file."
        log.error(msg)
        return json.dumps({"success": False, "message": msg})
    try:
        numerators_file, denominators_file, strengths_file, output_file = argv[:4]
        merge_collections(numerators_file, denominators_file, strengths_file, output_file)
    except Exception:
        # Report the full traceback back to the caller as JSON.
        log.exception('Exception on {0}'.format(__name__))
        return json.dumps({"success": False,
                           "message": traceback.format_exc()})
    return json.dumps({"success": True})
# Script entry point: forward the CLI arguments (minus the program name)
# and echo the JSON status string.
if __name__ == '__main__':
    print(main(sys.argv[1:]))
| {
"content_hash": "d6809eec7e14b972e579e9f7c5fac4a0",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 121,
"avg_line_length": 29.795321637426902,
"alnum_prop": 0.5960745829244357,
"repo_name": "hypermindr/barbante",
"id": "32a7f98b3bc381d7d3df94dedf3a1769d5e904ed",
"size": "5143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barbante/scripts/merge_product_product_collections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "728872"
}
],
"symlink_target": ""
} |
"""
views.py
URL route handlers
Note that any handler params must match the URL route params.
For example the *say_hello* handler, handling the URL route '/hello/<username>',
must be passed *username* as the argument.
"""
from csv import DictReader
import logging
from datetime import datetime
from google.appengine.api import users, namespace_manager, memcache
from google.appengine.ext import deferred
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError
from google.appengine.ext.db.metadata import get_namespaces
from birthday.forms import BirthdayFileForm
from birthday.helpers import OAuthDanceHelper, DirectoryHelper
from birthday.models import get_birthdays, Client, User
from flask import request, render_template, flash, url_for, redirect, abort, g
from flask_cache import Cache
from birthday import app, constants
from decorators import login_required, admin_required
from forms import ExampleForm
from models import ExampleModel
from tasks import send_birthday_message
# Flask-Cache (configured to use App Engine Memcache API)
cache = Cache(app)
@app.before_request
def before_request():
    """Populate flask.g with auth links, user email and menu state for templates."""
    if request.path == url_for('warmup'):
        # Warmup requests need no user context.
        return
    user = users.get_current_user()
    if user:
        g.logout_text = 'Logout'
        g.url_logout = users.create_logout_url(url_for('admin_index'))
        g.user_email = user.email()
    else:
        g.logout_text = 'Login'
        g.url_logout = users.create_login_url(url_for('admin_index'))
        g.user_email = None
    g.menu = [
        {
            'is_active': request.path == url_for(endpoint),
            'url': url_for(endpoint),
            'name': name,
        }
        for endpoint, name in constants.MENU_ITEMS
    ]
@app.route('/_ah/warmup')
def warmup():
    """App Engine warmup handler; an empty 200 response is all that is required.

    See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests
    """
    return ''
def index():
    # Placeholder view: not implemented and not routed to any URL yet.
    pass
@app.route('/admin/', methods=['GET'])
@admin_required
def admin_index():
    """Admin landing page; the decorator restricts access to admin accounts."""
    return 'Super-seekrit admin page.'
@app.route('/admin/settings/', methods=['GET', 'POST'])
@admin_required
def settings():
    """Render the admin settings page."""
    # TODO: POST handling is not implemented yet; submitted forms are ignored.
    if request.method == 'POST':
        pass
    return render_template('settings.html')
@app.route('/setup/', methods=['GET'])
@login_required
def set_up():
    """
    Handle the installation process for a new domain
    :return: Redirect URL for OAuth
    """
    domain = request.args.get('domain', None)
    #TODO: Check domain is valid and user is admin in apps
    client = Client.get_instance()
    # The currently signed-in user becomes the first administrator and the
    # reply-to address for outgoing mail.
    admin_email = users.get_current_user().email()
    if not client:
        #If there is no client object, create it
        # NOTE(review): a singleton Client with id=1 is assumed here - confirm
        # that Client.get_instance() looks up that same fixed id.
        Client(id=1, primary_domain_name=domain,
               administrators=[admin_email], reply_to=admin_email).put()
    return redirect(url_for('start_oauth2_dance'))
@app.route('/oauth/start/', methods=['GET'])
@login_required
def start_oauth2_dance():
    """Build the Google OAuth2 authorization URL and redirect the user to it.

    BUG FIX: Flask calls view functions with only the parameters named in the
    URL rule. This rule has none, so the previous (unused) 'domain' parameter
    made every request fail with a TypeError; it is removed. The caller
    set_up() already redirects here without arguments, so no caller changes.
    """
    client = Client.get_instance()
    login_hint = users.get_current_user().email()
    # Only force the consent screen when we still need a refresh token.
    approval_prompt = 'auto' if client.refresh_token else 'force'
    scope = constants.OAUTH2_SCOPES
    redirect_uri = url_for('oauth_callback', _external=True)
    oauth_helper = OAuthDanceHelper(scope=scope, redirect_uri=redirect_uri,
                                    approval_prompt=approval_prompt)
    url = oauth_helper.step1_get_authorize_url()
    #TODO: Add a random token to avoid forgery
    return redirect("%s?login_hint=%s" % (url, login_hint))
@app.route('/oauth/callback/', methods=['GET'])
def oauth_callback():
    """Complete the OAuth2 dance: exchange the code and store the credentials.

    BUG FIX: this is a module-level Flask view, not a method, so the previous
    'self' parameter could never be supplied by Flask and every callback
    request failed with a TypeError; it is removed.
    """
    code = request.args.get('code', None)
    if not code:
        logging.error('No code, no authorization')
        abort(500)
    redirect_uri = url_for('oauth_callback', _external=True)
    oauth_helper = OAuthDanceHelper(redirect_uri=redirect_uri)
    credentials = oauth_helper.step2_exchange(code)
    client = Client.get_instance()
    client.credentials = credentials.to_json()
    # Google only returns a refresh token on the first (forced) approval;
    # keep the one we already have otherwise.
    if credentials.refresh_token:
        client.refresh_token = credentials.refresh_token
    directory_helper = DirectoryHelper(client.credentials, None,
                                       client.refresh_token)
    client.customer_id = directory_helper.get_customer_id(
        client.administrators[0]
    )
    client.put()
    return redirect(url_for('settings'))
@app.route('/admin/birthdays/upload', methods=['GET', 'POST'])
@admin_required
def upload_csv():
    """Accept a CSV upload of birthdays and import its rows.

    GET renders the upload form; a valid POST parses the file and stores the
    rows via User.add_many_birthdays.
    """
    form = BirthdayFileForm()
    if form.validate_on_submit() and form.birthday_file.data:
        file_request = request.files[form.birthday_file.name]
        # BUG FIX: csv.DictReader has no 'csvfile' keyword and expects an
        # iterable of text *lines*, not one big string (iterating a string
        # yields characters). Decode the upload and wrap it in StringIO.
        from io import StringIO
        raw = file_request.read()
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        input_file = DictReader(StringIO(raw),
                                fieldnames=constants.BIRTHDAY_CSV_COLUMNS)
        #TODO: Validate both columns are required. Feedback on errors
        User.add_many_birthdays(input_file)
        flash(u'File successfully uploaded.', 'success')
    return render_template('upload.html', form=form)
@app.route('/admin/birthdays/template/', methods=['GET'])
@admin_required
def get_csv_template():
    # TODO: not implemented - should serve a downloadable CSV template with
    # the expected columns (see constants.BIRTHDAY_CSV_COLUMNS).
    pass
@app.route('/admin/birthdays/', methods=['GET', 'POST'])
@admin_required
def list_birthdays():
    """Render the list of all stored birthdays."""
    return render_template('list_birthdays.html', birthdays=User.get_all_birthdays())
@app.route('/admin/birthdays/<int:birthday_id>/edit/', methods=['GET', 'POST'])
@admin_required
def edit_birthday(birthday_id):
    """Edit a birthday record: GET shows the form, a valid POST saves it."""
    # NOTE(review): this handler operates on ExampleModel/ExampleForm, flashes
    # 'Example ...' and redirects to list_examples - it looks copy-pasted from
    # edit_example and probably should use the birthday model instead. Confirm
    # before changing, since the intended model/form is not visible here.
    example = ExampleModel.get_by_id(birthday_id)
    form = ExampleForm(obj=example)
    if request.method == "POST":
        if form.validate_on_submit():
            example.example_name = form.data.get('example_name')
            example.example_description = form.data.get('example_description')
            example.put()
            flash(u'Example %s successfully saved.' % birthday_id, 'success')
            return redirect(url_for('list_examples'))
    return render_template('edit_birthday.html', example=example, form=form)
@app.route('/cron/birthdays/send/daily/', methods=['GET'])
@admin_required
def send_daily_birthday_messages():
    """
    It goes through all the namespaces making queries to the datastore for
    users who have the same birth day and birth month as today
    """
    today = datetime.now()
    # Remember the active namespace so it can be restored afterwards: the
    # loop below switches namespaces globally for the whole request.
    current_namespace = namespace_manager.get_namespace()
    for namespace in get_namespaces():
        # Forget about the default empty namespace
        if namespace:
            logging.debug("Birthdays for today [%s]-[%s] namespace [%s]",
                          today.month, today.day, namespace)
            namespace_manager.set_namespace(namespace)
            #Clear the cached list for the day
            memcache.delete('birthdays')
            #Go through every birthday celebrant
            for celebrant in get_birthdays(month=today.month, day=today.day):
                logging.info("Found a birthday for today! %s", celebrant.email)
                #Schedule sending the email
                # deferred.defer enqueues a task; mail is sent asynchronously
                # from the dedicated 'mail-queue'.
                deferred.defer(send_birthday_message, celebrant,
                               _queue="mail-queue")
    #Restore to the original namespace
    namespace_manager.set_namespace(current_namespace)
    return 'Birthday messages were scheduled for today'
@app.route('/cron/users/sync/', methods=['GET'])
@admin_required
def sync_users():
    # TODO: not implemented - should synchronize the local user list with the
    # Google Apps directory (cron endpoint).
    pass
#TODO: Remove examples from before, below this line
@login_required
def list_examples():
    """List all examples; a valid POST creates a new one first."""
    examples = ExampleModel.query()
    form = ExampleForm()
    if form.validate_on_submit():
        example = ExampleModel(
            example_name=form.example_name.data,
            example_description=form.example_description.data,
            added_by=users.get_current_user()
        )
        try:
            example.put()
        except CapabilityDisabledError:
            # Datastore maintenance window: nothing was saved.
            flash(u'App Engine Datastore is currently in read-only mode.', 'info')
        else:
            flash(u'Example %s successfully saved.' % example.key.id(), 'success')
        return redirect(url_for('list_examples'))
    return render_template('list_examples.html', examples=examples, form=form)
@login_required
def edit_example(example_id):
    """Edit an existing example: GET shows the form, a valid POST saves it."""
    example = ExampleModel.get_by_id(example_id)
    form = ExampleForm(obj=example)
    if request.method == "POST" and form.validate_on_submit():
        example.example_name = form.data.get('example_name')
        example.example_description = form.data.get('example_description')
        example.put()
        flash(u'Example %s successfully saved.' % example_id, 'success')
        return redirect(url_for('list_examples'))
    return render_template('edit_example.html', example=example, form=form)
@login_required
def delete_example(example_id):
    """Delete an example object and redirect back to the listing."""
    example = ExampleModel.get_by_id(example_id)
    try:
        example.key.delete()
    except CapabilityDisabledError:
        # Datastore maintenance window: the entity was not deleted.
        flash(u'App Engine Datastore is currently in read-only mode.', 'info')
    else:
        flash(u'Example %s successfully deleted.' % example_id, 'success')
    return redirect(url_for('list_examples'))
@cache.cached(timeout=60)
def cached_examples():
    """Render the examples list; the response is memoized for 60 seconds."""
    return render_template('list_examples_cached.html', examples=ExampleModel.query())
| {
"content_hash": "a7c1333486514d4969ee0a1069477ef5",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 91,
"avg_line_length": 35.76981132075472,
"alnum_prop": 0.6653655448887014,
"repo_name": "dcifuen/cloudbday",
"id": "75e7e092cb23114da22e85642ba326719d6eb1ca",
"size": "9503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/birthday/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58101"
},
{
"name": "JavaScript",
"bytes": "584264"
},
{
"name": "Python",
"bytes": "3435495"
}
],
"symlink_target": ""
} |
import os
import unittest
import sys
import time
# Make the test helpers importable both when run from the repository root and
# when run from inside the tests directory (fall back to sys.path injection).
try:
    from tests_pydevd_python import debugger_unittest
except:
    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
    from tests_pydevd_python import debugger_unittest
from _pydevd_frame_eval.pydevd_frame_eval_main import frame_eval_func
# The frame-evaluation engine is only built for some interpreters; when it is
# unavailable the whole TestFrameEval class below is skipped.
IS_FRAME_EVAL_AVAILABLE = frame_eval_func is not None
class WriterThreadStepAndResume(debugger_unittest.AbstractWriterThread):
    """Checks the engine switch: frame-eval at a breakpoint, tracing while
    stepping, and back to frame-eval after the thread is resumed."""
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')
    def run(self):
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        self.write_add_breakpoint(2, 'Method1')
        self.write_make_initial_run()
        # '111' == CMD_SET_BREAK: the stop was caused by a line breakpoint.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_step_over(thread_id)
        # '108' == CMD_STEP_OVER: the stop was caused by the step command.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('108', True)
        assert line == 11, 'Expected return to be in line 11, was: %s' % line
        # we use tracing debugger while stepping
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 2, 'Expected return to be in line 2, was: %s' % line
        # we enable frame evaluation debugger after "Resume" command
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadStepReturn(debugger_unittest.AbstractWriterThread):
    """Checks that step-return and step-into fall back to the tracing engine."""
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case56.py')
    def run(self):
        self.start_socket()
        self.write_add_breakpoint(2, 'Call2')
        self.write_make_initial_run()
        # NOTE(review): called without arguments this helper unpacks into 3
        # values, with arguments into 4 - presumably its signature varies
        # with the parameters; confirm in debugger_unittest.
        thread_id, frame_id, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type()
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_get_frame(thread_id, frame_id)
        self.write_step_return(thread_id)
        # '109' == CMD_STEP_RETURN: the stop was caused by the step-return command.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('109', True)
        assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
        # Step return uses temporary breakpoint, so we use tracing debugger
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_step_in(thread_id)
        # '107' == CMD_STEP_INTO: the stop was caused by the step-into command.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('107', True)
        # goes to line 4 in jython (function declaration line)
        assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
        # we use tracing debugger for stepping
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadAddLineBreakWhileRun(debugger_unittest.AbstractWriterThread):
    """Checks that a breakpoint added while the program is already running is
    handled by the tracing engine (frame-eval cannot patch live frames)."""
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case3.py')
    def run(self):
        self.start_socket()
        self.write_make_initial_run()
        # Let the target start running before the breakpoint is installed.
        time.sleep(.5)
        breakpoint_id = self.write_add_breakpoint(4, '')
        # '111' == CMD_SET_BREAK: the stop was caused by a line breakpoint.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 4, 'Expected return to be in line 4, was: %s' % line
        # we use tracing debugger if breakpoint was added while running
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_get_frame(thread_id, frame_id)
        self.write_run_thread(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 4, 'Expected return to be in line 4, was: %s' % line
        # we still use tracing debugger
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_get_frame(thread_id, frame_id)
        self.write_remove_breakpoint(breakpoint_id)
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadExceptionBreak(debugger_unittest.AbstractWriterThread):
    """Checks that having an exception breakpoint (suspend-on-raise policy)
    forces the tracing engine even for ordinary line breakpoints."""
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')
    def run(self):
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        # Policy flags: notify on raise=1, on termination=0, ignore libraries=0.
        self.write_add_exception_breakpoint_with_policy('IndexError', "1", "0", "0")
        self.write_make_initial_run()
        time.sleep(.5)
        # '111' == CMD_SET_BREAK: the stop was caused by the line breakpoint.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        # we use tracing debugger if there are exception breakpoints
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadAddExceptionBreakWhileRunning(debugger_unittest.AbstractWriterThread):
    """Checks the switch from frame-eval to tracing when an exception
    breakpoint is added after the session has started."""
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')
    def run(self):
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        self.write_add_breakpoint(2, 'Method1')
        # self.write_add_exception_breakpoint_with_policy('IndexError', "1", "0", "0")
        self.write_make_initial_run()
        time.sleep(.5)
        # '111' == CMD_SET_BREAK: the stop was caused by a line breakpoint.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        # No exception breakpoint has been registered yet, so frame
        # evaluation is still in use at this first hit.
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_add_exception_breakpoint_with_policy('IndexError', "1", "0", "0")
        self.write_run_thread(thread_id)
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 2, 'Expected return to be in line 2, was: %s' % line
        # we use tracing debugger if exception break was added
        assert suspend_type == "trace", 'Expected suspend type to be "trace", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
class WriterThreadAddTerminationExceptionBreak(debugger_unittest.AbstractWriterThread):
    """Checks that an exception breakpoint with the 'on termination' policy
    does NOT disable the frame-evaluation engine."""
    TEST_FILE = debugger_unittest._get_debugger_test_file('_debugger_case10.py')
    def run(self):
        self.start_socket()
        self.write_add_breakpoint(10, 'Method2')
        # Policy flags: notify on raise=0, on termination=1, ignore libraries=0.
        self.write_add_exception_breakpoint_with_policy('IndexError', "0", "1", "0")
        self.write_make_initial_run()
        time.sleep(.5)
        # '111' == CMD_SET_BREAK: the stop was caused by the line breakpoint.
        thread_id, frame_id, line, suspend_type = self.wait_for_breakpoint_hit_with_suspend_type('111', True)
        assert line == 10, 'Expected return to be in line 10, was: %s' % line
        # we can use frame evaluation with exception breakpoint with "On termination" suspend policy
        assert suspend_type == "frame_eval", 'Expected suspend type to be "frame_eval", but was: %s' % suspend_type
        self.write_run_thread(thread_id)
        self.finished_ok = True
@unittest.skipIf(not IS_FRAME_EVAL_AVAILABLE, "Frame evaluation debugger isn't available "
                                              "in the current environment")
class TestFrameEval(unittest.TestCase, debugger_unittest.DebuggerRunner):
    """Runs each WriterThread scenario above against a fresh debuggee process."""
    def get_command_line(self):
        # '-u' keeps the child's stdout/stderr unbuffered so the runner can
        # read its output promptly.
        return [sys.executable, '-u']
    def test_step_and_resume(self):
        self.check_case(WriterThreadStepAndResume)
    def test_step_return(self):
        self.check_case(WriterThreadStepReturn)
    def test_add_break_while_running(self):
        self.check_case(WriterThreadAddLineBreakWhileRun)
    def test_exc_break(self):
        self.check_case(WriterThreadExceptionBreak)
    def test_add_exc_break_while_running(self):
        self.check_case(WriterThreadAddExceptionBreakWhileRunning)
    def test_add_termination_exc_break(self):
        self.check_case(WriterThreadAddTerminationExceptionBreak)
# Allow running this module directly as a unittest script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "db1859da97c86e320b248aedb3a26676",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 115,
"avg_line_length": 39.763636363636365,
"alnum_prop": 0.6649519890260631,
"repo_name": "vvv1559/intellij-community",
"id": "535cd8006d8a5b4f36a4e41e0fd45f6e66f5691e",
"size": "8748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/tests_pydevd_python/test_frame_eval_and_tracing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "60827"
},
{
"name": "C",
"bytes": "211454"
},
{
"name": "C#",
"bytes": "1264"
},
{
"name": "C++",
"bytes": "199030"
},
{
"name": "CMake",
"bytes": "1675"
},
{
"name": "CSS",
"bytes": "201445"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "3246752"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1901858"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "166889152"
},
{
"name": "JavaScript",
"bytes": "570364"
},
{
"name": "Jupyter Notebook",
"bytes": "93222"
},
{
"name": "Kotlin",
"bytes": "4758504"
},
{
"name": "Lex",
"bytes": "147486"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "51370"
},
{
"name": "Objective-C",
"bytes": "28061"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl 6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6680"
},
{
"name": "Python",
"bytes": "25489147"
},
{
"name": "Roff",
"bytes": "37534"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Shell",
"bytes": "64141"
},
{
"name": "Smalltalk",
"bytes": "338"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "77"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
} |
import json
import os
from . import ExtensionModule
from .. import dependencies
from .. import mlog
from ..mesonlib import Popen_safe, MesonException
from ..programs import ExternalProgram
class DlangModule(ExtensionModule):
    """Meson extension module with D-language (DUB) helpers.

    Exposes ``generate_dub_file``, which creates or updates a ``dub.json``
    package description in a given directory.
    """
    # ExternalProgram for the ``dub`` binary, cached on the class so the
    # PATH search only happens once per Meson invocation.
    class_dubbin = None
    # True once _init_dub() has successfully located DUB.
    init_dub = False

    def __init__(self, interpreter):
        super().__init__(interpreter)
        self.methods.update({
            'generate_dub_file': self.generate_dub_file,
        })

    def _init_dub(self):
        """Locate the ``dub`` binary once and cache it on the class.

        Raises:
            MesonException: if DUB cannot be found.
        """
        # Reuse a binary already discovered by the DUB dependency code, if any.
        if DlangModule.class_dubbin is None:
            DlangModule.class_dubbin = dependencies.DubDependency.class_dubbin
        # Still nothing cached: search PATH ourselves.
        if DlangModule.class_dubbin is None:
            DlangModule.class_dubbin = self.check_dub()
        self.dubbin = DlangModule.class_dubbin
        # Note: the previous version nested this test twice; a single
        # check is equivalent.
        if not self.dubbin:
            raise MesonException('DUB not found.')
        # Bug fix: this flag was never set before, so initialization was
        # redundantly re-run on every generate_dub_file() call.
        DlangModule.init_dub = True

    def generate_dub_file(self, state, args, kwargs):
        """Write (or merge into) ``<args[1]>/dub.json``.

        args[0] is the package name, args[1] the target directory.
        Keyword arguments become dub.json keys; Meson Dependency objects
        in 'dependencies' are resolved through ``dub describe``.
        """
        if not DlangModule.init_dub:
            self._init_dub()

        if len(args) < 2:
            raise MesonException('Missing arguments')

        config = {
            'name': args[0]
        }

        config_path = os.path.join(args[1], 'dub.json')
        # Merge with an existing dub.json rather than clobbering it.
        if os.path.exists(config_path):
            with open(config_path, encoding='utf-8') as ofile:
                try:
                    config = json.load(ofile)
                except ValueError:
                    mlog.warning('Failed to load the data in dub.json')

        # These keys are not required, but dub refuses to publish without them.
        warn_publishing = ['description', 'license']
        for arg in warn_publishing:
            if arg not in kwargs and \
               arg not in config:
                mlog.warning('Without', mlog.bold(arg), 'the DUB package can\'t be published')

        for key, value in kwargs.items():
            if key == 'dependencies':
                config[key] = {}
                if isinstance(value, list):
                    for dep in value:
                        if isinstance(dep, dependencies.Dependency):
                            name = dep.get_name()
                            # Only record dependencies DUB itself knows about.
                            ret, res = self._call_dubbin(['describe', name])
                            if ret == 0:
                                version = dep.get_version()
                                if version is None:
                                    config[key][name] = ''
                                else:
                                    config[key][name] = version
                elif isinstance(value, dependencies.Dependency):
                    name = value.get_name()
                    ret, res = self._call_dubbin(['describe', name])
                    if ret == 0:
                        version = value.get_version()
                        if version is None:
                            config[key][name] = ''
                        else:
                            config[key][name] = version
            else:
                config[key] = value

        with open(config_path, 'w', encoding='utf-8') as ofile:
            ofile.write(json.dumps(config, indent=4, ensure_ascii=False))

    def _call_dubbin(self, args, env=None):
        """Run dub with *args*; return (returncode, stripped stdout)."""
        p, out = Popen_safe(self.dubbin.get_command() + args, env=env)[0:2]
        return p.returncode, out.strip()

    def check_dub(self):
        """Search PATH for a runnable ``dub``; return the program or False."""
        dubbin = ExternalProgram('dub', silent=True)
        if dubbin.found():
            try:
                p, out = Popen_safe(dubbin.get_command() + ['--version'])[0:2]
                if p.returncode != 0:
                    mlog.warning('Found dub {!r} but couldn\'t run it'
                                 ''.format(' '.join(dubbin.get_command())))
                    # Set to False instead of None to signify that we've already
                    # searched for it and not found it
                    dubbin = False
            except (FileNotFoundError, PermissionError):
                dubbin = False
        else:
            dubbin = False

        if dubbin:
            mlog.log('Found DUB:', mlog.bold(dubbin.get_path()),
                     '(%s)' % out.strip())
        else:
            mlog.log('Found DUB:', mlog.red('NO'))

        return dubbin
def initialize(*args, **kwargs):
    """Module entry point: called by Meson to instantiate the dlang module."""
    module = DlangModule(*args, **kwargs)
    return module
| {
"content_hash": "a18ed27d9bca49a4b01ce68bfcd80c51",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 94,
"avg_line_length": 36.898305084745765,
"alnum_prop": 0.4988516306844281,
"repo_name": "jpakkane/meson",
"id": "60d28854ec956e684daf7e46f68707b80c1aac7e",
"size": "5033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/modules/dlang.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "151937"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "28253"
},
{
"name": "CMake",
"bytes": "4710"
},
{
"name": "D",
"bytes": "5077"
},
{
"name": "Dockerfile",
"bytes": "1060"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "4590"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2570"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "135"
},
{
"name": "Meson",
"bytes": "354202"
},
{
"name": "Objective-C",
"bytes": "1092"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "PowerShell",
"bytes": "2249"
},
{
"name": "Python",
"bytes": "2106927"
},
{
"name": "Roff",
"bytes": "301"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "2083"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9706"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
"""Integrates dogpile.cache for Pyramid
"""
from setuptools import setup, find_packages
import os.path
def project_path(*names):
    """Join *names* onto the directory that contains this setup.py."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *names)
# Package definition. The long_description is assembled at build time
# from the documentation files shipped next to setup.py.
setup(
    name='pyramid_dogpile_cache2',
    version='1.2.0.dev0',
    install_requires=[
        'Beaker',  # For parsing pylibmc behaviors from ini file.
        'dogpile.cache >= 1.0.0.dev0',
        'pyramid_dogpile_cache',  # For ini file parsing helpers.
        'setuptools',
    ],
    author='Zeit Online',
    author_email='zon-backend@zeit.de',
    license='BSD',
    url='https://github.com/zeitonline/pyramid_dogpile_cache2',
    keywords='pyramid dogpile.cache',
    # One trove classifier per line; the final slice drops the empty
    # string produced by the trailing newline.
    classifiers="""\
Environment :: Plugins
Framework :: Pyramid
Framework :: Paste
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
""".split('\n')[:-1],
    # The module docstring doubles as the short description.
    description=__doc__.strip(),
    long_description='\n\n'.join(open(project_path(name)).read() for name in (
        'README.rst',
        'HACKING.rst',
        'CHANGES.txt',
    )),
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
)
| {
"content_hash": "03692ed23565bd560bcd9655d158a041",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 26.74,
"alnum_prop": 0.6230366492146597,
"repo_name": "ZeitOnline/pyramid_dogpile_cache2",
"id": "82d11981d4a7576b504125f15f5064b48de4da6f",
"size": "1337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11698"
}
],
"symlink_target": ""
} |
"""
file: document.py

Description: defines the "all purpose" document object. A document is a
holder that contains various pieces of information, such as the content,
a set of segmentations, or annotations.
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from os.path import basename
import cgi
import codecs
import logging
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
import sem
import sem.misc
from sem.logger import default_handler
from .holder import Holder
from .segmentation import Segmentation
from .corpus import Corpus
from .annotation import Tag, Annotation, tag_annotation_from_corpus, chunk_annotation_from_corpus, get_top_level
from .span import Span
# Module-level logger for document storage operations; by default only
# warnings and above are emitted.
document_logger = logging.getLogger("sem.storage.document")
document_logger.addHandler(default_handler)
document_logger.setLevel("WARNING")
class Document(Holder):
    """A single text document together with its linguistic analyses.

    A Document bundles:
      - a raw text ``content``,
      - named Segmentation objects (typically "tokens" and "sentences"),
      - named Annotation objects,
      - a token-level Corpus,
      - free-form metadata (encoding, language, MIME type, ...).
    """
    def __init__(self, name, content=None, encoding=None, lang=None, mime_type=None, **kwargs):
        super(Document, self).__init__(**kwargs)
        self._name = name
        self._content = content
        self._segmentations = {}  # segmentation name -> Segmentation
        self._annotations = {}  # annotation name -> Annotation
        self._corpus = Corpus()
        self._metadatas = {}  # metadata key -> value
        # Only record metadata entries that were actually provided.
        if encoding is not None:
            self._metadatas["encoding"] = encoding
        if lang is not None:
            self._metadatas["lang"] = lang
        if mime_type is not None:
            self._metadatas["MIME"] = mime_type
    @property
    def name(self):
        """The document name (usually its base filename)."""
        return self._name
    @property
    def content(self):
        """The raw text content."""
        return self._content
    @property
    def corpus(self):
        """The token-level Corpus associated with this document."""
        return self._corpus
    @content.setter
    def content(self, content):
        self._content = content
    @property
    def segmentations(self):
        """Dict mapping segmentation name -> Segmentation."""
        return self._segmentations
    @property
    def annotations(self):
        """Dict mapping annotation name -> Annotation."""
        return self._annotations
    @property
    def metadatas(self):
        """Dict of free-form metadata."""
        return self._metadatas
    @classmethod
    def from_file(cls, filename, encoding="utf-8"):
        """Build a Document from a text file, normalising CRLF to LF."""
        return Document(basename(filename), content=codecs.open(filename, "rU", encoding).read().replace("\r",""), encoding=encoding)
    @classmethod
    def from_conll(cls, filename, fields, word_field, taggings=None, chunkings=None, encoding="utf-8", **kwargs):
        """Build a Document from a CoNLL-formatted file via Corpus.from_conll."""
        return Document.from_corpus(basename(filename), Corpus.from_conll(filename, fields, encoding=encoding), word_field, taggings=taggings, chunkings=chunkings, encoding=encoding, **kwargs)
    @classmethod
    def from_corpus(cls, name, corpus, word_field, taggings=None, chunkings=None, encoding="utf-8", **kwargs):
        """Build a Document from a Corpus.

        The content is reconstructed by joining tokens with spaces and
        sentences with newlines; "tokens" and "sentences" segmentations
        are derived from the reconstructed character offsets, and any
        requested taggings/chunkings are converted to Annotations.
        """
        document = Document(name, encoding=encoding)
        document._corpus = corpus
        character_index = 0
        sentence_index = 0
        contents = []
        word_spans = []
        sentence_spans = []
        for sentence in document._corpus.sentences:
            contents.append([])
            for token in sentence:
                word = token[word_field]
                contents[-1].append(word[:])
                # +1 below accounts for the space inserted between tokens.
                word_spans.append(Span(character_index, character_index+len(word)))
                character_index += len(word) + 1
            sentence_spans.append(Span(sentence_index, sentence_index+len(sentence)))
            sentence_index += len(sentence)
        document._content = u"\n".join([u" ".join(content) for content in contents])
        document.add_segmentation(Segmentation("tokens", spans=word_spans))
        document.add_segmentation(Segmentation("sentences", reference=document.segmentation("tokens"), spans=sentence_spans[:]))
        for tagging in (taggings or []):
            document.add_annotation(tag_annotation_from_corpus(document._corpus, tagging, tagging, reference=document.segmentation("tokens"), strict=True))
        for chunking in (chunkings or []):
            document.add_annotation(chunk_annotation_from_corpus(document._corpus, chunking, chunking, reference=document.segmentation("tokens"), strict=True))
        return document
    @classmethod
    def from_xml(cls, xml, chunks_to_load=None, load_subtypes=True, type_separator=u"."):
        """Load a Document from an XML-SEM file name, ElementTree or Element.

        chunks_to_load optionally lists annotations to re-inject into the
        corpus as BIO columns; load_subtypes controls whether annotation
        subtypes (separated by type_separator) are kept.
        """
        if sem.misc.is_string(xml):
            data = ET.parse(xml)
        elif isinstance(xml, ET.ElementTree):
            data = xml
        elif isinstance(xml, type(ET.Element("a"))): # did not find a better way to test for Element
            data = xml
        else:
            raise TypeError("Invalid type for loading XML-SEM document: {0}".format(type(xml)))
        if isinstance(data, ET.ElementTree):
            root = data.getroot()
        elif isinstance(data, type(ET.Element("a"))):
            root = data
        # A <sem> wrapper may contain the <document>; unwrap it.
        if root.tag == "sem":
            root = list(root)[0]
        elif root.tag != "document":
            raise TypeError("Invalid XML document type for XML-SEM document: {0}".format(root.tag))
        htmlparser = HTMLParser()
        document = Document(root.attrib.get("name", u"_DOCUMENT_"))
        for element in list(root):
            if element.tag == "metadata":
                document._metadatas = element.attrib
            elif element.tag == "content":
                # Content was XML-escaped on write; undo that here.
                document.content = htmlparser.unescape(element.text)
            elif element.tag == "segmentations":
                for segmentation in list(element):
                    # Spans are serialised with short attributes s/l
                    # (start/length); older files may use the long names.
                    spans = [Span(lb=int(span.attrib.get("start", span.attrib["s"])), ub=0, length=int(span.attrib.get("length", span.attrib["l"]))) for span in list(segmentation)]
                    reference = segmentation.get(u"reference", None)
                    if reference:
                        reference = document.segmentation(reference)
                    document.add_segmentation(Segmentation(segmentation.attrib[u"name"], spans=spans, reference=reference))
            elif element.tag == "annotations":
                for annotation in list(element):
                    tags = []
                    for tag in list(annotation):
                        value = tag.attrib.get(u"value",tag.attrib[u"v"])
                        if not load_subtypes:
                            # Keep only the top-level type.
                            value = value.strip(type_separator).split(type_separator)[0]
                        tags.append(Tag(value=value, lb=int(tag.attrib.get("start", tag.attrib["s"])), ub=0, length=int(tag.attrib.get("length", tag.attrib["l"]))))
                    reference = annotation.get(u"reference", None)
                    if reference:
                        reference = document.segmentation(reference)
                    annotation = Annotation(annotation.attrib[u"name"], reference=reference)
                    annotation.annotations = tags
                    document.add_annotation(annotation)
        # Rebuild the token-level corpus if both segmentations are present.
        if document.segmentation("tokens") and document.segmentation("sentences"):
            document.corpus.from_segmentation(document.content, document.segmentation("tokens"), document.segmentation("sentences"))
            if chunks_to_load is not None:
                for chunk_to_load in chunks_to_load:
                    cur_annot = document.annotation(chunk_to_load)
                    if cur_annot and cur_annot.reference is None:
                        document.set_reference(cur_annot.name, "tokens")
                    i = 0
                    sent_iter = iter(document.corpus)
                    shift = 0
                    present = set([(a.lb,a.ub) for a in cur_annot])
                    for sentence in document.segmentation("sentences"):
                        sent = next(sent_iter)
                        annots = []
                        while i<len(cur_annot) and cur_annot[i].ub <= sentence.ub:
                            annots.append(cur_annot[i])
                            if tuple([cur_annot[i].lb, cur_annot[i].ub]) not in present:
                                raise Exception
                            i += 1
                        # Encode the sentence's annotations as BIO labels.
                        l = [u"O" for _ in range(len(sentence))]
                        for annot in annots:
                            l[annot.lb-shift] = u"B-{0}".format(annot.value)
                            for j in range(annot.lb+1-shift, annot.ub-shift):
                                l[j] = u"I-{}".format(annot.value)
                        for j in range(len(l)):
                            # NOTE(review): the field name is hard-coded to
                            # "NER" even though the annotation loaded is
                            # chunk_to_load — confirm this is intended.
                            sent[j]["NER"] = l[j]
                        shift += len(sentence)
                    document.corpus.fields.append(chunk_to_load)
        return document
    def escaped_name(self):
        """Return the base name with characters invalid on Windows removed."""
        name = basename(self._name)
        if sem.ON_WINDOWS:
            return name.replace(u":", u"").replace(u"\\", u"").replace(u"?", u"").replace(u'"', u"").replace(u"<", u"").replace(u">", u"").replace(u"|", u"")
        else:
            return name
    def get_tokens(self):
        """Return the list of token strings from the "tokens" segmentation."""
        tokens = []
        content = self.content
        for span in self.segmentation("tokens"):
            tokens.append(content[span.lb : span.ub])
        return tokens
    def set_content(self, content):
        """Replace the raw text content."""
        self._content = content
    def add_segmentation(self, segmentation):
        """Register *segmentation* under its name and link it back to self."""
        self._segmentations[segmentation.name] = segmentation
        self._segmentations[segmentation.name]._document = self
    def segmentation(self, name):
        """Return the segmentation called *name*, or None."""
        return self._segmentations.get(name, None)
    def add_annotation(self, annotation):
        """Register *annotation* under its name and link it back to self."""
        self._annotations[annotation.name] = annotation
        self._annotations[annotation.name]._document = self
    def annotation(self, name):
        """Return the annotation called *name*, or None."""
        return self._annotations.get(name, None)
    def add_metadata(self, key, value):
        self._metadatas[key] = value
    def metadata(self, name):
        """Return the metadata value for *name*, or None."""
        return self._metadatas.get(name, None)
    def mime_type(self):
        return self.metadata("MIME")
    def write(self, f, depth=0, indent=4, add_header=False):
        """Serialise this document as XML-SEM to the open file object *f*.

        depth/indent control pretty-printing; add_header emits the XML
        declaration (used when the document is written standalone).
        """
        if add_header:
            f.write(u'<?xml version="1.0" encoding="{0}" ?>\n'.format(f.encoding or u"ASCII"))
        f.write(u'{0}<document name="{1}">\n'.format(depth*indent*u" ", self.name))
        depth += 1
        f.write(u'{}<metadata'.format(depth*indent*" "))
        for metakey, metavalue in sorted(self._metadatas.items()):
            f.write(u' {0}="{1}"'.format(metakey, metavalue))
        f.write(u' />\n')
        # Content must be XML-escaped; from_xml unescapes it on load.
        f.write(u'{0}<content>{1}</content>\n'.format(depth*indent*" ", cgi.escape(self.content)))
        if len(self.segmentations) > 0:
            f.write(u'{0}<segmentations>\n'.format(depth*indent*" "))
            refs = [seg.reference for seg in self.segmentations.values() if seg.reference]
            # Segmentations that others reference must be written first.
            for seg in sorted(self.segmentations.values(), key=lambda x: (x.reference and x.reference.reference in refs, x.name)): # TODO: create a sort_segmentations method to order them in terms of reference.
                depth += 1
                ref = (seg.reference.name if isinstance(seg.reference, Segmentation) else seg.reference)
                ref_str = ("" if ref is None else ' reference="{0}"'.format(ref))
                f.write(u'{0}<segmentation name="{1}"{2}>'.format(depth*indent*" ", seg.name, ref_str))
                depth += 1
                # Five spans per line for readability of the output file.
                for i, element in enumerate(seg):
                    lf = i == 0 or (i % 5 == 0)
                    if lf:
                        f.write(u'\n{0}'.format(depth*indent*" "))
                    f.write(u'{0}<s s="{1}" l="{2}" />'.format(("" if lf else " "), element.lb, len(element)))
                f.write(u"\n")
                depth -= 1
                f.write(u'{0}</segmentation>\n'.format(depth*indent*" "))
                depth -= 1
            f.write(u'{0}</segmentations>\n'.format(depth*indent*" "))
        if len(self.annotations) > 0:
            f.write(u'{0}<annotations>\n'.format(depth*indent*" "))
            for annotation in self.annotations.values():
                depth += 1
                reference = ("" if not annotation.reference else u' reference="{0}"'.format(annotation.reference if sem.misc.is_string(annotation.reference) else annotation.reference.name))
                f.write(u'{0}<annotation name="{1}"{2}>\n'.format(depth*indent*" ", annotation.name, reference))
                depth += 1
                for tag in annotation:
                    f.write(u'{0}<tag v="{1}" s="{2}" l="{3}"/>\n'.format(depth*indent*" ", tag.getValue(), tag.lb, len(tag)))
                depth -= 1
                f.write(u'{0}</annotation>\n'.format(depth*indent*" "))
                depth -= 1
            f.write(u'{0}</annotations>\n'.format(depth*indent*" "))
        depth -= 1
        f.write(u'{0}</document>\n'.format(depth*indent*" "))
    def set_reference(self, annotation_name, reference_name, add_to_corpus=False, filter=get_top_level):
        """Re-express an annotation's character spans in terms of the
        spans of the segmentation *reference_name* (e.g. tokens)."""
        annot = self.annotation(annotation_name)
        if annot is not None and (annot.reference is None or annot.reference.name != reference_name):
            spans = self.segmentation(reference_name).get_reference_spans()
            begin = 0
            i = 0
            for j, annotation in enumerate(annot):
                start = annotation.lb
                end = annotation.ub
                # Advance to the reference span containing the start offset.
                while not(spans[i].lb <= start and start < spans[i].ub):
                    i += 1
                begin = i
                while spans[i].ub < end:
                    i += 1
                annotation.lb = begin
                annotation.ub = i + 1
                # Back up one span: the next annotation may start inside it.
                i = max(begin-1, 0)
                begin = 0
            annot._reference = self.segmentation(reference_name)
            if add_to_corpus:
                self.add_to_corpus(annotation_name, filter=filter)
    def add_to_corpus(self, annotation_name, filter=get_top_level):
        """Inject an annotation into the corpus as a BIO column named
        *annotation_name*; *filter* selects which annotations to keep
        (by default only top-level, non-nested ones)."""
        base_annotations = self.annotation(annotation_name)
        if not base_annotations:
            raise KeyError('{0} annotation not found.'.format(annotation_name))
        annotations = base_annotations.get_reference_annotations()
        spans = self.segmentation("tokens").get_reference_spans()
        begin = 0
        i = 0
        to_remove = [] # annotations that cannot be aligned with tokens will be removed
        for j, annotation in enumerate(annotations):
            start = annotation.lb
            end = annotation.ub
            while (i > 0) and start < spans[i].lb:
                i -= 1
            while (i < len(spans)) and not(spans[i].lb <= start < spans[i].ub):
                i += 1
            if i < len(spans):
                begin = i
                while spans[i].ub < end:
                    i += 1
                annotation.lb = begin
                annotation.ub = i + 1
            else:
                document_logger.warn(u"cannot add annotation {0}".format(annotation))
                to_remove.append(j)
            i = max(begin, 0)
            begin = 0
        # Delete in reverse so earlier indices stay valid.
        for i in to_remove[::-1]:
            del annotations[i]
        if filter:
            annotations = filter(annotations)
        sentence_spans = iter(self.segmentation("sentences"))
        annot_index = 0
        if len(annotations) == 0:
            annots = []
            cur_annot = None
        else:
            annots = annotations
            cur_annot = annots[annot_index]
        shift = 0
        for sentence in self.corpus.sentences:
            span = next(sentence_spans)
            # Default every token to "O", then overwrite with B-/I- tags.
            for token in sentence:
                token[annotation_name] = u"O"
            while cur_annot is not None and cur_annot.lb >= span.lb and cur_annot.ub <= span.ub:
                sentence[cur_annot.lb -shift][annotation_name] = u"B-{0}".format(cur_annot.value)
                for k in range(cur_annot.lb+1, cur_annot.ub):
                    sentence[k - shift][annotation_name] = u"I-{0}".format(cur_annot.value)
                try:
                    annot_index += 1
                    cur_annot = annots[annot_index]
                except IndexError:
                    cur_annot = None
            if cur_annot is not None and ((span.lb <= cur_annot.lb < span.ub) and cur_annot.ub > span.ub): # annotation spans over at least two sentences
                document_logger.warn(u"Annotation {0} spans over multiple sentences, ignoring".format(cur_annot))
                try:
                    annot_index += 1
                    cur_annot = annots[annot_index]
                except IndexError:
                    cur_annot = None
            shift += len(sentence)
        self.corpus.fields.append(annotation_name)
    def add_annotation_from_tags(self, tags, field, annotation_name):
        """Add an annotation built from per-sentence tag lists; dispatches
        on whether the tags look like BIO chunking or plain POS tags."""
        BIO = all([tag[0] in u"BIO" for tag in tags[0]])
        if self._annotations.get(annotation_name, None):
            del self._annotations[annotation_name]._annotations[:]
        if BIO:
            self.add_chunking(tags, field, annotation_name)
        else:
            self.add_tagging(sem.misc.correct_pos_tags(tags), field, annotation_name)
        if not self.corpus.has_key(field):
            self.corpus.fields.append(field)
    def add_tagging(self, sentence_tags, field, annotation_name):
        """Add a token-level tagging annotation.

        Tags prefixed with "_" mark the continuation of a multiword tag;
        the loop below walks each sentence right-to-left, merging those
        continuations into single multi-token Tag objects.
        """
        nth_token = 0
        annotation = []
        for nth_sentence, tags in enumerate(sentence_tags):
            # A sentence cannot start with a continuation marker.
            if tags[0][0] == u"_":
                tags[0] = tags[0].lstrip(u"_")
            index = len(annotation)
            i = len(tags)-1
            n = 0
            current = None # current tag value (for multiword tags)
            while i >= 0:
                change = not(current is None or tags[i].lstrip(u"_") == current)
                if tags[i][0] != u"_":
                    if change:
                        tags[i] = current
                    annotation.insert(index, Tag(tags[i], nth_token+i, 0, length=n+1))
                    current = None
                    n = 0
                else:
                    if current is None:
                        current = tags[i].lstrip(u"_")
                        n = 0
                    if change:
                        tags[i] = u"_" + current
                    n += 1
                self.corpus.sentences[nth_sentence][i][field] = tags[i]
                i -= 1
            nth_token += len(tags)
        self._annotations[annotation_name] = Annotation(annotation_name, reference=self.segmentation("tokens"))
        self._annotations[annotation_name].annotations = annotation[:]
    def add_chunking(self, sentence_tags, field, annotation_name):
        """Add a BIO chunking annotation, also writing the tags into the
        corpus column *field*."""
        nth_token = 0
        annotation = []
        for nth_sentence, tags in enumerate(sentence_tags):
            for i in range(len(tags)):
                self.corpus.sentences[nth_sentence][i][field] = tags[i]
        self._annotations[annotation_name] = chunk_annotation_from_corpus(self.corpus, field, annotation_name, reference=self.segmentation("tokens"))
class SEMCorpus(Holder):
    """An ordered collection of SEM Document objects.

    Supports len(), iteration, indexing, XML-SEM (de)serialisation and
    name-unique insertion of documents.
    """
    def __init__(self, documents=None, **kwargs):
        super(SEMCorpus, self).__init__(**kwargs)
        # Only create a fresh list when no document list is supplied.
        if documents is None:
            self._documents = []
        else:
            self._documents = documents
    def __getitem__(self, index):
        return self._documents[index]
    def __len__(self):
        return len(self._documents)
    def __iter__(self):
        return iter(self._documents)
    @property
    def documents(self):
        """The underlying (mutable) list of Document objects."""
        return self._documents
    @classmethod
    def from_xml(cls, xml, chunks_to_load=None, load_subtypes=True, type_separator=u"."):
        """Load a SEMCorpus from a file name, ElementTree or Element.

        Raises:
            TypeError: if ``xml`` is none of the accepted types.
            ValueError: if the root tag is not ``sem``.
        """
        if sem.misc.is_string(xml):
            data = ET.parse(xml)
        elif isinstance(xml, ET.ElementTree):
            data = xml
        elif isinstance(xml, type(ET.Element("a"))): # did not find a better way to test for Element
            data = xml
        else:
            raise TypeError("Invalid type for loading XML-SEM document: {0}".format(type(xml)))
        # Bug fix: a bare Element has no getroot(); mirror Document.from_xml
        # instead of calling data.getroot() unconditionally.
        if isinstance(data, ET.ElementTree):
            root = data.getroot()
        else:
            root = data
        if root.tag != "sem":
            raise ValueError("Not sem xml file type: '{0}'".format(root.tag))
        doc_list = []
        for document in list(root):
            # Bug fix: forward the loading options, which were previously
            # accepted by this method but silently ignored.
            doc_list.append(Document.from_xml(document,
                                              chunks_to_load=chunks_to_load,
                                              load_subtypes=load_subtypes,
                                              type_separator=type_separator))
        return SEMCorpus(doc_list)
    def add_document(self, document):
        """Append *document* unless one with the same name is already present."""
        ok = not any([d.name == document.name for d in self.documents])
        if ok:
            self._documents.append(document)
    def write(self, f, indent=4):
        """Serialise the whole corpus as XML-SEM to the open file object *f*."""
        f.write(u'<?xml version="1.0" encoding="{0}" ?>\n'.format(f.encoding or "ASCII"))
        f.write(u"<sem>\n")
        for document in self._documents:
            document.write(f, depth=1, indent=indent, add_header=False)
        f.write(u"</sem>")
# Mapping from a human-readable filter name to a predicate
# (document, annotation_name) -> bool used to select documents.
str2docfilter = {
    u"all documents": lambda doc, annotation: True,
    u"only documents with annotations": lambda doc, annotation: len(doc.annotation(annotation) or []) > 0,
}
| {
"content_hash": "3d3c5a5d79b47d4c4963e94e0646106e",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 210,
"avg_line_length": 43.06225680933852,
"alnum_prop": 0.5607662419806633,
"repo_name": "YoannDupont/SEM",
"id": "93ba6cd43f7f628b1555c6ab6fda3203b1f27b18",
"size": "22159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sem/storage/document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "230"
},
{
"name": "CSS",
"bytes": "6940"
},
{
"name": "Python",
"bytes": "551652"
},
{
"name": "Shell",
"bytes": "199"
},
{
"name": "TeX",
"bytes": "159226"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
import os
import tct
import sys
# Read the parameter file given on the command line; it points at the
# facts, milestones and result files produced by earlier toolchain steps.
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
# Ensure the result carries a log list we can append to.
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
workdir = params['workdir']
# exitcode stays equal to CONTINUE (0) as long as processing may proceed.
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
# Change `0 or` to `1 or` to force a snapshot while debugging.
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
    tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Get and check required milestone(s)
# --------------------------------------------------
def milestones_get(name, default=None):
    """Fetch *name* from the milestones dict, recording the lookup in loglist."""
    value = milestones.get(name, default)
    loglist.append((name, value))
    return value
def facts_get(name, default=None):
    """Fetch *name* from the facts dict, recording the lookup in loglist."""
    value = facts.get(name, default)
    loglist.append((name, value))
    return value
def params_get(name, default=None):
    """Fetch *name* from the params dict, recording the lookup in loglist."""
    value = params.get(name, default)
    loglist.append((name, value))
    return value
# ==================================================
# define
# --------------------------------------------------
lockfile_remove_logstamp = ''
lockfile_removed = ''
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
    loglist.append('CHECK PARAMS')
# No parameters are actually required by this step.
if exitcode == CONTINUE:
    loglist.append('PARAMS are ok')
else:
    loglist.append('Bad PARAMS or nothing to do')
# ==================================================
# work
# --------------------------------------------------
# Remove the lock file created at the start of the toolchain run.
if exitcode == CONTINUE:
    lockfile = milestones_get('lockfile')
    if not (lockfile):
        reason = "lockfile is missing"
        exitcode = 22
if exitcode == CONTINUE:
    if os.path.isfile(lockfile):
        os.remove(lockfile)
        lockfile_removed = lockfile
        lockfile = ''
        lockfile_remove_logstamp = tct.logstamp_finegrained()
# ==================================================
# Set MILESTONE
# --------------------------------------------------
# Record the removal (with timestamp) so later steps can see it happened.
if exitcode == CONTINUE:
    if lockfile_removed:
        result['MILESTONES'].append({
            'lockfile': lockfile,
            'lockfile_removed': lockfile_removed,
            'lockfile_remove_logstamp': lockfile_remove_logstamp,
        })
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
| {
"content_hash": "2b48608bf00c2a86c830324a1085f8ea",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 94,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.4970703125,
"repo_name": "marble/Toolchain_RenderDocumentation",
"id": "af945457ff796ad053181fe10acac4d265ab72fe",
"size": "3111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "90-Finish/run_20-Remove-lock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "98648"
},
{
"name": "Python",
"bytes": "745804"
},
{
"name": "Shell",
"bytes": "660"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates the human-readable verbose
    # names of the three meta models and refreshes the field definitions
    # (help texts, default, max lengths) of GenericMetaAttribute.
    dependencies = [
        ("djangocms_page_meta", "0006_auto_20160423_1859"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="genericmetaattribute",
            options={
                "verbose_name": "Page meta info (language-dependent)",
                "verbose_name_plural": "Page meta info (language-dependent)",
            },
        ),
        migrations.AlterModelOptions(
            name="pagemeta",
            options={
                "verbose_name": "Page meta info (all languages)",
                "verbose_name_plural": "Page meta info (all languages)",
            },
        ),
        migrations.AlterModelOptions(
            name="titlemeta",
            options={
                "verbose_name": "Page meta info (language-dependent)",
                "verbose_name_plural": "Page meta info (language-dependent)",
            },
        ),
        migrations.AlterField(
            model_name="genericmetaattribute",
            name="attribute",
            field=models.CharField(
                blank=True, default="", help_text="Custom attribute", max_length=200, verbose_name="attribute"
            ),
        ),
        migrations.AlterField(
            model_name="genericmetaattribute",
            name="name",
            field=models.CharField(help_text="Meta attribute name", max_length=200, verbose_name="name"),
        ),
        migrations.AlterField(
            model_name="genericmetaattribute",
            name="value",
            field=models.CharField(help_text="Meta attribute value", max_length=2000, verbose_name="value"),
        ),
    ]
| {
"content_hash": "240c00792114bbc337f3608761aca09a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 110,
"avg_line_length": 35.02040816326531,
"alnum_prop": 0.5477855477855478,
"repo_name": "nephila/djangocms-page-meta",
"id": "69f55fce36fbd96e386e910fbbb2ae381716b9b9",
"size": "1765",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "djangocms_page_meta/migrations/0007_auto_20160530_2257.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "56"
},
{
"name": "HTML",
"bytes": "3442"
},
{
"name": "Python",
"bytes": "88884"
}
],
"symlink_target": ""
} |
import wandb
from wandb import util
from wandb.plots.utils import (
test_missing,
test_types,
encode_labels,
deprecation_notice,
)
# Hard cap on datapoints per chart, inherited from wandb.Table.
chart_limit = wandb.Table.MAX_ROWS
def heatmap(x_labels, y_labels, matrix_values, show_text=False):
    """
    Generates a heatmap.

    Arguments:
        matrix_values (arr): 2D dataset of shape len(y_labels) x len(x_labels),
            containing heatmap values that can be coerced into an ndarray.
        x_labels (list): Named labels for rows (x_axis).
        y_labels (list): Named labels for columns (y_axis).
        show_text (bool): Show text values in heatmap cells.

    Returns:
        Nothing. To see plots, go to your W&B run page then expand the 'media' tab
        under 'auto visualizations'.

    Example:
        wandb.log({'heatmap': wandb.plots.HeatMap(x_labels, y_labels,
        matrix_values)})
    """
    deprecation_notice()

    np = util.get_module(
        "numpy",
        # Bug fix: the message previously said "roc" (copy/paste error).
        required="heatmap requires the numpy library, install with `pip install numpy`",
    )
    # NOTE: a scikit-learn requirement was removed here; it was copy/pasted
    # from the roc plot and nothing in this function uses sklearn.

    if test_missing(
        x_labels=x_labels, y_labels=y_labels, matrix_values=matrix_values
    ) and test_types(x_labels=x_labels, y_labels=y_labels, matrix_values=matrix_values):
        matrix_values = np.array(matrix_values)
        wandb.termlog("Visualizing heatmap.")

        def heatmap_table(x_labels, y_labels, matrix_values, show_text):
            """Flatten the matrix into (x, y, value) rows, truncated to
            chart_limit cells, and hand them to wandb.visualize."""
            x_axis = []
            y_axis = []
            values = []
            count = 0
            truncated = False
            for i, x in enumerate(x_labels):
                for j, y in enumerate(y_labels):
                    if count >= chart_limit:
                        # Bug fix: the old code only broke out of the inner
                        # loop, so it kept appending one extra cell (and
                        # re-warning) for every remaining row.
                        truncated = True
                        break
                    x_axis.append(x)
                    y_axis.append(y)
                    values.append(matrix_values[j][i])
                    count += 1
                if truncated:
                    break
            if truncated:
                wandb.termwarn(
                    "wandb uses only the first %d datapoints to create the plots."
                    % wandb.Table.MAX_ROWS
                )
            if show_text:
                heatmap_key = "wandb/heatmap/v1"
            else:
                heatmap_key = "wandb/heatmap_no_text/v1"
            return wandb.visualize(
                heatmap_key,
                wandb.Table(
                    columns=["x_axis", "y_axis", "values"],
                    data=[
                        [x_axis[i], y_axis[i], round(values[i], 2)]
                        for i in range(len(x_axis))
                    ],
                ),
            )

        return heatmap_table(x_labels, y_labels, matrix_values, show_text)
| {
"content_hash": "d934c4072ee98ec067a21a3539ae45cd",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 92,
"avg_line_length": 34.01234567901235,
"alnum_prop": 0.5255898366606171,
"repo_name": "wandb/client",
"id": "073735a4f344ca66e48a7bcd6699972461d2a39c",
"size": "2755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wandb/plots/heatmap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
} |
import os
import time
import math
import sys
import argparse
import logging
import subprocess
import MPIfR_LOFAR_LuMP
import MPIfR_LOFAR_LuMP_Recorder_Common
################################################################################
# GLOBALS
################################################################################
# Name under which this program identifies itself.
PROGRAM_NAME = "LuMP_Pulsar_Cleanup.py"
# Version string taken from the LuMP release-date constant.
PROGRAM_VERSION = MPIfR_LOFAR_LuMP.MPIfR_LOFAR_LuMP_VERSION_DATE_STRING
# External helper executable used to dump recording metadata.
INFO_PRINT = "LOFAR_Station_Beamformed_Info_Dump"
############################################################################
def data_type_in_bytes(data_type):
    """Return the size in bytes of one sample of a LOFAR raw data type.

    data_type is the string name of a LOFAR_raw_data_type_enum value,
    e.g. "L_Complex64_t".  Raises RuntimeError (after logging an error)
    for unrecognized names.
    """
    # Lookup table replacing the original if/elif chain; same sizes, same
    # error behavior for unknown type names.
    size_by_type = {
        "L_Complex32_t": 4,
        "L_Complex64_t": 8,
        "L_Complex128_t": 16,
        "L_Complex160_t": 20,
        "L_Complex256_t": 32,
        "L_intComplex8_t": 1,
        "L_intComplex16_t": 2,
        "L_intComplex32_t": 4,
        "L_intComplex64_t": 8,
        "L_intComplex128_t": 16,
    }
    try:
        return size_by_type[data_type]
    except KeyError:
        logging.error("Unknown LOFAR_raw_data_type_enum value '%s'", data_type)
        raise RuntimeError("Unknown LOFAR_raw_data_type_enum value")
################################################################################
def get_info(filename_base):
    """Extract recording metadata for a LuMP pulsar dataset.

    Runs the external INFO_PRINT program on filename_base and scrapes its
    stdout for the parameters needed to align and patch the data files.

    Returns a list:
        [data_type, seconds_per_output, num_channels,
         time_center_output_zero, beamlets_per_sample, writer_type,
         out_data_size]
    Note: out_data_size is declared but never assigned from the dump, so it
    is always None in the returned list.

    Raises RuntimeError when the dataset is not one of the two supported
    LuMP pulsar writer formats, when required keywords are missing, or when
    NUM_CHANNELS is not 1.
    """
    sp = subprocess.Popen([INFO_PRINT,filename_base],
                          bufsize=-1,
                          stdout=subprocess.PIPE)
    data_type = None
    seconds_per_output = None
    num_channels = None
    time_center_output_zero = None
    beamlets_per_sample = None
    writer_type = None
    out_data_size = None
    # Scan the dump line by line for the keywords of interest.
    # NOTE(review): assumes sp.stdout yields text (Python 2 semantics);
    # under Python 3 these startswith() comparisons would need bytes.
    for line in sp.stdout:
        if(line.startswith("WRITER_TYPE ")):
            t = line.split()[1]
            # Only the two LuMP pulsar output formats are supported.
            if((t == "LOFAR_DOFF_LuMP0_OUT")
               or (t == "LOFAR_DOFF_LuMP1_OUT")):
                pass
            else:
                logging.error("Not a LuMP pulsar format. Found '%s' instead", t)
                raise RuntimeError("Not LuMP")
            writer_type = t
        elif(line.startswith("output_DATA_TYPE")):
            data_type = line.split()[1].strip()
        elif(line.startswith("seconds_per_output")):
            seconds_per_output = float(line.split()[1])
        elif(line.startswith("NUM_CHANNELS")):
            num_channels = int(line.split()[1])
        elif(line.startswith("time_center_output_zero")):
            time_center_output_zero = float(line.split()[1])
        elif(line.startswith("beamlets_per_sample")):
            beamlets_per_sample = int(line.split()[1])
    # A valid LuMP pulsar dataset must have exactly one channel per file.
    if((writer_type in ["LOFAR_DOFF_LuMP0_OUT", "LOFAR_DOFF_LuMP1_OUT"])
       and (num_channels == 1)):
        pass
    else:
        if((writer_type is None) or (num_channels is None)):
            raise RuntimeError("could not read LuMP file")
        logging.error("NUM_CHANNELS == %d, not 1. This is not a valid %s output dataset", num_channels, writer_type)
        raise RuntimeError("bad NUM_CHANNELS for writer type")
    return [data_type, seconds_per_output, num_channels, time_center_output_zero, beamlets_per_sample, writer_type, out_data_size]
############################################################################
def override_header_val(header, name, value, chars):
    """Overwrite a fixed-width field in a header string.

    Locates keyword *name* in *header* and replaces the *chars* characters
    following it (plus one separator character when *name* does not already
    end in a space) with *value*, left-justified and space-padded to *chars*
    characters.  Returns the patched header string.

    Raises RuntimeError (after logging) when the keyword is not present.
    """
    start = header.find(name)
    if(start < 0):
        logging.error("Cannot find keyword '%s' in header", name)
        raise RuntimeError("keyword not found")
    # Skip past the keyword, and past the single separator character when
    # the keyword string itself does not end with a space.
    start += len(name)
    if(name[-1] != ' '):
        start += 1
    padded = ("%s" % value).ljust(chars)
    return header[:start] + padded + header[start + chars:]
############################################################################
def fix_header(filename, offset, start, source, RA, Dec, obs_id, telescope,
               pipe):
    """Patch the 4096-byte header at the start of a LuMP data file in place.

    offset: extra bytes (a whole number of sample lines) to add to HDR_SIZE
        so all files begin at the same timestamp; must be >= 0.
    start: common UTC_OFFSET_START_CENTER value (seconds) for the dataset.
    source, RA, Dec, obs_id, telescope, pipe: optional header overrides;
        None leaves the corresponding keyword untouched.

    Raises RuntimeError on a negative offset or when a required keyword is
    missing from the header.
    """
    # The header is a fixed-size 4096-byte block edited in place.
    fp = open(filename, "r+")
    header = fp.read(4096)
    if(offset == 0):
        pass
    elif(offset > 0):
        # Grow HDR_SIZE so readers skip the samples recorded before the
        # common start time.
        keyword = "\nHDR_SIZE"
        size = header.find(keyword)
        if(size < 0):
            logging.error("Cannot find 'HDR_SIZE' in header")
            raise RuntimeError("no HDR_SIZE")
        size += len(keyword)+1
        size = int(header[size:].split(None,1)[0])
        size += offset
        size = "%d"%size
        header = override_header_val(header, keyword, size, 8)
        # Replace the center timestamp with the common start value.
        keyword = "\nUTC_OFFSET_START_CENTER"
        pos = header.find(keyword)
        if(pos < 0):
            logging.error("Cannot find 'UTC_OFFSET_START_CENTER' in header")
            raise RuntimeError("no UTC_OFFSET_START_CENTER")
        pos += len(keyword)+1
        start_c = float(header[pos:].split(None,1)[0])
        header = override_header_val(header, keyword, "%.17E"%start, 24)
        # Shift the leading-edge timestamp by the same amount as the center.
        diff = start-start_c
        keyword = "\nUTC_OFFSET_START_LEADING"
        pos = header.find(keyword)
        if(pos < 0):
            logging.error("Cannot find 'UTC_OFFSET_START_LEADING' in header")
            raise RuntimeError("no UTC_OFFSET_START_LEADING")
        pos += len(keyword)+1
        start_l = float(header[pos:].split(None,1)[0])
        header = override_header_val(header, keyword, "%.17E"%(start_l+diff), 24)
    else:
        logging.error("negative offset %d not allowed", offset)
        raise RuntimeError("bad offset")
    # The arithmetic constants below (e.g. 558-525) are the fixed field
    # widths, in characters, of each keyword's value slot in the header
    # layout.  NOTE(review): presumably derived from byte offsets in the
    # header template — confirm against the writer before changing.
    if(source is not None):
        header = override_header_val(header, "\nSOURCE", source, 558-525)
    if(RA is not None):
        header = override_header_val(header, "\nRA ", RA, 600-563)
    if(Dec is not None):
        header = override_header_val(header, "\nDEC ", Dec, 642-606)
    if(obs_id is not None):
        header = override_header_val(header, "\nOBS_ID", obs_id, 124-91)
    if(telescope is not None):
        header = override_header_val(header, "\nTELESCOPE", telescope, 684-654)
    if(pipe is not None):
        header = override_header_val(header, "\nREAD_DATA_FROM_PIPE", pipe, 1409-1403)
    # Write the patched header back over the original bytes.
    fp.seek(0)
    fp.write(header)
    fp.close()
    return
############################################################################
def main():
    """Command-line entry point.

    Parses options, reads metadata for every --filename_base dataset,
    computes the latest (common) start time, and rewrites the header of
    every data file so that all files share an aligned start and carry the
    requested source metadata.
    """
    p = argparse.ArgumentParser(description="Python program clean up LuMP pulsar datafiles after running LOFAR_Station_Beamformed_Recorder on a single LOFAR recording computer.", epilog="See the accompanying manual for more information.")
    # General setup
    p.add_argument("--datadir", type=str, default=".", help="*OPTIONAL* Name of the directory of the main data recording area in which the LuMP files reside. An absolute or a relative path name may be specified. The datadir '.' may also be specified.", required=False)
    p.add_argument("--filename_base",action="append", type=str,help="*REQUIRED* The base filename of the data. If multiple writers were used for the same pulsar, then multiple filename_base arguments should be given, with the various base filenames", required=True)
    p.add_argument("--source_name", type=str, default=None, help="*OPTIONAL* The name of the pulsar", required=False)
    p.add_argument("--source_RA", type=str, default=None, help="*OPTIONAL* The right ascension of the pulsar as a string, in standard HH:MM:SS.SSS format.", required=False)
    p.add_argument("--source_Dec", type=str, default=None, help="*OPTIONAL* The declination of the pulsar as a string, in the standard +-DD:MM:SS.SS format.", required=False)
    p.add_argument("--obs_id", default=None, type=str, help="*OPTIONAL* Observation ID of this observation, as a string.")
    p.add_argument("--telescope", default=None, type=str, help="*OPTIONAL* Fake telescope name to provide, when downstream pulsar software does not understand the LOFAR station name provided.")
    p.add_argument("--use_pipe", type=str, default=None, help="*OPTIONAL* Speficy whether to read the data from a pipe (True) or to not do this (False).", required=False)
    # Information for the user
    p = MPIfR_LOFAR_LuMP_Recorder_Common.set_option_parsing_user_info(p,PROGRAM_VERSION)
    # "--stdin" makes the program read its argument list from stdin instead
    # of the command line.
    if((len(sys.argv) == 2) and (sys.argv[1] == "--stdin")):
        argv = MPIfR_LOFAR_LuMP_Recorder_Common.read_arguments_from_stdin()
        options = p.parse_args(argv)
    else:
        options = p.parse_args()
    MPIfR_LOFAR_LuMP_Recorder_Common.change_to_data_directory(options.datadir)
    MPIfR_LOFAR_LuMP_Recorder_Common.setup_logging(PROGRAM_NAME,options)
    # Gather per-dataset metadata; start becomes the latest center time of
    # output zero over all datasets (-1E300 is a "minus infinity" sentinel).
    info = []
    start = -1E300
    for fb in options.filename_base:
        i = get_info(fb)
        info.append(i)
        start = max(start,i[3])
    # check for offsets between files
    align_files = True
    for i in info:
        diff = start-i[3]
        num = diff/i[1]
        err = num-math.floor(num+0.5)
        # The offset must be an integer number of output samples (to within
        # 0.1%) for byte-level alignment to be possible.
        if(abs(err) < 0.001):
            pass
        else:
            logging.warning("file datapoint alignment not possible")
            align_files = False
    # Now run through all of the data files and fix them
    for i,fb in enumerate(options.filename_base):
        # For the LuMP0 format, with 1 subband per file,
        # bytes_per_line = num channels * bytes_per_samp * 2 polarizations
        # For the LuMP1 format, with 1 subband per file,
        # bytes_per_line = num channels * bytes_per_samp * 2 polarizations
        #                  * beamlets_per_sample
        bytes_per_samp = data_type_in_bytes(info[i][0])
        if(info[i][5] == "LOFAR_DOFF_LuMP0_OUT"):
            bytes_per_line = info[i][2] * bytes_per_samp * 2
        elif(info[i][5] == "LOFAR_DOFF_LuMP1_OUT"):
            bytes_per_line = info[i][2] * bytes_per_samp * 2 * info[i][4]
        else:
            logging.error("unsupported writer type '%s'", info[i][5])
            raise RuntimeError("unsupported writer type")
        # Convert the time offset to a whole number of sample lines in bytes.
        offset = 0
        if(align_files):
            diff = start-info[i][3]
            num = diff/info[i][1]
            num = int(math.floor(num+0.5))
            offset = num * bytes_per_line
        # The .file_lis file lists every data file belonging to this base.
        file_filename = fb + ".file_lis"
        fp = open(file_filename, "r")
        for datafile in fp:
            filename = datafile
            if(filename[-1] == '\n'):
                filename=filename[:-1]
            fix_header(filename, offset, start,
                       options.source_name,
                       options.source_RA, options.source_Dec,
                       options.obs_id, options.telescope,
                       options.use_pipe)
    logging.info("command finished ok")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "b83136a4f3b041488192c0c7fbe454f9",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 271,
"avg_line_length": 40.748062015503876,
"alnum_prop": 0.5747170170265385,
"repo_name": "AHorneffer/lump-lofar-und-mpifr-pulsare",
"id": "9318a19b0769e56b723f2af9444412cde6b61214",
"size": "11576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lump/src/lump/LuMP_Pulsar_Cleanup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9516"
},
{
"name": "C++",
"bytes": "2434961"
},
{
"name": "Makefile",
"bytes": "33040"
},
{
"name": "Python",
"bytes": "198357"
},
{
"name": "Shell",
"bytes": "113867"
}
],
"symlink_target": ""
} |
import unittest
import parlai.utils.testing as testing_utils
from parlai.core.agents import create_agent
@testing_utils.skipUnlessGPU
class TestDialogptModel(unittest.TestCase):
    """
    Test of DialoGPT model.
    """

    def _test_batchsize(self, batchsize, add_start_token):
        """Check that batched generation matches one-by-one generation."""
        prompts = [
            'How is your day so far?',
            'I hope you you have a good day.',
            "Nice to meet you. My name is John. ",
            "I've got a feeling we're not in Kansas anymore.",
        ]
        opt = {
            'model': 'hugging_face/dialogpt',
            'gpt2_size': 'small',
            'text_truncate': 100,
            'label_truncate': 20,
            'beam_min_length': 1,
            'inference': 'beam',
            'beam_size': 1,
            'add_special_tokens': True,
            'batchsize': batchsize,
            'add_start_token': add_start_token,
        }
        dialogpt = create_agent(opt)

        # One clone per prompt, each acting on its own.
        clones = [dialogpt.clone() for _ in prompts]
        singly_generated = []
        for prompt, clone in zip(prompts, clones):
            clone.observe({'text': prompt, 'episode_done': True})
            singly_generated.append(clone.act()['text'])

        # The same prompts fed through batch_act in groups of `batchsize`.
        batch_generated = []
        for batch_idx in range(len(prompts) // batchsize):
            clones = [dialogpt.clone() for _ in range(batchsize)]
            chunk = prompts[batch_idx * batchsize : (batch_idx + 1) * batchsize]
            observations = [
                clone.observe({'text': text, 'episode_done': True})
                for clone, text in zip(clones, chunk)
            ]
            batch_generated += [x['text'] for x in dialogpt.batch_act(observations)]

        assert singly_generated == batch_generated

    def test_batchsize(self):
        """
        Ensures dialogpt provides the same generation results regardless of batchsize.
        """
        # add_special_tokens=False is incompatible with batchsize > 1 and
        # must raise at agent construction time.
        with self.assertRaises(RuntimeError):
            create_agent(
                {
                    'model': 'hugging_face/dialogpt',
                    'add_special_tokens': False,
                    'batchsize': 2,
                }
            )
        for batchsize in [1, 2, 4]:
            for add_start_token in [True, False]:
                with self.subTest(
                    f'test_batchsize with bs={batchsize} and add_start_token={add_start_token}'
                ):
                    self._test_batchsize(batchsize, add_start_token)

    def test_start_token(self):
        """
        Test RuntimeError is thrown when add_start_token = True and yet add_special_tokens = False
        """
        with self.assertRaises(RuntimeError):
            create_agent(
                {
                    'model': 'hugging_face/dialogpt',
                    'add_special_tokens': False,
                    'add_start_token': True,
                }
            )

    def test_nospecialtok(self):
        """
        Test generation consistency for off-the-shelf dialogpt models.
        """
        expected_pairs = [
            ("What a nice weather!", "I'm in the UK and it's raining here."),
            ("Nice to meet you!", "Hello! I'm from the future!"),
        ]
        opt = {
            'model': 'hugging_face/dialogpt',
            'gpt2_size': 'small',
            'text_truncate': 100,
            'label_truncate': 20,
            'beam_min_length': 1,
            'inference': 'beam',
            'beam_size': 1,
            'add_special_tokens': False,
            'batchsize': 1,
        }
        dialogpt = create_agent(opt)
        for prompt, expected_reply in expected_pairs:
            dialogpt.observe({'text': prompt, 'episode_done': True})
            assert dialogpt.act()['text'] == expected_reply

    def test_dialogpt(self):
        """
        Checks that DialoGPT gets a certain performance on the integration test task.
        """
        valid_report, test_report = testing_utils.train_model(
            dict(
                task='integration_tests:overfit',
                model='hugging_face/dialogpt',
                add_special_tokens=True,
                add_start_token=True,
                optimizer='adam',
                learningrate=1e-3,
                batchsize=1,
                num_epochs=100,
                validation_every_n_epochs=5,
                validation_metric='ppl',
                short_final_eval=True,
                skip_generation=True,
            )
        )
        self.assertLessEqual(valid_report['ppl'], 4.0)
        self.assertLessEqual(test_report['ppl'], 4.0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "eaf417976f8c7f224270f3c7dd32611d",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 98,
"avg_line_length": 34.262773722627735,
"alnum_prop": 0.5108649339582446,
"repo_name": "facebookresearch/ParlAI",
"id": "aa647d108548c413e5dd1e65b8f9d584a354b521",
"size": "4894",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/nightly/gpu/test_dialogpt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
import json
import pathlib
import yaml
class ConfigReader:
    """Read a configuration file in YAML or JSON format.

    The on-disk format is chosen from the file extension of the path
    supplied at construction time.
    """

    def __init__(self, path: pathlib.Path):
        # Path of the configuration file to parse.
        self.config_path = path

    def import_config(self):
        """Parse and return the configuration file's contents.

        Returns:
            The deserialized document (typically a dict).

        Raises:
            NotImplementedError: if the file extension is not one of
                ``.yaml``, ``.yml``, or ``.json``.
        """
        path = self.config_path
        # Bug fix: ``open(path).read()`` leaked the file handle;
        # ``Path.read_text`` opens and closes the file itself.
        content = path.read_text()
        if path.suffix in [".yaml", ".yml"]:
            # NOTE(review): yaml.Loader allows constructing arbitrary Python
            # objects; prefer yaml.safe_load if configs may be untrusted.
            return yaml.load(content, Loader=yaml.Loader)
        elif path.suffix == ".json":
            return json.loads(content)
        else:
            raise NotImplementedError(f"{path.suffix} not supported in {path}")
| {
"content_hash": "2bb30f447d4ebe112a57db10423c624a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.5992141453831041,
"repo_name": "cihai/cihai",
"id": "28e2d515545c06f16678dc759fefb5aa85832cf2",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cihai/config_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1685"
},
{
"name": "Python",
"bytes": "56145"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
import re
import pytest
from waterbutler.providers.github.path import GitHubPath
class TestGitHubPath:

    def test_id_accessors(self):
        """branch_ref and file_sha expose the components of the leaf _id."""
        path = GitHubPath('/foo', _ids=[('master', None), ('master', 'abcea54as123')])
        assert path.branch_ref == 'master'
        assert path.file_sha == 'abcea54as123'

    def test_child_inherits_branch(self):
        """A child created without an explicit _id keeps the parent's branch."""
        parent = GitHubPath('/foo/', _ids=[('master', None), ('master', 'abcea54as123')])
        child = parent.child('foo')
        assert child.branch_ref == 'master'
        assert child.file_sha is None

    def test_child_given_explicit_branch(self):
        """An explicit _id overrides the inherited branch and file sha."""
        parent = GitHubPath('/foo/', _ids=[('master', None), ('master', 'abcea54as123')])
        child = parent.child('foo', _id=('develop', '413006763'))
        assert child.branch_ref == 'develop'
        assert child.file_sha == '413006763'
| {
"content_hash": "0efb34c24da66985ae1ae04b79b77794",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 92,
"avg_line_length": 32.357142857142854,
"alnum_prop": 0.6225165562913907,
"repo_name": "Johnetordoff/waterbutler",
"id": "3904d4becee5bbff8e3aad7b0682cd2644a67620",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/providers/github/test_path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "877100"
}
],
"symlink_target": ""
} |
import windmill
import exceptions
import os, sys, shutil, time, signal
import killableprocess
import logging
if sys.platform == "win32":
import _winreg as wreg
if sys.platform == "cygwin":
import cygwinreg as wreg
logger = logging.getLogger(__name__)
class InternetExplorer(object):
    """Launch Internet Explorer routed through the local windmill proxy.

    Works by rewriting the current user's WinINet proxy settings in the
    registry before launch (set_proxy) and restoring the captured previous
    values afterwards (unset_proxy).
    """
    # Registry values under ...\CurrentVersion\Internet Settings that must
    # be changed to route IE through the local proxy.  A 'previous_value'
    # entry is added per key by __init__ so unset_proxy() can restore it.
    # NOTE(review): this is a class-level dict mutated per instance, so the
    # state is shared across all InternetExplorer instances.
    registry_modifications = {'MigrateProxy': {'type': wreg.REG_DWORD, 'new_value':1},
                              'ProxyEnable': {'type': wreg.REG_DWORD, 'new_value':1},
                              'ProxyHttp1.1': {'type': wreg.REG_DWORD, 'new_value':1},
                              'ProxyServer': {'type': wreg.REG_SZ}}
    def __init__(self):
        # Build the proxy string pointing http (and https when SSL support
        # is available) at the local windmill server port.
        self.proxy_port = windmill.settings['SERVER_HTTP_PORT']
        self.test_url = windmill.get_test_url(windmill.settings['TEST_URL'])
        self.registry_modifications['ProxyServer']['new_value'] = "http=localhost:%s" % self.proxy_port
        if windmill.has_ssl:
            self.registry_modifications['ProxyServer']['new_value'] += ';https=localhost:%s' % self.proxy_port
        self.reg = wreg.OpenKey(wreg.HKEY_CURRENT_USER,
                                "Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings", 0, wreg.KEY_ALL_ACCESS)
        # Snapshot the pre-existing registry values (None when absent) so
        # they can be restored by unset_proxy().
        for key, value in self.registry_modifications.items():
            try:
                result = wreg.QueryValueEx(self.reg, key)
                self.registry_modifications[key]['previous_value'] = result[0]
            except exceptions.WindowsError:
                self.registry_modifications[key]['previous_value'] = None
        self.ie_binary = windmill.settings['IE_BINARY']
        # Command line used to launch IE pointed at the test URL.
        self.cmd = [self.ie_binary, self.test_url]
    def set_proxy(self):
        # Write the windmill proxy values into the registry.
        for key, value in self.registry_modifications.items():
            wreg.SetValueEx(self.reg, key, 0, value['type'], value['new_value'])
    def unset_proxy(self):
        # Restore the registry to the values captured in __init__.
        for key, value in self.registry_modifications.items():
            if value['previous_value'] is not None:
                wreg.SetValueEx(self.reg, key, 0, value['type'], value['previous_value'])
            else:
                # The key did not exist before; remove it again.
                wreg.DeleteValue(self.reg, key)
    def start(self):
        """Start IE"""
        self.set_proxy()
        # allow_reg = wreg.OpenKey(wreg.HKEY_CURRENT_USER,
        #             "Software\\Microsoft\\Internet Explorer\\New Windows\\Allow", 0, wreg.KEY_ALL_ACCESS)
        #
        # wreg.SetValueEx(allow_reg, urlparse(windmill.settings['TEST_URL']).hostname,
        #                 0, wreg.REG_BINARY, None)
        #Workaround for but in nose
        if hasattr(sys.stdout, 'fileno'):
            kwargs = {'stdout':sys.stdout ,'stderr':sys.stderr, 'stdin':sys.stdin}
        else:
            kwargs = {'stdout':sys.__stdout__ ,'stderr':sys.__stderr__, 'stdin':sys.stdin}
        self.p_handle = killableprocess.Popen(self.cmd, **kwargs)
    def stop(self):
        """Stop IE"""
        self.unset_proxy()
        try:
            self.p_handle.kill(group=True)
        except:
            logger.error('Cannot kill Internet Explorer')
    def is_alive(self):
        # NOTE(review): under subprocess semantics poll() returns None while
        # the process is still running, so this branch looks inverted (it
        # reports False for a live process).  Confirm killableprocess's
        # poll() semantics before relying on this method.
        if self.p_handle.poll() is None:
            return False
        try:
            self.p_handle.kill(group=True)
            return True
        except exceptions.OSError:
            return False
| {
"content_hash": "9ade0e8fbc195207cc4433722b9cdaea",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 122,
"avg_line_length": 39.22727272727273,
"alnum_prop": 0.5538818076477404,
"repo_name": "ept/windmill",
"id": "344adf1c3ba9fabae0ac216b2a77882a81dace10",
"size": "4085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windmill/browser/ie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "653334"
},
{
"name": "Python",
"bytes": "337875"
},
{
"name": "Shell",
"bytes": "49"
}
],
"symlink_target": ""
} |
"""File IO methods that wrap the C++ FileSystem API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import os
import uuid
import six
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import _pywrap_file_io
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# A good default block size depends on the system in question.
# A somewhat conservative default chosen here.
_DEFAULT_BLOCK_SIZE = 16 * 1024 * 1024
class FileIO(object):
  """FileIO class that exposes methods to read / write to / from files.

  The constructor takes the following arguments:
  name: [path-like object](https://docs.python.org/3/glossary.html#term-path-like-object)
    giving the pathname of the file to be opened.
  mode: one of `r`, `w`, `a`, `r+`, `w+`, `a+`. Append `b` for bytes mode.

  Can be used as an iterator to iterate over lines in the file.

  The default buffer size used for the BufferedInputStream used for reading
  the file line by line is 1024 * 512 bytes.
  """
  def __init__(self, name, mode):
    self.__name = name
    self.__mode = mode
    # Native stream handles, created lazily by _preread_check /
    # _prewrite_check on first read/write.
    self._read_buf = None
    self._writable_file = None
    # In binary mode read()/readline() return bytes, otherwise str.
    self._binary_mode = "b" in mode
    mode = mode.replace("b", "")
    if mode not in ("r", "w", "a", "r+", "w+", "a+"):
      raise errors.InvalidArgumentError(
          None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
    # Which operations the chosen mode permits.
    self._read_check_passed = mode in ("r", "r+", "a+", "w+")
    self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")
  @property
  def name(self):
    """Returns the file name."""
    return self.__name
  @property
  def mode(self):
    """Returns the mode in which the file was opened."""
    return self.__mode
  def _preread_check(self):
    # Lazily open the buffered input stream; raises if the mode does not
    # permit reading.
    if not self._read_buf:
      if not self._read_check_passed:
        raise errors.PermissionDeniedError(None, None,
                                           "File isn't open for reading")
      self._read_buf = _pywrap_file_io.BufferedInputStream(
          compat.path_to_str(self.__name), 1024 * 512)
  def _prewrite_check(self):
    # Lazily open the writable file handle; raises if the mode does not
    # permit writing.
    if not self._writable_file:
      if not self._write_check_passed:
        raise errors.PermissionDeniedError(None, None,
                                           "File isn't open for writing")
      self._writable_file = _pywrap_file_io.WritableFile(
          compat.path_to_bytes(self.__name), compat.as_bytes(self.__mode))
  def _prepare_value(self, val):
    # Convert a value read from the native stream to the type implied by
    # the open mode (bytes in binary mode, str otherwise).
    if self._binary_mode:
      return compat.as_bytes(val)
    else:
      return compat.as_str_any(val)
  def size(self):
    """Returns the size of the file."""
    # Delegates to the module-level stat() helper defined elsewhere in
    # this module.
    return stat(self.__name).length
  def write(self, file_content):
    """Writes file_content to the file. Appends to the end of the file."""
    self._prewrite_check()
    self._writable_file.append(compat.as_bytes(file_content))
  def read(self, n=-1):
    """Returns the contents of a file as a string.

    Starts reading from current position in file.

    Args:
      n: Read `n` bytes if `n != -1`. If `n = -1`, reads to end of file.

    Returns:
      `n` bytes of the file (or whole file) in bytes mode or `n` bytes of the
      string if in string (regular) mode.
    """
    self._preread_check()
    if n == -1:
      length = self.size() - self.tell()
    else:
      length = n
    return self._prepare_value(self._read_buf.read(length))
  @deprecation.deprecated_args(
      None, "position is deprecated in favor of the offset argument.",
      "position")
  def seek(self, offset=None, whence=0, position=None):
    # TODO(jhseu): Delete later. Used to omit `position` from docs.
    # pylint: disable=g-doc-args
    """Seeks to the offset in the file.

    Args:
      offset: The byte count relative to the whence argument.
      whence: Valid values for whence are:
        0: start of the file (default)
        1: relative to the current position of the file
        2: relative to the end of file. `offset` is usually negative.
    """
    # pylint: enable=g-doc-args
    self._preread_check()
    # We needed to make offset a keyword argument for backwards-compatibility.
    # This check exists so that we can convert back to having offset be a
    # positional argument.
    # TODO(jhseu): Make `offset` a positional argument after `position` is
    # deleted.
    if offset is None and position is None:
      raise TypeError("seek(): offset argument required")
    if offset is not None and position is not None:
      raise TypeError("seek(): offset and position may not be set "
                      "simultaneously.")
    if position is not None:
      offset = position
    # Translate whence-relative offsets to an absolute stream position.
    if whence == 0:
      pass
    elif whence == 1:
      offset += self.tell()
    elif whence == 2:
      offset += self.size()
    else:
      raise errors.InvalidArgumentError(
          None, None,
          "Invalid whence argument: {}. Valid values are 0, 1, or 2.".format(
              whence))
    self._read_buf.seek(offset)
  def readline(self):
    r"""Reads the next line, keeping \n. At EOF, returns ''."""
    self._preread_check()
    return self._prepare_value(self._read_buf.readline())
  def readlines(self):
    """Returns all lines from the file in a list."""
    self._preread_check()
    lines = []
    while True:
      s = self.readline()
      if not s:
        break
      lines.append(s)
    return lines
  def tell(self):
    """Returns the current position in the file."""
    # Reading and writing track positions on separate native handles.
    if self._read_check_passed:
      self._preread_check()
      return self._read_buf.tell()
    else:
      self._prewrite_check()
      return self._writable_file.tell()
  def __enter__(self):
    """Make usable with "with" statement."""
    return self
  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Make usable with "with" statement."""
    self.close()
  def __iter__(self):
    return self
  def __next__(self):
    # Iterating a FileIO yields its lines; stop at EOF.
    retval = self.readline()
    if not retval:
      raise StopIteration()
    return retval
  def next(self):
    # Python 2 iterator protocol alias for __next__.
    return self.__next__()
  def flush(self):
    """Flushes the Writable file.

    This only ensures that the data has made its way out of the process without
    any guarantees on whether it's written to disk. This means that the
    data would survive an application crash but not necessarily an OS crash.
    """
    if self._writable_file:
      self._writable_file.flush()
  def close(self):
    r"""Closes the file.

    Should be called for the WritableFile to be flushed.

    In general, if you use the context manager pattern, you don't need to call
    this directly.

    >>> with tf.io.gfile.GFile("/tmp/x", "w") as f:
    ...   f.write("asdf\n")
    ...   f.write("qwer\n")
    >>> # implicit f.close() at the end of the block

    For cloud filesystems, forgetting to call `close()` might result in data
    loss as last write might not have been replicated.
    """
    self._read_buf = None
    if self._writable_file:
      self._writable_file.close()
      self._writable_file = None
  def seekable(self):
    """Returns True as FileIO supports random access ops of seek()/tell()"""
    return True
@tf_export("io.gfile.exists")
def file_exists_v2(path):
"""Determines whether a path exists or not.
>>> with open("/tmp/x", "w") as f:
... f.write("asdf")
...
4
>>> tf.io.gfile.exists("/tmp/x")
True
You can also specify the URI scheme for selecting a different filesystem:
>>> # for a GCS filesystem path:
>>> # tf.io.gfile.exists("gs://bucket/file")
>>> # for a local filesystem:
>>> with open("/tmp/x", "w") as f:
... f.write("asdf")
...
4
>>> tf.io.gfile.exists("file:///tmp/x")
True
This currently returns `True` for existing directories but don't rely on this
behavior, especially if you are using cloud filesystems (e.g., GCS, S3,
Hadoop):
>>> tf.io.gfile.exists("/tmp")
True
Args:
path: string, a path
Returns:
True if the path exists, whether it's a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
try:
_pywrap_file_io.FileExists(compat.path_to_bytes(path))
except errors.NotFoundError:
return False
return True
@tf_export(v1=["gfile.Exists"])
def file_exists(filename):
return file_exists_v2(filename)
file_exists.__doc__ = file_exists_v2.__doc__
@tf_export(v1=["gfile.Remove"])
def delete_file(filename):
"""Deletes the file located at 'filename'.
Args:
filename: string, a filename
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
`NotFoundError` if the file does not exist.
"""
delete_file_v2(filename)
@tf_export("io.gfile.remove")
def delete_file_v2(path):
"""Deletes the path located at 'path'.
Args:
path: string, a path
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
`NotFoundError` if the path does not exist.
"""
_pywrap_file_io.DeleteFile(compat.path_to_bytes(path))
def read_file_to_string(filename, binary_mode=False):
  """Reads the entire contents of a file to a string.

  Args:
    filename: string, path to a file
    binary_mode: whether to open the file in binary mode or not. This changes
      the type of the object returned.

  Returns:
    contents of the file as a string or bytes.

  Raises:
    errors.OpError: Raises variety of errors that are subtypes e.g.
    `NotFoundError` etc.
  """
  # Use the FileIO context manager so the underlying file handle is always
  # released; the previous implementation never called close() and leaked it.
  mode = "rb" if binary_mode else "r"
  with FileIO(filename, mode=mode) as f:
    return f.read()
def write_string_to_file(filename, file_content):
  """Writes a string to a given file.

  Args:
    filename: string, path to a file
    file_content: string, contents that need to be written to the file

  Raises:
    errors.OpError: If there are errors during the operation.
  """
  # The context manager guarantees the write is flushed and the handle closed.
  with FileIO(filename, mode="w") as f:
    f.write(file_content)
@tf_export(v1=["gfile.Glob"])
def get_matching_files(filename):
"""Returns a list of files that match the given pattern(s).
Args:
filename: string or iterable of strings. The glob pattern(s).
Returns:
A list of strings containing filenames that match the given pattern(s).
Raises:
* errors.OpError: If there are filesystem / directory listing errors.
* errors.NotFoundError: If pattern to be matched is an invalid directory.
"""
return get_matching_files_v2(filename)
@tf_export("io.gfile.glob")
def get_matching_files_v2(pattern):
r"""Returns a list of files that match the given pattern(s).
The patterns are defined as strings. Supported patterns are defined
here. Note that the pattern can be a Python iteratable of string patterns.
The format definition of the pattern is:
**pattern**: `{ term }`
**term**:
* `'*'`: matches any sequence of non-'/' characters
* `'?'`: matches a single non-'/' character
* `'[' [ '^' ] { match-list } ']'`: matches any single
character (not) on the list
* `c`: matches character `c` where `c != '*', '?', '\\', '['`
* `'\\' c`: matches character `c`
**character range**:
* `c`: matches character `c` while `c != '\\', '-', ']'`
* `'\\' c`: matches character `c`
* `lo '-' hi`: matches character `c` for `lo <= c <= hi`
Examples:
>>> tf.io.gfile.glob("*.py")
... # For example, ['__init__.py']
>>> tf.io.gfile.glob("__init__.??")
... # As above
>>> files = {"*.py"}
>>> the_iterator = iter(files)
>>> tf.io.gfile.glob(the_iterator)
... # As above
See the C++ function `GetMatchingPaths` in
[`core/platform/file_system.h`]
(../../../core/platform/file_system.h)
for implementation details.
Args:
pattern: string or iterable of strings. The glob pattern(s).
Returns:
A list of strings containing filenames that match the given pattern(s).
Raises:
errors.OpError: If there are filesystem / directory listing errors.
errors.NotFoundError: If pattern to be matched is an invalid directory.
"""
if isinstance(pattern, six.string_types):
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for matching_filename in _pywrap_file_io.GetMatchingFiles(
compat.as_bytes(pattern))
]
else:
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename) # pylint: disable=g-complex-comprehension
for single_filename in pattern
for matching_filename in _pywrap_file_io.GetMatchingFiles(
compat.as_bytes(single_filename))
]
@tf_export(v1=["gfile.MkDir"])
def create_dir(dirname):
"""Creates a directory with the name `dirname`.
Args:
dirname: string, name of the directory to be created
Notes: The parent directories need to exist. Use `tf.io.gfile.makedirs`
instead if there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
create_dir_v2(dirname)
@tf_export("io.gfile.mkdir")
def create_dir_v2(path):
"""Creates a directory with the name given by `path`.
Args:
path: string, name of the directory to be created
Notes: The parent directories need to exist. Use `tf.io.gfile.makedirs`
instead if there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
_pywrap_file_io.CreateDir(compat.path_to_bytes(path))
@tf_export(v1=["gfile.MakeDirs"])
def recursive_create_dir(dirname):
"""Creates a directory and all parent/intermediate directories.
It succeeds if dirname already exists and is writable.
Args:
dirname: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
recursive_create_dir_v2(dirname)
@tf_export("io.gfile.makedirs")
def recursive_create_dir_v2(path):
"""Creates a directory and all parent/intermediate directories.
It succeeds if path already exists and is writable.
Args:
path: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
_pywrap_file_io.RecursivelyCreateDir(compat.path_to_bytes(path))
@tf_export("io.gfile.copy")
def copy_v2(src, dst, overwrite=False):
  """Copies data from `src` to `dst`.
  >>> with open("/tmp/x", "w") as f:
  ... f.write("asdf")
  ...
  4
  >>> tf.io.gfile.exists("/tmp/x")
  True
  >>> tf.io.gfile.copy("/tmp/x", "/tmp/y")
  >>> tf.io.gfile.exists("/tmp/y")
  True
  >>> tf.io.gfile.remove("/tmp/y")
  You can also specify the URI scheme for selecting a different filesystem:
  >>> with open("/tmp/x", "w") as f:
  ... f.write("asdf")
  ...
  4
  >>> tf.io.gfile.copy("/tmp/x", "file:///tmp/y")
  >>> tf.io.gfile.exists("/tmp/y")
  True
  >>> tf.io.gfile.remove("/tmp/y")
  Note that you need to always specify a file name, even if moving into a new
  directory. This is because some cloud filesystems don't have the concept of a
  directory.
  >>> with open("/tmp/x", "w") as f:
  ... f.write("asdf")
  ...
  4
  >>> tf.io.gfile.mkdir("/tmp/new_dir")
  >>> tf.io.gfile.copy("/tmp/x", "/tmp/new_dir/y")
  >>> tf.io.gfile.exists("/tmp/new_dir/y")
  True
  >>> tf.io.gfile.rmtree("/tmp/new_dir")
  If you want to prevent errors if the path already exists, you can use
  `overwrite` argument:
  >>> with open("/tmp/x", "w") as f:
  ... f.write("asdf")
  ...
  4
  >>> tf.io.gfile.copy("/tmp/x", "file:///tmp/y")
  >>> tf.io.gfile.copy("/tmp/x", "file:///tmp/y", overwrite=True)
  >>> tf.io.gfile.remove("/tmp/y")
  Note that the above will still result in an error if you try to overwrite a
  directory with a file.
  Note that you cannot copy a directory, only file arguments are supported.
  Args:
    src: string, name of the file whose contents need to be copied
    dst: string, name of the file to which to copy to
    overwrite: boolean, if false it's an error for `dst` to be occupied by an
      existing file.
  Raises:
    errors.OpError: If the operation fails.
  """
  # Delegates to the C++ filesystem layer; paths are passed as bytes.
  _pywrap_file_io.CopyFile(
      compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)
@tf_export(v1=["gfile.Copy"])
def copy(oldpath, newpath, overwrite=False):
  # v1 alias for copy_v2; the docstring is shared via the assignment below.
  copy_v2(oldpath, newpath, overwrite)
copy.__doc__ = copy_v2.__doc__
@tf_export(v1=["gfile.Rename"])
def rename(oldname, newname, overwrite=False):
  """Rename or move a file / directory.
  Args:
    oldname: string, pathname for a file
    newname: string, pathname to which the file needs to be moved
    overwrite: boolean, if false it's an error for `newname` to be occupied by
      an existing file.
  Raises:
    errors.OpError: If the operation fails.
  """
  # v1 alias: delegates to the tf.io.gfile.rename implementation below.
  rename_v2(oldname, newname, overwrite)
@tf_export("io.gfile.rename")
def rename_v2(src, dst, overwrite=False):
  """Rename or move a file / directory.
  Args:
    src: string, pathname for a file
    dst: string, pathname to which the file needs to be moved
    overwrite: boolean, if false it's an error for `dst` to be occupied by an
      existing file.
  Raises:
    errors.OpError: If the operation fails.
  """
  # Atomicity of the rename depends on the underlying filesystem; see
  # has_atomic_move() below.
  _pywrap_file_io.RenameFile(
      compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)
def atomic_write_string_to_file(filename, contents, overwrite=True):
  """Writes to `filename` atomically.
  This means that when `filename` appears in the filesystem, it will contain
  all of `contents`. With write_string_to_file, it is possible for the file
  to appear in the filesystem with `contents` only partially written.
  Accomplished by writing to a temp file and then renaming it.
  Args:
    filename: string, pathname for a file
    contents: string, contents that need to be written to the file
    overwrite: boolean, if false it's an error for `filename` to be occupied by
      an existing file.
  """
  # Filesystems without an atomic rename get a plain (non-atomic) write.
  if not has_atomic_move(filename):
    write_string_to_file(filename, contents)
    return
  # Write to a uniquely named sibling, then rename over the target.
  temp_pathname = "".join([filename, ".tmp", uuid.uuid4().hex])
  write_string_to_file(temp_pathname, contents)
  try:
    rename(temp_pathname, filename, overwrite)
  except errors.OpError:
    # Don't leave the temp file behind if the rename failed.
    delete_file(temp_pathname)
    raise
@tf_export(v1=["gfile.DeleteRecursively"])
def delete_recursively(dirname):
  """Deletes everything under dirname recursively.
  Args:
    dirname: string, a path to a directory
  Raises:
    errors.OpError: If the operation fails.
  """
  # v1 alias: delegates to the tf.io.gfile.rmtree implementation below.
  delete_recursively_v2(dirname)
@tf_export("io.gfile.rmtree")
def delete_recursively_v2(path):
  """Deletes everything under path recursively.
  Args:
    path: string, a path
  Raises:
    errors.OpError: If the operation fails.
  """
  # "rm -r" semantics: removes files and directories below `path`.
  _pywrap_file_io.DeleteRecursively(compat.path_to_bytes(path))
@tf_export(v1=["gfile.IsDirectory"])
def is_directory(dirname):
  """Returns whether the path is a directory or not.
  Args:
    dirname: string, path to a potential directory
  Returns:
    True, if the path is a directory; False otherwise
  """
  # v1 alias: delegates to the tf.io.gfile.isdir implementation below.
  return is_directory_v2(dirname)
@tf_export("io.gfile.isdir")
def is_directory_v2(path):
  """Returns whether the path is a directory or not.
  Args:
    path: string, path to a potential directory
  Returns:
    True, if the path is a directory; False otherwise
  """
  try:
    return _pywrap_file_io.IsDirectory(compat.path_to_bytes(path))
  except errors.OpError:
    # Invalid or inaccessible paths are reported as "not a directory"
    # rather than raising.
    return False
def has_atomic_move(path):
  """Checks whether the file system supports atomic moves.
  Returns whether or not the file system of the given path supports the atomic
  move operation for a file or folder. If atomic move is supported, it is
  recommended to use a temp location for writing and then move to the final
  location.
  Args:
    path: string, path to a file
  Returns:
    True, if the path is on a file system that supports atomic move
    False, if the file system does not support atomic move. In such cases
           we need to be careful about using moves. In some cases it is safer
           not to use temporary locations in this case.
  """
  try:
    return _pywrap_file_io.HasAtomicMove(compat.path_to_bytes(path))
  except errors.OpError:
    # If the query itself fails, optimistically assume atomic moves are
    # supported (defaults to True).
    return True
@tf_export(v1=["gfile.ListDirectory"])
def list_directory(dirname):
  """Returns a list of entries contained within a directory.
  The list is in arbitrary order. It does not contain the special entries "."
  and "..".
  Args:
    dirname: string, path to a directory
  Returns:
    [filename1, filename2, ... filenameN] as strings
  Raises:
    errors.NotFoundError if directory doesn't exist
  """
  # v1 alias: delegates to the tf.io.gfile.listdir implementation below.
  return list_directory_v2(dirname)
@tf_export("io.gfile.listdir")
def list_directory_v2(path):
  """Returns a list of entries contained within a directory.
  The list is in arbitrary order. It does not contain the special entries "."
  and "..".
  Args:
    path: string, path to a directory
  Returns:
    [filename1, filename2, ... filenameN] as strings
  Raises:
    errors.NotFoundError if directory doesn't exist
  """
  # Fail early with a clear error when `path` is not a directory.
  if not is_directory(path):
    raise errors.NotFoundError(
        node_def=None,
        op=None,
        message="Could not find directory {}".format(path))
  # GetChildren returns bytes; decode every entry so callers see strings.
  children = _pywrap_file_io.GetChildren(compat.path_to_bytes(path))
  return [compat.as_str_any(child) for child in children]
@tf_export(v1=["gfile.Walk"])
def walk(top, in_order=True):
  """Recursive directory tree generator for directories.
  Args:
    top: string, a Directory name
    in_order: bool, Traverse in order if True, post order if False. Errors that
      happen while listing directories are ignored.
  Yields:
    Each yield is a 3-tuple:  the pathname of a directory, followed by lists of
    all its subdirectories and leaf files. That is, each yield looks like:
    `(dirname, [subdirname, subdirname, ...], [filename, filename, ...])`.
    Each item is a string.
  """
  # v1 alias: `in_order` maps onto walk_v2's `topdown` parameter.
  return walk_v2(top, in_order)
@tf_export("io.gfile.walk")
def walk_v2(top, topdown=True, onerror=None):
  """Recursive directory tree generator for directories.
  Args:
    top: string, a Directory name
    topdown: bool, Traverse pre order if True, post order if False.
    onerror: optional handler for errors. Should be a function, it will be
      called with the error as argument. Rethrowing the error aborts the walk.
      Errors that happen while listing directories are ignored.
  Yields:
    Each yield is a 3-tuple:  the pathname of a directory, followed by lists of
    all its subdirectories and leaf files. That is, each yield looks like:
    `(dirname, [subdirname, subdirname, ...], [filename, filename, ...])`.
    Each item is a string.
  """

  def _make_full_path(parent, item):
    # Since `os.path.join` discards paths before one that starts with the path
    # separator (https://docs.python.org/3/library/os.path.html#os.path.join),
    # we have to manually handle that case as `/` is a valid character on GCS.
    if item[0] == os.sep:
      return "".join([os.path.join(parent, ""), item])
    return os.path.join(parent, item)

  top = compat.as_str_any(compat.path_to_str(top))
  try:
    listing = list_directory(top)
  except errors.NotFoundError as err:
    if onerror:
      onerror(err)
    # Bug fix: stop this branch of the walk even when `onerror` returns
    # normally. Previously control fell through with `listing` unbound and
    # the loop below raised NameError whenever a non-rethrowing handler was
    # supplied.
    return

  # Partition the listing into subdirectories and leaf files.
  files = []
  subdirs = []
  for item in listing:
    full_path = _make_full_path(top, item)
    if is_directory(full_path):
      subdirs.append(item)
    else:
      files.append(item)

  here = (top, subdirs, files)

  if topdown:
    yield here

  # Recurse into each subdirectory with the same traversal settings.
  for subdir in subdirs:
    for subitem in walk_v2(
        _make_full_path(top, subdir), topdown, onerror=onerror):
      yield subitem

  if not topdown:
    yield here
@tf_export(v1=["gfile.Stat"])
def stat(filename):
  """Returns file statistics for a given path.
  Args:
    filename: string, path to a file
  Returns:
    FileStatistics struct that contains information about the path
  Raises:
    errors.OpError: If the operation fails.
  """
  # v1 alias: delegates to the tf.io.gfile.stat implementation below.
  return stat_v2(filename)
@tf_export("io.gfile.stat")
def stat_v2(path):
  """Returns file statistics for a given path.
  Args:
    path: string, path to a file
  Returns:
    FileStatistics struct that contains information about the path
  Raises:
    errors.OpError: If the operation fails.
  """
  # Note: unlike the other wrappers, Stat takes a str path, not bytes.
  return _pywrap_file_io.Stat(compat.path_to_str(path))
def filecmp(filename_a, filename_b):
  """Compare two files, returning True if they are the same, False otherwise.
  We check size first and return False quickly if the files are different sizes.
  If they are the same size, we continue to generating a crc for the whole file.
  You might wonder: why not use Python's `filecmp.cmp()` instead? The answer is
  that the builtin library is not robust to the many different filesystems
  TensorFlow runs on, and so we here perform a similar comparison with
  the more robust FileIO.
  Args:
    filename_a: string path to the first file.
    filename_b: string path to the second file.
  Returns:
    True if the files are the same, False otherwise.
  """
  # Bug fix: use context managers so the FileIO handles are closed promptly.
  # The previous implementation opened both files and never closed them,
  # leaking the handles until garbage collection.
  with FileIO(filename_a, "rb") as f_a:
    size_a = f_a.size()
  with FileIO(filename_b, "rb") as f_b:
    size_b = f_b.size()
  if size_a != size_b:
    return False
  # Size is the same. Do a full check.
  return file_crc32(filename_a) == file_crc32(filename_b)
def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):
  """Get the crc32 of the passed file.
  The crc32 of a file can be used for error checking; two files with the same
  crc32 are considered equivalent. Note that the entire file must be read
  to produce the crc32.
  Args:
    filename: string, path to a file
    block_size: Integer, process the files by reading blocks of `block_size`
      bytes. Use -1 to read the file as once.
  Returns:
    hexadecimal as string, the crc32 of the passed file.
  """
  crc = 0
  with FileIO(filename, mode="rb") as f:
    # Fold each block into the running checksum until EOF.
    while True:
      chunk = f.read(n=block_size)
      if not chunk:
        break
      crc = binascii.crc32(chunk, crc)
  # Mask to 32 bits for a representation independent of Python's int signs.
  return hex(crc & 0xFFFFFFFF)
| {
"content_hash": "658ddad526856ae3efe1d0e827aba0a8",
"timestamp": "",
"source": "github",
"line_count": 926,
"max_line_length": 89,
"avg_line_length": 28.370410367170628,
"alnum_prop": 0.6614517909481938,
"repo_name": "frreiss/tensorflow-fred",
"id": "faf39d03d8e35275174825bf800f8cf362456c4a",
"size": "26960",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/lib/io/file_io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
STA 9792 Assignment 2 compute unsmooth ngram
Created on Tue Sep 6 19:28:42 2017
@author: Qijun (Vince) Chen
"""
import re
import pandas as pd
#import matplotlib.pyplot as plt
def unsmooth_ngram(inputFile, n=1, freq=1):  # n=1: unigram, n=2: bigram, etc.
    """Compute unsmoothed n-gram frequencies for a text file.

    Args:
        inputFile: path of the plain-text file to analyze.
        n: order of the n-gram (1: unigram, 2: bigram, ...).
        freq: minimum frequency a phrase must have to be kept.

    Returns:
        A pandas DataFrame indexed by phrase with columns 'Frequencies'
        and 'Percentage(%)', sorted by descending frequency and filtered
        by `freq`.
    """
    # Read the file; the context manager guarantees the handle is closed
    # (the original implementation left it open).
    with open(inputFile, 'r') as myFile:
        myText = str(myFile.read())
    # Clean the text: drop stray angle brackets, turn sentence-ending
    # punctuation into explicit <e>/<s> boundary markers, strip remaining
    # non-alphanumeric characters, and wrap the whole text in markers.
    myTextCleaned = re.sub(r'[<>]', "", myText)
    myTextCleaned = re.sub(r"[\.\,\!\?\;]\s", " <e> <s> ", myTextCleaned)
    myTextCleaned = re.sub(r"[^(A-Za-z0-9<>\s)]", "", myTextCleaned)
    myTextCleaned = "<s> " + myTextCleaned + " <e>"
    myTextSplit = re.split(r"\s+", myTextCleaned)
    # Lower-case the first word of every sentence; capitalized words in the
    # middle of a sentence (likely proper nouns) are kept. Bug fix: the loop
    # bound no longer depends on `n`, so tokenization is identical for every
    # n-gram order, and the `i + 1` access is always in range.
    for i in range(len(myTextSplit) - 1):
        if myTextSplit[i] == "<s>":
            myTextSplit[i + 1] = myTextSplit[i + 1].lower()
    # Count every n-gram.
    my_dict = {}
    for i in range(len(myTextSplit) - n + 1):
        gram = ' '.join(myTextSplit[i:i + n])
        my_dict[gram] = my_dict.get(gram, 0) + 1
    # One row per phrase, sorted by descending frequency.
    df = pd.DataFrame(my_dict, index=['Frequencies'])
    columns_sorted = df.columns[df.iloc[0].argsort()[::-1]]
    df = df[columns_sorted].transpose()
    # Hide the sentence-boundary bigram from the user-facing view; .copy()
    # avoids pandas' SettingWithCopy warning on the later column insert.
    df_filter = df.loc[df.index.values != "<e> <s>"].copy()

    def _marker_count(label):
        # Frequency of a marker row, or 0 when it does not occur.
        return int(df.loc[label, 'Frequencies']) if label in df.index else 0

    if n == 1:
        total_freq = (df['Frequencies'].sum()
                      - _marker_count("<e>") - _marker_count("<s>"))
        df_filter.index.name = 'Unigram: ' + str(inputFile)
    elif n == 2:
        total_freq = df['Frequencies'].sum() - _marker_count("<e> <s>")
        df_filter.index.name = 'Bigrams: ' + str(inputFile)
    else:
        # Bug fix: total_freq used to be undefined for n > 2, which made the
        # percentage computation below raise NameError.
        total_freq = df['Frequencies'].sum()
        df_filter.index.name = str(n) + 'grams: ' + str(inputFile)
    # Bug fix: filter with df_filter's own column. The original indexed
    # df_filter with a boolean mask built from df; once the boundary row was
    # dropped (n == 2) the indexes no longer aligned and modern pandas raises.
    df_filter = df_filter.loc[df_filter['Frequencies'] >= freq].copy()
    # Percentage of the (boundary-corrected) total for each phrase.
    df_filter['Percentage(%)'] = round(
        df_filter['Frequencies'] / total_freq * 100, 2)
    return df_filter
# Call the function with different inputs
# NOTE(review): the two corpus files must exist in the working directory,
# otherwise these calls raise an IOError/FileNotFoundError.
df_1_ug = unsmooth_ngram("the metamorphosis.txt", 1, 50)
df_2_ug = unsmooth_ngram("the old man and the sea.txt", 1, 50)
df_1_bg = unsmooth_ngram("the metamorphosis.txt", 2, 50)
df_2_bg = unsmooth_ngram("the old man and the sea.txt", 2, 50)
# question 2
# .index[0] is the most frequent phrase; .iloc[0][1] is its 'Percentage(%)'.
print('The most common unigram and bigrams are: ' \
      '\n"%s" (%.2f%%) and "%s" (%.2f%%) for The Metamorphosis ,' \
      '\n"%s" (%.2f%%) and "%s" (%.2f%%) for The Old Man and The Sea' \
      %(df_1_ug.index[0], df_1_ug.iloc[0][1], df_1_bg.index[0], df_1_bg.iloc[0][1], \
      df_2_ug.index[0], df_2_ug.iloc[0][1], df_2_bg.index[0], df_2_bg.iloc[0][1]))
| {
"content_hash": "3d257a9158a1bf108660463740ba7120",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 154,
"avg_line_length": 44.03448275862069,
"alnum_prop": 0.6209866875489428,
"repo_name": "vc0150/Natrual-Language-Processing",
"id": "8d1ffa851b7346164da04097ebb7d4689fd13d80",
"size": "3855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "n-gram/unsmooth_ngram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7082"
},
{
"name": "Python",
"bytes": "24819"
}
],
"symlink_target": ""
} |
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
import art3d
import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
    """Return the opposite bound along `index`: the max if `coord` sits at
    the min, otherwise the min."""
    at_minimum = coord[index] == mins[index]
    return maxs[index] if at_minimum else mins[index]
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
    '''Return a coordinate that is moved by "deltas" away from the center.'''
    shifted = copy.copy(coord)
    for axis in range(3):
        # Axes masked out by `axmask` are left untouched.
        if not axmask[axis]:
            continue
        # Push outward: below the center move down, otherwise move up.
        if shifted[axis] < centers[axis]:
            shifted[axis] -= deltas[axis]
        else:
            shifted[axis] += deltas[axis]
    return shifted
def tick_update_position(tick, tickxs, tickys, labelpos):
    '''Update tick line and label position and style.'''
    # Reposition whichever of the two tick labels is currently visible.
    labels_and_flags = ((tick.label1, tick.label1On),
                        (tick.label2, tick.label2On))
    for label, visible in labels_and_flags:
        if visible:
            label.set_position(labelpos)
    # Only the primary tick line is drawn, as a plain solid line.
    tick.tick1On = True
    tick.tick2On = False
    tick.tick1line.set_linestyle('-')
    tick.tick1line.set_marker('')
    tick.tick1line.set_data(tickxs, tickys)
    tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
    """A 3D-aware axis (x, y or z) used by mplot3d's Axes3D.

    Extends the 2D :class:`matplotlib.axis.XAxis` with pane, grid-line,
    tick and label drawing that is projected through the Axes3D
    projection matrix (``renderer.M``).
    """
    # These points from the unit cube make up the x, y and z-planes
    _PLANES = (
        (0, 3, 7, 4), (1, 2, 6, 5), # yz planes
        (0, 1, 5, 4), (3, 2, 6, 7), # xz planes
        (0, 1, 2, 3), (4, 5, 6, 7), # xy planes
    )
    # Some properties for the axes
    _AXINFO = {
        'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
              'color': (0.95, 0.95, 0.95, 0.5)},
        'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
              'color': (0.90, 0.90, 0.90, 0.5)},
        'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
              'color': (0.925, 0.925, 0.925, 0.5)},
    }
    def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
        # adir identifies which axes this is
        self.adir = adir
        # data and viewing intervals for this direction
        self.d_interval = d_intervalx
        self.v_interval = v_intervalx
        # This is a temporary member variable.
        # Do not depend on this existing in future releases!
        self._axinfo = self._AXINFO[adir].copy()
        self._axinfo.update({'label' : {'space_factor': 1.6,
                                        'va': 'center',
                                        'ha': 'center'},
                             'tick' : {'inward_factor': 0.2,
                                       'outward_factor': 0.1},
                             'ticklabel': {'space_factor': 0.7},
                             'axisline': {'linewidth': 0.75,
                                          'color': (0, 0, 0, 1)},
                             'grid' : {'color': (0.9, 0.9, 0.9, 1),
                                       'linewidth': 1.0},
                            })
        maxis.XAxis.__init__(self, axes, *args, **kwargs)
        self.set_rotate_label(kwargs.get('rotate_label', None))
    def init3d(self):
        """Create the 3D artists: axis line, background pane and grid lines."""
        self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
                                  linewidth=self._axinfo['axisline']['linewidth'],
                                  color=self._axinfo['axisline']['color'],
                                  antialiased=True,
                                  )
        # Store dummy data in Polygon object
        self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
                                     closed=False,
                                     alpha=0.8,
                                     facecolor=(1,1,1,0),
                                     edgecolor=(1,1,1,0))
        self.set_pane_color(self._axinfo['color'])
        self.axes._set_artist_props(self.line)
        self.axes._set_artist_props(self.pane)
        self.gridlines = art3d.Line3DCollection([], )
        self.axes._set_artist_props(self.gridlines)
        self.axes._set_artist_props(self.label)
        self.axes._set_artist_props(self.offsetText)
        # Need to be able to place the label at the correct location
        self.label._transform = self.axes.transData
        self.offsetText._transform = self.axes.transData
    def get_tick_positions(self):
        """Return (majorLabels, majorLocs) for the current major locator."""
        majorLocs = self.major.locator()
        self.major.formatter.set_locs(majorLocs)
        majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
        return majorLabels, majorLocs
    def get_major_ticks(self, numticks=None):
        """Return major ticks with all their transforms switched to transData."""
        ticks = maxis.XAxis.get_major_ticks(self, numticks)
        for t in ticks:
            t.tick1line.set_transform(self.axes.transData)
            t.tick2line.set_transform(self.axes.transData)
            t.gridline.set_transform(self.axes.transData)
            t.label1.set_transform(self.axes.transData)
            t.label2.set_transform(self.axes.transData)
        return ticks
    def set_pane_pos(self, xys):
        """Set the pane polygon vertices; z coordinates are dropped."""
        xys = np.asarray(xys)
        xys = xys[:,:2]
        self.pane.xy = xys
    def set_pane_color(self, color):
        '''Set pane color to a RGBA tuple'''
        self._axinfo['color'] = color
        self.pane.set_edgecolor(color)
        self.pane.set_facecolor(color)
        self.pane.set_alpha(color[-1])
    def set_rotate_label(self, val):
        '''
        Whether to rotate the axis label: True, False or None.
        If set to None the label will be rotated if longer than 4 chars.
        '''
        self._rotate_label = val
    def get_rotate_label(self, text):
        # Resolve the tri-state flag set via set_rotate_label.
        if self._rotate_label is not None:
            return self._rotate_label
        else:
            return len(text) > 4
    def _get_coord_info(self, renderer):
        """Return (mins, maxs, centers, deltas, tc, highs) for the current view.

        tc is the projected unit cube; highs flags, per axis, which of the
        two candidate planes is the "far" one in screen depth.
        """
        minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
        if minx > maxx:
            minx, maxx = maxx, minx
        if miny > maxy:
            miny, maxy = maxy, miny
        if minz > maxz:
            minz, maxz = maxz, minz
        mins = np.array((minx, miny, minz))
        maxs = np.array((maxx, maxy, maxz))
        centers = (maxs + mins) / 2.
        deltas = (maxs - mins) / 12.
        # Expand the box slightly so artists are not drawn on its edge.
        mins = mins - deltas / 4.
        maxs = maxs + deltas / 4.
        vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
        tc = self.axes.tunit_cube(vals, renderer.M)
        avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
                p1, p2, p3, p4 in self._PLANES]
        highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
        return mins, maxs, centers, deltas, tc, highs
    def draw_pane(self, renderer):
        """Draw the background pane (wall) that belongs to this axis."""
        renderer.open_group('pane3d')
        mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
        info = self._axinfo
        index = info['i']
        # Pick whichever of the two candidate planes is currently behind.
        if not highs[index]:
            plane = self._PLANES[2 * index]
        else:
            plane = self._PLANES[2 * index + 1]
        xys = [tc[p] for p in plane]
        self.set_pane_pos(xys)
        self.pane.draw(renderer)
        renderer.close_group('pane3d')
    def draw(self, renderer):
        """Project and draw the axis line, labels, offset text, grid and ticks."""
        self.label._transform = self.axes.transData
        renderer.open_group('axis3d')
        # code from XAxis
        majorTicks = self.get_major_ticks()
        majorLocs = self.major.locator()
        info = self._axinfo
        index = info['i']
        # filter locations here so that no extra grid lines are drawn
        locmin, locmax = self.get_view_interval()
        if locmin > locmax:
            locmin, locmax = locmax, locmin
        # Rudimentary clipping
        majorLocs = [loc for loc in majorLocs if
                     locmin <= loc <= locmax]
        self.major.formatter.set_locs(majorLocs)
        majorLabels = [self.major.formatter(val, i)
                       for i, val in enumerate(majorLocs)]
        mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
        # Determine grid lines
        minmax = np.where(highs, maxs, mins)
        # Draw main axis line
        juggled = info['juggled']
        edgep1 = minmax.copy()
        edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
        edgep2 = edgep1.copy()
        edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
        pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
        centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
        self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
        self.line.draw(renderer)
        # Grid points where the planes meet
        xyz0 = []
        for val in majorLocs:
            coord = minmax.copy()
            coord[index] = val
            xyz0.append(coord)
        # Draw labels
        peparray = np.asanyarray(pep)
        # The transAxes transform is used because the Text object
        # rotates the text relative to the display coordinate system.
        # Therefore, if we want the labels to remain parallel to the
        # axis regardless of the aspect ratio, we need to convert the
        # edge points of the plane to display coordinates and calculate
        # an angle from that.
        # TODO: Maybe Text objects should handle this themselves?
        dx, dy = (self.axes.transAxes.transform(peparray[0:2, 1]) -
                  self.axes.transAxes.transform(peparray[0:2, 0]))
        lxyz = 0.5*(edgep1 + edgep2)
        # Push the axis label away from the box center so it clears the ticks.
        labeldeltas = info['label']['space_factor'] * deltas
        axmask = [True, True, True]
        axmask[index] = False
        lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
        tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
                renderer.M)
        self.label.set_position((tlx, tly))
        if self.get_rotate_label(self.label.get_text()):
            angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
            self.label.set_rotation(angle)
        self.label.set_va(info['label']['va'])
        self.label.set_ha(info['label']['ha'])
        self.label.draw(renderer)
        # Draw Offset text
        # Which of the two edge points do we want to
        # use for locating the offset text?
        if juggled[2] == 2 :
            outeredgep = edgep1
            outerindex = 0
        else :
            outeredgep = edgep2
            outerindex = 1
        pos = copy.copy(outeredgep)
        pos = move_from_center(pos, centers, labeldeltas, axmask)
        olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
        self.offsetText.set_text( self.major.formatter.get_offset() )
        self.offsetText.set_position( (olx, oly) )
        angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
        self.offsetText.set_rotation(angle)
        # Must set rotation mode to "anchor" so that
        # the alignment point is used as the "fulcrum" for rotation.
        self.offsetText.set_rotation_mode('anchor')
        #-----------------------------------------------------------------------
        # Note: the following statement for determining the proper alignment of
        # the offset text. This was determined entirely by trial-and-error
        # and should not be in any way considered as "the way". There are
        # still some edge cases where alignment is not quite right, but
        # this seems to be more of a geometry issue (in other words, I
        # might be using the wrong reference points).
        #
        # (TT, FF, TF, FT) are the shorthand for the tuple of
        # (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
        # centpt[index] <= peparray[index, outerindex])
        #
        # Three-letters (e.g., TFT, FTT) are short-hand for the array
        # of bools from the variable 'highs'.
        # ---------------------------------------------------------------------
        if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
            # if FT and if highs has an even number of Trues
            if (centpt[index] <= peparray[index, outerindex]
                and ((len(highs.nonzero()[0]) % 2) == 0)) :
                # Usually, this means align right, except for the FTT case,
                # in which offset for axis 1 and 2 are aligned left.
                if highs.tolist() == [False, True, True] and index in (1, 2) :
                    align = 'left'
                else :
                    align = 'right'
            else :
                # The FF case
                align = 'left'
        else :
            # if TF and if highs has an even number of Trues
            if (centpt[index] > peparray[index, outerindex]
                and ((len(highs.nonzero()[0]) % 2) == 0)) :
                # Usually mean align left, except if it is axis 2
                if index == 2 :
                    align = 'right'
                else :
                    align = 'left'
            else :
                # The TT case
                align = 'right'
        self.offsetText.set_va('center')
        self.offsetText.set_ha(align)
        self.offsetText.draw(renderer)
        # Draw grid lines
        if len(xyz0) > 0:
            # Grid points at end of one plane
            xyz1 = copy.deepcopy(xyz0)
            newindex = (index + 1) % 3
            newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
            for i in range(len(majorLocs)):
                xyz1[i][newindex] = newval
            # Grid points at end of the other plane
            xyz2 = copy.deepcopy(xyz0)
            newindex = (index + 2) % 3
            newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
            for i in range(len(majorLocs)):
                xyz2[i][newindex] = newval
            lines = zip(xyz1, xyz0, xyz2)
            if self.axes._draw_grid:
                self.gridlines.set_segments(lines)
                self.gridlines.set_color([info['grid']['color']] * len(lines))
                self.gridlines.draw(renderer, project=True)
        # Draw ticks
        tickdir = info['tickdir']
        tickdelta = deltas[tickdir]
        if highs[tickdir]:
            ticksign = 1
        else:
            ticksign = -1
        for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
            if tick is None:
                continue
            # Get tick line positions
            pos = copy.copy(edgep1)
            pos[index] = loc
            pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
                           ticksign * tickdelta
            x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
                                               renderer.M)
            pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
                           ticksign * tickdelta
            x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
                                               renderer.M)
            # Get position of label
            labeldeltas = [info['ticklabel']['space_factor'] * x for
                           x in deltas]
            axmask = [True, True, True]
            axmask[index] = False
            pos[tickdir] = edgep1[tickdir]
            pos = move_from_center(pos, centers, labeldeltas, axmask)
            lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
                                               renderer.M)
            tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
            tick.set_label1(label)
            tick.set_label2(label)
            tick.draw(renderer)
        renderer.close_group('axis3d')
    def get_view_interval(self):
        """return the Interval instance for this 3d axis view limits"""
        return self.v_interval
    def set_view_interval(self, vmin, vmax, ignore=False):
        """Set the view interval; unless `ignore`, only widen the current one."""
        if ignore:
            self.v_interval = vmin, vmax
        else:
            Vmin, Vmax = self.get_view_interval()
            self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
    # TODO: Get this to work properly when mplot3d supports
    # the transforms framework.
    def get_tightbbox(self, renderer) :
        # Currently returns None so that Axis.get_tightbbox
        # doesn't return junk info.
        return None
# Use classes to look at different data limits
class XAxis(Axis):
    # Specialization that reads the x component of the shared 2D data limits.
    def get_data_interval(self):
        'return the Interval instance for this axis data limits'
        return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
    # Specialization that reads the y component of the shared 2D data limits.
    def get_data_interval(self):
        'return the Interval instance for this axis data limits'
        return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
    # Specialization that reads the z limits kept in the separate zz_dataLim.
    def get_data_interval(self):
        'return the Interval instance for this axis data limits'
        return self.axes.zz_dataLim.intervalx
| {
"content_hash": "cf224dcc38f13304a11a4ff181b59b92",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 87,
"avg_line_length": 38.55275229357798,
"alnum_prop": 0.533999643048367,
"repo_name": "lthurlow/Network-Grapher",
"id": "20034a9e17f236a7aeb95b5d66e8994aa47dbb75",
"size": "16960",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "proj/external/matplotlib-1.2.1/lib/mpl_toolkits/mplot3d/axis3d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6550"
}
],
"symlink_target": ""
} |
"""3D backpropagation algorithm with a tilted axis of rotation"""
import multiprocessing as mp
import warnings
import numexpr as ne
import numpy as np
import pyfftw
import scipy.ndimage
from . import util
_ncores = mp.cpu_count()
def estimate_major_rotation_axis(loc):
    """
    For a list of points on the unit sphere, estimate the main
    rotational axis and return a list of angles that correspond
    to the rotational position for each point.

    Currently a stub: calling it always raises ``NotImplementedError``.
    """
    # TODO: implement the actual axis estimation
    raise NotImplementedError("estimation of rotational axis not implemented.")
def norm_vec(vector):
    """Normalize the length of a vector to one"""
    assert len(vector) == 3
    arr = np.array(vector)
    # Euclidean length via the dot product of the vector with itself.
    return arr / np.sqrt(arr.dot(arr))
def rotate_points_to_axis(points, axis):
    """Rotate all points of a list, such that `axis==[0,1,0]`

    The rotation is composed as: rotate in the x-z-plane by phi into the
    y-z-plane, rotate in the y-z-plane by theta up to [0,1,0], and finally
    rotate back in the x-z-plane by -phi.

    Parameters
    ----------
    points: list-like with elements of length 3
        The Cartesian points. These should be in the same format as
        produced by `sphere_points_from_angles_and_tilt`.
    axis: list-like, length 3
        The reference axis that will be used to determine the
        rotation angle of the points. The points will be rotated
        about the origin such that `axis` matches [0,1,0].

    Returns
    -------
    rotated_points: np.ndarray of shape (N,3)
        The rotated points.
    """
    u, v, w = norm_vec(axis)
    # Azimuth of the axis in the x-z-plane and tilt w.r.t. the y-axis.
    phi = np.arctan2(u, w)
    theta = np.arccos(v)
    sin_p, cos_p = np.sin(phi), np.cos(phi)
    sin_t, cos_t = np.sin(theta), np.cos(theta)
    # Negative rotation about y-axis
    Rphi = np.array([
        [cos_p, 0, -sin_p],
        [0, 1, 0],
        [sin_p, 0, cos_p],
    ])
    # Negative rotation about x-axis
    Rtheta = np.array([
        [1, 0, 0],
        [0, cos_t, sin_t],
        [0, -sin_t, cos_t],
    ])
    # Rotate down to [0,1,0], then undo the azimuthal rotation so the
    # effective rotation is only towards [0,1,0].
    DR = np.dot(Rphi.T, np.dot(Rtheta, Rphi))
    pts = np.array(points)
    rotpoints = np.zeros((len(pts), 3))
    for idx in range(len(pts)):
        rotpoints[idx] = np.dot(DR, pts[idx])
    return rotpoints
def rotation_matrix_from_point(point, ret_inv=False):
    """Build the rotation matrix that maps [0, 0, 1] onto `point`.

    The matrix is composed of two elemental rotations: first a
    rotation about the x-axis (polar direction), followed by a
    rotation about the y-axis matching the azimuthal angle of
    `point` in the x-z plane. This rotation matrix is required for
    the correct 3D orientation of the backpropagated projections.

    Parameters
    ----------
    point: list-like, length 3
        The coordinates of the point in 3D.
    ret_inv: bool
        If True, also return the inverse of the rotation matrix.
        The inverse is required for
        :func:`scipy.ndimage.affine_transform`, which maps the
        output coordinates to the input coordinates.

    Returns
    -------
    Rmat [, Rmat_inv]: 3x3 ndarrays
        The rotation matrix that rotates [0,0,1] to `point` and
        optionally its inverse.
    """
    px, py, pz = point
    # azimuthal angle in the x-z plane
    azi = np.arctan2(px, pz)
    # (negative) angle in polar direction
    pol = -np.arctan2(py, np.sqrt(px**2 + pz**2))
    sin_p, cos_p = np.sin(pol), np.cos(pol)
    sin_a, cos_a = np.sin(azi), np.cos(azi)
    # elemental rotation about the x-axis (polar direction)
    rot_x = np.array([
        [1, 0, 0],
        [0, cos_p, -sin_p],
        [0, sin_p, cos_p],
    ])
    # elemental rotation about the y-axis (azimuth in the x-z plane)
    rot_y = np.array([
        [cos_a, 0, -sin_a],
        [0, 1, 0],
        [sin_a, 0, cos_a],
    ])
    fwd = np.dot(rot_y, rot_x)
    if not ret_inv:
        return fwd
    # The inverse is the transposed product in reverse order.
    return fwd, np.dot(rot_x.T, rot_y.T)
def rotation_matrix_from_point_planerot(point, plane_angle, ret_inv=False):
    """Rotation matrix mapping [0, 0, 1] onto `point` for a tilted axis.

    Same as :func:`rotation_matrix_from_point`, but additionally
    applies a rotation about the z-axis by `plane_angle`, which
    accounts for a tilt of the rotational axis within the detector
    (x-y) plane. This rotation matrix is required for the correct 3D
    orientation of the backpropagated projections.

    Parameters
    ----------
    point: list-like, length 3
        The coordinates of the point in 3D.
    plane_angle: float
        In-plane tilt angle [rad] of the rotational axis.
    ret_inv: bool
        If True, also return the inverse of the rotation matrix.
        The inverse is required for
        :func:`scipy.ndimage.affine_transform`, which maps the
        output coordinates to the input coordinates.

    Returns
    -------
    Rmat [, Rmat_inv]: 3x3 ndarrays
        The rotation matrix that rotates [0,0,1] to `point` and
        optionally its inverse.
    """
    # These matrices are correct when the rotational axis has no tilt
    # within the detector plane (x-y).
    base, base_inv = rotation_matrix_from_point(point, ret_inv=True)
    # All other cases need an additional elemental rotation about the
    # z-axis to correct for the in-plane tilt.
    cos_z, sin_z = np.cos(plane_angle), np.sin(plane_angle)
    rot_z = np.array([
        [cos_z, -sin_z, 0],
        [sin_z, cos_z, 0],
        [0, 0, 1],
    ])
    combined = np.dot(base, rot_z)
    if not ret_inv:
        return combined
    return combined, np.dot(rot_z.T, base_inv)
def sphere_points_from_angles_and_tilt(angles, tilted_axis):
    """Distribute `angles` on a great circle about a tilted axis.

    For a given tilt of the rotational axis `tilted_axis`, compute
    the points on the unit sphere that correspond to the distribution
    `angles` along the great circle about this axis.

    Parameters
    ----------
    angles: 1d ndarray
        The angles that will be distributed on the great circle.
    tilted_axis: list of length 3
        The tilted axis of rotation that determines the great
        circle.

    Notes
    -----
    The reference axis is always [0,1,0].
    `theta` is the azimuthal angle measured down from the y-axis
    (critical for reconstruction: theta==0 means the axis lies in
    the imaging plane, theta==PI/2 makes 3D reconstruction
    impossible).
    `phi` is the polar angle in the x-z plane measured from z
    towards x; it matches the angles up with the correct sinogram
    images. By default, ``angles[0]`` is the point that touches the
    great circle lying in the x-z plane.
    """
    assert len(angles.shape) == 1
    # Work with the normalized axis.
    axis = norm_vec(tilted_axis)
    ax_x, ax_y, ax_z = axis
    # Angular positions relative to the first angle; subtracting
    # angles[0] guarantees that the first point starts at [0,0,1]
    # before the tilt is applied (required for step a3 below).
    rel = angles - angles[0]
    pts = np.zeros((angles.shape[0], 3), dtype=float)
    pts[:, 0] = np.sin(rel)
    pts[:, 2] = np.cos(rel)
    # (a) azimuthal tilt angle theta, measured from the y-axis
    theta = np.arccos(ax_y)
    # (b) polar angle phi in the x-z plane (from +z towards +x);
    # forced to zero when the projection of the axis vanishes, to
    # avoid flipping the axis of rotation due to numerical errors.
    if np.allclose(ax_x, 0) and np.allclose(ax_z, 0):
        phi = 0
    else:
        phi = np.arctan2(ax_x, ax_z)
    # Determine the projection points on the unit sphere. The
    # resulting circle meets the x-z plane at phi and is tilted by
    # theta w.r.t. the y-axis. Three steps:
    # a1) Shrink the centered circle to the radius of the tilted one.
    pts *= np.cos(theta)
    # a2) Rotate the circle about the x-axis by theta
    # (right-handed/counter-clockwise/basic/elemental rotation);
    # applying rot_x to every row is done as a single matmul.
    rot_x = np.array([
        [1, 0, 0],
        [0, np.cos(theta), -np.sin(theta)],
        [0, np.sin(theta), np.cos(theta)]
    ])
    pts = pts.dot(rot_x.T)
    # a3) Shift everything so the first point sits at (0, 0, 1).
    pts += np.array([0, 0, 1]) - pts[0]
    # (b) Rotate the entire set about the y-axis by phi
    # (right-handed/counter-clockwise/basic/elemental rotation).
    rot_y = np.array([
        [+np.cos(phi), 0, np.sin(phi)],
        [0, 1, 0],
        [-np.sin(phi), 0, np.cos(phi)]
    ])
    pts = pts.dot(rot_y.T)
    return pts
def backpropagate_3d_tilted(uSin, angles, res, nm, lD=0,
                            tilted_axis=[0, 1, 0],
                            coords=None, weight_angles=True, onlyreal=False,
                            padding=(True, True), padfac=1.75, padval="edge",
                            intp_order=2, dtype=None,
                            num_cores=_ncores,
                            save_memory=False,
                            copy=True,
                            count=None, max_count=None,
                            verbose=0):
    r"""3D backpropagation with a tilted axis of rotation

    Three-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,y,z)`
    by a dielectric object with refractive index
    :math:`n(x,y,z)`.

    This method implements the 3D backpropagation algorithm with
    a rotational axis that is tilted by :math:`\theta_\mathrm{tilt}`
    w.r.t. the imaging plane :cite:`Mueller2015tilted`.

    .. math::
        f(\mathbf{r}) =
            -\frac{i k_\mathrm{m}}{2\pi}
            \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j}^\mathrm{tilt} \!\!
            \left \{
            \text{FFT}^{-1}_{\mathrm{2D}}
            \left \{
            \left| k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}\right|
            \frac{\text{FFT}_{\mathrm{2D}} \left \{
            u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
            {u_0(l_\mathrm{D})}
            \exp \! \left[i k_\mathrm{m}(M - 1) \cdot
            (z_{\phi_j}-l_\mathrm{D}) \right]
            \right \}
            \right \}

    with a modified rotational operator :math:`D_{-\phi_j}^\mathrm{tilt}`
    and a different filter in Fourier space
    :math:`|k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}|` when compared
    to :func:`backpropagate_3d`.

    .. versionadded:: 0.1.2

    Parameters
    ----------
    uSin: (A, Ny, Nx) ndarray
        Three-dimensional sinogram of plane recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: ndarray of shape (A,3) or 1D array of length A
        If the shape is (A,3), then `angles` consists of vectors
        on the unit sphere that correspond to the direction
        of illumination and acquisition (s₀). If the shape is (A,),
        then `angles` is a one-dimensional array of angles in radians
        that determines the angular position :math:`\phi_j`.
        In both cases, `tilted_axis` must be set according to the
        tilt of the rotational axis.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    tilted_axis: list of floats
        The coordinates [x, y, z] on a unit sphere representing the
        tilted axis of rotation. The default is (0,1,0),
        which corresponds to a rotation about the y-axis and
        follows the behavior of :func:`odtbrain.backpropagate_3d`.
    coords: None [(3, M) ndarray]
        Only compute the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    weight_angles: bool
        If `True`, weights each backpropagated projection with a factor
        proportional to the angular distance between the neighboring
        projections.

        .. math::
            \Delta \phi_0 \longmapsto \Delta \phi_j =
                \frac{\phi_{j+1} - \phi_{j-1}}{2}

        This currently only works when `angles` has the shape (A,).
    onlyreal: bool
        If `True`, only the real part of the reconstructed image
        will be returned. This saves computation time.
    padding: tuple of bool
        Pad the input data to the second next power of 2 before
        Fourier transforming. This reduces artifacts and speeds up
        the process for input image sizes that are not powers of 2.
        The default is padding in x and y: `padding=(True, True)`.
        For padding only in x-direction (e.g. for cylindrical
        symmetries), set `padding` to `(True, False)`. To turn off
        padding, set it to `(False, False)`.
    padfac: float
        Increase padding size of the input data. A value greater
        than one will trigger padding to the second-next power of
        two. For example, a value of 1.75 will lead to a padded
        size of 256 for an initial size of 144, whereas it will
        lead to a padded size of 512 for an initial size of 150.
        Values greater than 2 are allowed. This parameter may
        greatly increase memory usage!
    padval: float or "edge"
        The value used for padding. This is important for the Rytov
        approximation, where an approximate zero in the phase might
        translate to 2πi due to the unwrapping algorithm. In that
        case, this value should be a multiple of 2πi.
        If `padval` is "edge", then the edge values are used for
        padding (see documentation of :func:`numpy.pad`). If `padval`
        is a float, then padding is done with a linear ramp.
    intp_order: int between 0 and 5
        Order of the interpolation for rotation.
        See :func:`scipy.ndimage.affine_transform` for details.
    dtype: dtype object or argument for :func:`numpy.dtype`
        The data type that is used for calculations (float or double).
        Defaults to `numpy.float_`.
    num_cores: int
        The number of cores to use for parallel operations. This value
        defaults to the number of cores on the system.
    save_memory: bool
        Saves memory at the cost of longer computation time.

        .. versionadded:: 0.1.5

    copy: bool
        Copy input sinogram `uSin` for data processing. If `copy`
        is set to `False`, then `uSin` will be overridden.

        .. versionadded:: 0.1.5

    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.

    Returns
    -------
    f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`

    Notes
    -----
    This implementation can deal with projection angles that are not
    distributed along a circle about the rotational axis. If there are
    slight deviations from this circle, simply pass the 3D rotational
    positions instead of the 1D angles to the `angles` argument. In
    principle, this should improve the reconstruction. The general
    problem here is that the backpropagation algorithm requires a
    ramp filter in Fourier space that is oriented perpendicular to the
    rotational axis. If the sample does not rotate about a single axis,
    then a 1D parametric representation of this rotation must be found
    to correctly determine the filter in Fourier space. Such a
    parametric representation could e.g. be a spiral between the poles
    of the unit sphere (but this kind of rotation is probably difficult
    to implement experimentally).

    If you have input images with rectangular shape, e.g. Nx!=Ny and
    the rotational axis deviates by approximately PI/2 from the axis
    (0,1,0), then data might get cropped in the reconstruction volume.
    You can avoid that by rotating your input data and the rotational
    axis by PI/2. For instance, change`tilted_axis` from [1,0,0] to
    [0,1,0] and `np.rot90` the sinogram images.

    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).
    """
    # ---- input validation ----
    A = angles.shape[0]
    if angles.shape not in [(A,), (A, 1), (A, 3)]:
        raise ValueError("`angles` must have shape (A,) or (A,3)!")
    if len(uSin.shape) != 3:
        raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
    if len(uSin) != A:
        raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
    if len(list(padding)) != 2:
        raise ValueError("`padding` must be boolean tuple of length 2!")
    if np.array(padding).dtype is not np.dtype(bool):
        raise ValueError("Parameter `padding` must be boolean tuple.")
    if coords is not None:
        raise NotImplementedError("Setting coordinates is not yet supported.")
    if num_cores > _ncores:
        raise ValueError("`num_cores` must not exceed number "
                         + "of physical cores: {}".format(_ncores))
    # setup dtype (real dtype for the output, matching complex dtype
    # for the Fourier-space computations)
    if dtype is None:
        dtype = np.float_
    dtype = np.dtype(dtype)
    if dtype.name not in ["float32", "float64"]:
        raise ValueError("dtype must be float32 or float64!")
    dtype_complex = np.dtype("complex{}".format(
        2 * int(dtype.name.strip("float"))))
    # progress monitoring: A projections plus two setup steps
    if max_count is not None:
        max_count.value += A + 2
    ne.set_num_threads(num_cores)
    uSin = np.array(uSin, copy=copy)
    angles = np.array(angles, copy=copy)
    angles = np.squeeze(angles)  # support shape (A,1)
    # lengths of the input data
    lny, lnx = uSin.shape[1], uSin.shape[2]
    ln = lnx
    # We perform zero-padding before performing the Fourier transform.
    # This gets rid of artifacts due to false periodicity and also
    # speeds up Fourier transforms of the input image size is not
    # a power of 2.
    if padding[0]:
        orderx = int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
        padx = orderx - lnx
    else:
        padx = 0
    if padding[1]:
        ordery = int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
        pady = ordery - lny
    else:
        pady = 0
    # split the padding between left/right (top/bottom) as evenly as
    # possible; the left side gets the extra pixel for odd pad sizes
    padyl = int(np.ceil(pady / 2))
    padyr = pady - padyl
    padxl = int(np.ceil(padx / 2))
    padxr = padx - padxl
    # zero-padded length of sinogram.
    lNx = lnx + padx
    lNy = lny + pady
    lNz = ln
    if verbose > 0:
        print("......Image size (x,y): {}x{}, padded: {}x{}".format(
            lnx, lny, lNx, lNy))
    # `tilted_axis` is required for several things:
    # 1. the filter |kDx*v + kDy*u| with (u,v,w)==tilted_axis
    # 2. the alignment of the rotational axis with the y-axis
    # 3. the determination of the point coordinates if only
    #    angles in radians are given.
    # For (1) we need the exact axis that corresponds to our input data.
    # For (2) and (3) we need `tilted_axis_yz` (see below) which is the
    # axis `tilted_axis` rotated in the detector plane such that its
    # projection onto the detector coincides with the y-axis.
    # Normalize input axis (rebinding; the shared default list is not
    # mutated in-place in this function)
    tilted_axis = norm_vec(tilted_axis)
    # `tilted_axis_yz` is computed by performing the inverse rotation in
    # the x-y plane with `angz`. We will again use `angz` in the transform
    # within the for-loop to rotate each projection according to its
    # acquisition angle.
    angz = np.arctan2(tilted_axis[0], tilted_axis[1])
    rotmat = np.array([
        [np.cos(angz), -np.sin(angz), 0],
        [np.sin(angz), np.cos(angz), 0],
        [0, 0, 1],
    ])
    # rotate `tilted_axis` onto the y-z plane.
    tilted_axis_yz = norm_vec(np.dot(rotmat, tilted_axis))
    if len(angles.shape) == 1:
        if weight_angles:
            weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
        # compute the 3D points from tilted axis
        angles = sphere_points_from_angles_and_tilt(angles, tilted_axis_yz)
    else:
        if weight_angles:
            warnings.warn("3D angular weighting not yet supported!")
            weights = 1
        # normalize and rotate angles
        for ii in range(angles.shape[0]):
            # angles[ii] = norm_vec(angles[ii]) #-> not correct
            # instead rotate like `tilted_axis` onto the y-z plane.
            angles[ii] = norm_vec(np.dot(rotmat, angles[ii]))
    if weight_angles:
        uSin *= weights
    # Cut-Off frequency
    # km [1/px]
    km = (2 * np.pi * nm) / res
    # The notation in the our optical tomography script for
    # a wave propagating to the right is:
    #
    #    u0(x) = exp(ikx)
    #
    # However, in physics usually we use the other sign convention:
    #
    #    u0(x) = exp(-ikx)
    #
    # In order to be consistent with programs like Meep or our
    # scattering script for a dielectric cylinder, we want to use the
    # latter sign convention.
    # This is not a big problem. We only need to multiply the imaginary
    # part of the scattered wave by -1.
    # Ask for the filter. Do not include zero (first element).
    #
    # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
    #   - double coverage factor 1/2 already included
    #   - unitary angular frequency to unitary ordinary frequency
    #     conversion performed in calculation of UB=FT(uB).
    #
    # f(r) = -i kₘ / ((2π)² a₀)                 (prefactor)
    #      * iiint dϕ₀ dkx dky                  (prefactor)
    #      * |kx|                               (prefactor)
    #      * exp(-i kₘ M lD )                   (prefactor)
    #      * UBϕ₀(kx)                           (dependent on ϕ₀)
    #      * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
    # (r and s₀ are vectors. The last term contains a dot-product)
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (   cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( - sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # The filter can be split into two parts
    #
    # 1) part without dependence on the z-coordinate
    #
    #        -i kₘ / ((2π)² a₀)
    #      * iiint dϕ₀ dkx dky
    #      * |kx|
    #      * exp(-i kₘ M lD )
    #
    # 2) part with dependence of the z-coordinate
    #
    #        exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # The filter (1) can be performed using the classical filter process
    # as in the backprojection algorithm.
    #
    # if lNx != lNy:
    #    raise NotImplementedError("Input data must be square shaped!")
    # Corresponding sample frequencies
    fx = np.fft.fftfreq(lNx)  # 1D array
    fy = np.fft.fftfreq(lNy)  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    ky = 2 * np.pi * fy
    # Differentials for integral
    dphi0 = 2 * np.pi / A
    # We will later multiply with phi0.
    #               a, y, x
    kx = kx.reshape(1, -1)
    ky = ky.reshape(-1, 1)
    # Low-pass filter:
    # less-than-or-equal would give us zero division error.
    filter_klp = (kx**2 + ky**2 < km**2)
    # Filter M so there are no nans from the root
    M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
    prefactor = -1j * km / (2 * np.pi)
    prefactor *= dphi0
    # Also filter the prefactor, so nothing outside the required
    # low-pass contributes to the sum.
    # The filter is now dependent on the rotational position of the
    # specimen. We have to include information from the angles.
    # We want to estimate the rotational axis for every frame. We
    # do that by computing the cross-product of the vectors in
    # angles from the current and previous image.
    u, v, _w = tilted_axis
    filterabs = np.abs(kx*v+ky*u) * filter_klp
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    prefactor *= np.exp(-1j * km * (M-1) * lD)
    if count is not None:
        count.value += 1
    # filter (2) must be applied before rotation as well
    # exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (   cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( - sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # This filter is effectively an inverse Fourier transform
    #
    # exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )
    #
    # xD =   x cos(ϕ₀) + z sin(ϕ₀)
    # zD = - x sin(ϕ₀) + z cos(ϕ₀)
    # Everything is in pixels
    center = lNz / 2.0
    # x = np.linspace(-centerx, centerx, lNx, endpoint=False)
    # x = np.arange(lNx) - center + .5
    # Meshgrid for output array
    # zv, yv, xv = np.meshgrid(x,x,x)
    #               z, y, x
    # xv = x.reshape( 1, 1,-1)
    # yv = x.reshape( 1,-1, 1)
    # z = np.arange(ln) - center + .5
    z = np.linspace(-center, center, lNz, endpoint=False)
    zv = z.reshape(-1, 1, 1)
    #               y, x
    Mp = M.reshape(lNy, lNx)
    # filter2 = np.exp(1j * zv * km * (Mp - 1))
    f2_exp_fac = 1j * km * (Mp - 1)
    if save_memory:
        # compute filter2 later (once per z-slice inside the loop)
        pass
    else:
        # compute filter2 now
        # (this requires more RAM but is faster)
        filter2 = ne.evaluate("exp(factor * zv)",
                              local_dict={"factor": f2_exp_fac,
                                          "zv": zv},
                              casting="same_kind")
    if count is not None:
        count.value += 1
    # Prepare complex output image
    if onlyreal:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype)
    else:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)
    # Create plan for FFTW:
    # Flag is "estimate":
    #   specifies that, instead of actual measurements of different
    #   algorithms, a simple heuristic is used to pick a (probably
    #   sub-optimal) plan quickly. With this flag, the input/output
    #   arrays are not overwritten during planning.
    # Byte-aligned arrays
    oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
    myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
                              flags=["FFTW_ESTIMATE"], axes=(0, 1))
    # Create plan for IFFTW:
    inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
    # plan is "patient":
    #    FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range
    #    of algorithms and often produces a “more optimal” plan
    #    (especially for large transforms), but at the expense of
    #    several times longer planning time (especially for large
    #    transforms).
    myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
                               axes=(0, 1),
                               direction="FFTW_BACKWARD",
                               flags=["FFTW_MEASURE"])
    # filtered projections in loop
    filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)
    # Rotate all points such that we are effectively rotating everything
    # about the y-axis.
    angles = rotate_points_to_axis(points=angles, axis=tilted_axis_yz)
    for aa in np.arange(A):
        if not (padding[0] and padding[1]):
            # no padding
            oneslice[:] = uSin[aa]
        elif padval == "edge":
            # padding with edge values
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="edge")
        else:
            # padding with linear ramp
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="linear_ramp",
                                 end_values=(padval,))
        myfftw_plan.execute()
        # normalize to (lNx * lNy) for FFTW and multiply with prefactor, filter
        oneslice *= filterabs * prefactor / (lNx * lNy)
        # apply the z-dependent filter (2) and inverse-transform each
        # z-slice of this projection
        for p in range(len(zv)):
            if save_memory:
                # compute filter2 here;
                # this is comparatively slower than the other case
                ne.evaluate("exp(factor * zvp) * projectioni",
                            local_dict={"zvp": zv[p],
                                        "projectioni": oneslice,
                                        "factor": f2_exp_fac},
                            casting="same_kind",
                            out=inarr)
            else:
                # use universal functions
                np.multiply(filter2[p], oneslice, out=inarr)
            myifftw_plan.execute()
            # crop the padding back off before storing the slice
            filtered_proj[p, :, :] = inarr[padyl:padyl+lny, padxl:padxl+lnx]
        # The Cartesian axes in our array are ordered like this: [z,y,x]
        # However, the rotation matrix requires [x,y,z]. Therefore, we
        # need to np.transpose the first and last axis and also invert the
        # y-axis.
        fil_p_t = filtered_proj.transpose(2, 1, 0)[:, ::-1, :]
        # get rotation matrix for this point and also rotate in plane
        _drot, drotinv = rotation_matrix_from_point_planerot(angles[aa],
                                                             plane_angle=angz,
                                                             ret_inv=True)
        # apply offset required by affine_transform
        # The offset is only required for the rotation in
        # the x-z-plane.
        # This could be achieved like so:
        # The offset "-.5" assures that we are rotating about
        # the center of the image and not the value at the center
        # of the array (this is also what `scipy.ndimage.rotate` does.
        c = 0.5 * np.array(fil_p_t.shape) - .5
        offset = c - np.dot(drotinv, c)
        # Perform rotation
        # We cannot split the inplace-rotation into multiple subrotations
        # as we did in _Back_3d_tilted.backpropagate_3d, because the rotation
        # axis is arbitrarily placed in the 3d array. Rotating single
        # slices does not yield the same result as rotating the entire
        # array. Instead of using affine_transform, map_coordinates might
        # be faster for multiple cores.
        # Also undo the axis transposition that we performed previously.
        outarr.real += scipy.ndimage.affine_transform(
            fil_p_t.real, drotinv,
            offset=offset,
            mode="constant",
            cval=0,
            order=intp_order).transpose(2, 1, 0)[:, ::-1, :]
        if not onlyreal:
            outarr.imag += scipy.ndimage.affine_transform(
                fil_p_t.imag, drotinv,
                offset=offset,
                mode="constant",
                cval=0,
                order=intp_order).transpose(2, 1, 0)[:, ::-1, :]
        if count is not None:
            count.value += 1
    return outarr
| {
"content_hash": "06fc30bd1d5f638c2119b53d32f43c0b",
"timestamp": "",
"source": "github",
"line_count": 949,
"max_line_length": 79,
"avg_line_length": 37.270811380400424,
"alnum_prop": 0.5879841673734804,
"repo_name": "RI-imaging/ODTbrain",
"id": "7b68468423e87bff8f65d634e82a97924bc3e323",
"size": "35497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odtbrain/_alg3d_bppt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "14950"
},
{
"name": "Python",
"bytes": "170001"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CommitteeContributions(Choreography):
    """Choreo wrapper for the NYTimes CommitteeContributions endpoint."""

    _CHOREO_PATH = '/Library/NYTimes/CampaignFinance/Committees/CommitteeContributions'

    def __init__(self, temboo_session):
        """
        Create a new instance of the CommitteeContributions Choreo.
        A TembooSession object, containing a valid set of Temboo
        credentials, must be supplied.
        """
        super(CommitteeContributions, self).__init__(
            temboo_session, self._CHOREO_PATH)

    def new_input_set(self):
        # Fresh, empty input set for this Choreo.
        return CommitteeContributionsInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific ResultSet.
        return CommitteeContributionsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for monitoring/retrieving an asynchronous execution.
        return CommitteeContributionsChoreographyExecution(session, exec_id, path)
class CommitteeContributionsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to
    the CommitteeContributions Choreo. The InputSet object is used to
    specify input parameters when executing this Choreo.
    """

    # Each setter simply records the named input value; `_set_input`
    # is inherited from InputSet.

    def set_APIKey(self, value):
        """(required, string) The API Key provided by NY Times."""
        self._set_input('APIKey', value)

    def set_CampaignCycle(self, value):
        """(required, integer) The campaign cycle year in YYYY format; must be an even year."""
        self._set_input('CampaignCycle', value)

    def set_CommitteeFECID(self, value):
        """(conditional, string) A political action committee's FEC ID. Either CommitteeFECID OR Name must be provided."""
        self._set_input('CommitteeFECID', value)

    def set_Name(self, value):
        """(conditional, string) Name of a political action committee (PAC) whose contributions will be obtained. Either Name or CommitteeFECID must be provided."""
        self._set_input('Name', value)

    def set_Offset(self, value):
        """(optional, integer) The first 20 results are shown by default; set Offset to page through the results (e.g., Offset=40 displays results 41-60)."""
        self._set_input('Offset', value)

    def set_ResponseFormat(self, value):
        """(optional, string) Either json or xml. Default is json."""
        self._set_input('ResponseFormat', value)
class CommitteeContributionsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the
    CommitteeContributions Choreo. The ResultSet object is used to
    retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, json_str):
        """Deserialize a JSON string into a Python object.

        The parameter was renamed from ``str`` to avoid shadowing the
        builtin; existing call sites pass it positionally, so this is
        backward compatible.
        """
        return json.loads(json_str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo
        execution. (The response from the NY Times API, in the format
        (json or xml) chosen via the ResponseFormat input; default json.)
        Returns None if no response is present.
        """
        return self._output.get('Response', None)
class CommitteeContributionsChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronous CommitteeContributions run."""

    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific ResultSet.
        return CommitteeContributionsResultSet(response, path)
| {
"content_hash": "8ab25fe379a1d29c25fe4ae46c7ada1d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 230,
"avg_line_length": 48.71604938271605,
"alnum_prop": 0.7108464267612773,
"repo_name": "jordanemedlock/psychtruths",
"id": "64a152a57fb18581bdb7eb533d5c4ea8c707b695",
"size": "4861",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/NYTimes/CampaignFinance/Committees/CommitteeContributions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
from perception.shared.variables import db_config
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
class Sql(object):
    """Small helper around SQLAlchemy session creation and get-or-create."""

    def __init__(self):
        # Bug fix: the session returned by create_session() was
        # previously discarded, leaving the instance without any state.
        # Keep it so instances actually hold a usable session.
        self.db_session = self.create_session()

    @staticmethod
    def create_session():
        """Create an engine from ``db_config`` and return a new session.

        NOTE(review): every call builds a fresh engine (pool_size=20);
        callers that need many sessions should reuse a single engine
        instead of calling this repeatedly.
        """
        engine = create_engine(URL(**db_config), pool_size=20)
        Session = sessionmaker(bind=engine)
        db_session = Session()
        return db_session

    @staticmethod
    def get_or_create(session, model, **kwargs):
        """Return the first `model` row matching `kwargs`, creating it if absent.

        NOTE(review): not safe under concurrent writers -- two processes
        may both miss the lookup and insert duplicates; rely on a unique
        constraint and handle IntegrityError if that matters.
        """
        instance = session.query(model).filter_by(**kwargs).first()
        if instance:
            return instance
        instance = model(**kwargs)
        session.add(instance)
        session.commit()
        return instance
| {
"content_hash": "78620604a3a1c4a3ce00e5685d5832db",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 67,
"avg_line_length": 27.821428571428573,
"alnum_prop": 0.6302952503209243,
"repo_name": "asrozar/perception",
"id": "6bccb373e1eb589088f0cb6e11b7ce9d369ee332",
"size": "779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perception/classes/sql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "493"
},
{
"name": "Python",
"bytes": "142478"
}
],
"symlink_target": ""
} |
import socket
import select
import os.path
import time
import atexit
from logging import debug, info, warn, error
from threading import Thread
from functools import partial
import RPIO
import RPIO._GPIO as _GPIO
# Internals
# Base path of the Linux sysfs GPIO interface used for interrupt setup.
_SYS_GPIO_ROOT = '/sys/class/gpio/'
# Bind address for the TCP callback server sockets (all interfaces).
_TCP_SOCKET_HOST = "0.0.0.0"
# Human-readable names for gpio_function() return codes (debug output only).
GPIO_FUNCTIONS = {0: "OUTPUT", 1: "INPUT", 4: "ALT0", 7: "-"}
# Names indexed by the _GPIO pull-up/down constants (used in debug output).
_PULL_UPDN = ("PUD_OFF", "PUD_DOWN", "PUD_UP")
def _threaded_callback(callback, *args):
"""
Internal wrapper to start a callback in threaded mode. Using the
daemon mode to not block the main thread from exiting.
"""
t = Thread(target=callback, args=args)
t.daemon = True
t.start()
def exit_handler():
    """ Auto-cleanup on exit """
    # Stop the blocking wait_for_interrupts() loop first, then tear down
    # all interrupt-related sockets and sysfs interfaces via the
    # module-level RPIO helpers.
    RPIO.stop_waiting_for_interrupts()
    RPIO.cleanup_interrupts()
# Registered so the cleanup always runs at interpreter shutdown.
atexit.register(exit_handler)
class Interruptor:
    """
    Object-based wrapper for interrupt management.

    Keeps epoll registrations, sysfs GPIO value files and TCP callback
    sockets in class-level maps, and dispatches events to user callbacks
    from `wait_for_interrupts()`.
    """
    _epoll = select.epoll()
    # Whether to warn() when a kernel GPIO interface already exists.
    _show_warnings = True
    # Interrupt callback maps
    _map_fileno_to_file = {}
    _map_fileno_to_gpioid = {}
    _map_fileno_to_options = {}
    _map_gpioid_to_fileno = {}
    _map_gpioid_to_callbacks = {}
    # Keep track of created kernel interfaces for later cleanup
    _gpio_kernel_interfaces_created = []
    # TCP socket stuff
    _tcp_client_sockets = {} # { fileno: (socket, cb) }
    _tcp_server_sockets = {} # { fileno: (socket, cb) }
    # Whether to continue the epoll loop or quit at next chance. You
    # can manually set this to False to stop `wait_for_interrupts()`.
    _is_waiting_for_interrupts = False
    def add_tcp_callback(self, port, callback, threaded_callback=False):
        """
        Adds a unix socket server callback, which will be invoked when values
        arrive from a connected socket client. The callback must accept two
        parameters, eg. ``def callback(socket, msg)``.

        If `threaded_callback` is True the callback is wrapped so each
        invocation runs on its own daemon thread.
        """
        if not callback:
            raise AttributeError("No callback")
        serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        serversocket.bind((_TCP_SOCKET_HOST, port))
        serversocket.listen(1)
        # Non-blocking so accept()/recv() never stall the epoll loop.
        serversocket.setblocking(0)
        self._epoll.register(serversocket.fileno(), select.EPOLLIN)
        # Prepare the callback (wrap in Thread if needed)
        cb = callback if not threaded_callback else \
                partial(_threaded_callback, callback)
        self._tcp_server_sockets[serversocket.fileno()] = (serversocket, cb)
        debug("Socket server started at port %s and callback added." % port)
    def add_interrupt_callback(self, gpio_id, callback, edge='both',
            pull_up_down=_GPIO.PUD_OFF, threaded_callback=False,
            debounce_timeout_ms=None):
        """
        Add a callback to be executed when the value on 'gpio_id' changes to
        the edge specified via the 'edge' parameter (default='both').

        `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
        `RPIO.PUD_OFF`.

        If `threaded_callback` is True, the callback will be started
        inside a Thread.

        `debounce_timeout_ms`, if set, suppresses dispatch of further
        interrupts until that many milliseconds have passed since the last
        dispatched one.
        """
        gpio_id = _GPIO.channel_to_gpio(gpio_id)
        debug("Adding callback for GPIO %s" % gpio_id)
        if not edge in ["falling", "rising", "both", "none"]:
            raise AttributeError("'%s' is not a valid edge." % edge)
        # NOTE(review): the message below interpolates `edge` rather than
        # `pull_up_down` -- looks like a copy/paste slip in the error text.
        if not pull_up_down in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
            raise AttributeError("'%s' is not a valid pull_up_down." % edge)
        # Make sure the gpio_id is valid
        if not gpio_id in (RPIO.GPIO_LIST_R1 if _GPIO.RPI_REVISION == 1 else \
                RPIO.GPIO_LIST_R2):
            raise AttributeError("GPIO %s is not a valid gpio-id." % gpio_id)
        # Require INPUT pin setup; and set the correct PULL_UPDN
        if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
            RPIO.set_pullupdn(gpio_id, pull_up_down)
        else:
            debug("- changing gpio function from %s to INPUT" % \
                    (GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))]))
            RPIO.setup(gpio_id, RPIO.IN, pull_up_down)
        # Prepare the callback (wrap in Thread if needed)
        cb = callback if not threaded_callback else \
                partial(_threaded_callback, callback)
        # Prepare the /sys/class path of this gpio
        path_gpio = "%sgpio%s/" % (_SYS_GPIO_ROOT, gpio_id)
        # If initial callback for this GPIO then set everything up. Else make
        # sure the edge detection is the same.
        if gpio_id in self._map_gpioid_to_callbacks:
            with open(path_gpio + "edge", "r") as f:
                e = f.read().strip()
                if e != edge:
                    raise AttributeError(("Cannot add callback for gpio %s:"
                            " edge detection '%s' not compatible with existing"
                            " edge detection '%s'.") % (gpio_id, edge, e))
            # Check whether edge is the same, else throw Exception
            debug("- kernel interface already setup for GPIO %s" % gpio_id)
            self._map_gpioid_to_callbacks[gpio_id].append(cb)
        else:
            # If kernel interface already exists unexport first for clean setup
            if os.path.exists(path_gpio):
                if self._show_warnings:
                    warn("Kernel interface for GPIO %s already exists." % \
                            gpio_id)
                debug("- unexporting kernel interface for GPIO %s" % gpio_id)
                with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
                    f.write("%s" % gpio_id)
                # Give the kernel a moment to remove the interface.
                time.sleep(0.1)
            # Export kernel interface /sys/class/gpio/gpioN
            with open(_SYS_GPIO_ROOT + "export", "w") as f:
                f.write("%s" % gpio_id)
            self._gpio_kernel_interfaces_created.append(gpio_id)
            debug("- kernel interface exported for GPIO %s" % gpio_id)
            # Configure gpio as input
            with open(path_gpio + "direction", "w") as f:
                f.write("in")
            # Configure gpio edge detection
            with open(path_gpio + "edge", "w") as f:
                f.write(edge)
            debug(("- kernel interface configured for GPIO %s "
                    "(edge='%s', pullupdn=%s)") % (gpio_id, edge, \
                    _PULL_UPDN[pull_up_down]))
            # Open the gpio value stream and read the initial value
            f = open(path_gpio + "value", 'r')
            val_initial = f.read().strip()
            debug("- inital gpio value: %s" % val_initial)
            # Rewind so the next interrupt read starts at offset 0.
            f.seek(0)
            # Add callback info to the mapping dictionaries
            self._map_fileno_to_file[f.fileno()] = f
            self._map_fileno_to_gpioid[f.fileno()] = gpio_id
            self._map_fileno_to_options[f.fileno()] = {
                    "debounce_timeout_s": debounce_timeout_ms / 1000.0 if \
                            debounce_timeout_ms else 0,
                    "interrupt_last": 0,
                    "edge": edge
                    }
            self._map_gpioid_to_fileno[gpio_id] = f.fileno()
            self._map_gpioid_to_callbacks[gpio_id] = [cb]
            # Add to epoll
            self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR)
    def del_interrupt_callback(self, gpio_id):
        """ Delete all interrupt callbacks from a certain gpio """
        debug("- removing interrupts on gpio %s" % gpio_id)
        gpio_id = _GPIO.channel_to_gpio(gpio_id)
        fileno = self._map_gpioid_to_fileno[gpio_id]
        # 1. Remove from epoll
        self._epoll.unregister(fileno)
        # 2. Cache the file
        f = self._map_fileno_to_file[fileno]
        # 3. Remove from maps
        del self._map_fileno_to_file[fileno]
        del self._map_fileno_to_gpioid[fileno]
        del self._map_fileno_to_options[fileno]
        del self._map_gpioid_to_fileno[gpio_id]
        del self._map_gpioid_to_callbacks[gpio_id]
        # 4. Close file last in case of IOError
        f.close()
    def _handle_interrupt(self, fileno, val):
        """ Internally distributes interrupts to all attached callbacks """
        val = int(val)
        # Filter invalid edge values (sometimes 1 comes in when edge=falling)
        edge = self._map_fileno_to_options[fileno]["edge"]
        if (edge == 'rising' and val == 0) or (edge == 'falling' and val == 1):
            return
        # If user activated debounce for this callback, check timing now
        debounce = self._map_fileno_to_options[fileno]["debounce_timeout_s"]
        if debounce:
            t = time.time()
            t_last = self._map_fileno_to_options[fileno]["interrupt_last"]
            if t - t_last < debounce:
                debug("- don't start interrupt callback due to debouncing")
                return
            self._map_fileno_to_options[fileno]["interrupt_last"] = t
        # Start the callback(s) now
        gpio_id = self._map_fileno_to_gpioid[fileno]
        if gpio_id in self._map_gpioid_to_callbacks:
            for cb in self._map_gpioid_to_callbacks[gpio_id]:
                cb(gpio_id, val)
    def close_tcp_client(self, fileno):
        """ Unregister, close and forget a connected TCP client socket """
        debug("closing client socket fd %s" % fileno)
        self._epoll.unregister(fileno)
        socket, cb = self._tcp_client_sockets[fileno]
        socket.close()
        del self._tcp_client_sockets[fileno]
    def wait_for_interrupts(self, epoll_timeout=1):
        """
        Blocking loop to listen for GPIO interrupts and distribute them to
        associated callbacks. epoll_timeout is an easy way to shutdown the
        blocking function. Per default the timeout is set to 1 second; if
        `_is_waiting_for_interrupts` is set to False the loop will exit.

        If an exception occurs while waiting for interrupts, the interrupt
        gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
        this case all interrupts will be reset and you'd need to add the
        callbacks again before using `wait_for_interrupts(..)` again.
        """
        self._is_waiting_for_interrupts = True
        while self._is_waiting_for_interrupts:
            events = self._epoll.poll(epoll_timeout)
            for fileno, event in events:
                debug("- epoll event on fd %s: %s" % (fileno, event))
                if fileno in self._tcp_server_sockets:
                    # New client connection to socket server
                    serversocket, cb = self._tcp_server_sockets[fileno]
                    connection, address = serversocket.accept()
                    connection.setblocking(0)
                    f = connection.fileno()
                    self._epoll.register(f, select.EPOLLIN)
                    self._tcp_client_sockets[f] = (connection, cb)
                elif event & select.EPOLLIN:
                    # Input from TCP socket
                    socket, cb = self._tcp_client_sockets[fileno]
                    content = socket.recv(1024)
                    if not content or not content.strip():
                        # No content means quitting
                        self.close_tcp_client(fileno)
                    else:
                        sock, cb = self._tcp_client_sockets[fileno]
                        cb(self._tcp_client_sockets[fileno][0], \
                                content.strip())
                elif event & select.EPOLLHUP:
                    # TCP Socket Hangup
                    self.close_tcp_client(fileno)
                elif event & select.EPOLLPRI:
                    # GPIO interrupts
                    f = self._map_fileno_to_file[fileno]
                    # read() is workaround for not getting new values
                    # with read(1)
                    val = f.read().strip()
                    f.seek(0)
                    self._handle_interrupt(fileno, val)
    def stop_waiting_for_interrupts(self):
        """
        Ends the blocking `wait_for_interrupts()` loop the next time it can,
        which depends on the `epoll_timeout` (per default its 1 second).
        """
        self._is_waiting_for_interrupts = False
    def cleanup_interfaces(self):
        """
        Removes all /sys/class/gpio/gpioN interfaces that this script created,
        and deletes callback bindings. Should be used after using interrupts.
        """
        debug("Cleaning up interfaces...")
        for gpio_id in self._gpio_kernel_interfaces_created:
            # Close the value-file and remove interrupt bindings
            self.del_interrupt_callback(gpio_id)
            # Remove the kernel GPIO interface
            debug("- unexporting GPIO %s" % gpio_id)
            with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
                f.write("%s" % gpio_id)
        # Reset list of created interfaces
        self._gpio_kernel_interfaces_created = []
    def cleanup_tcpsockets(self):
        """
        Closes all TCP connections and then the socket servers
        """
        # NOTE(review): iterating .keys() while close_tcp_client() deletes
        # entries relies on Python 2's list-returning keys() (this module
        # ships under python2.7); on Python 3 this would need list(...).
        for fileno in self._tcp_client_sockets.keys():
            self.close_tcp_client(fileno)
        for fileno, items in self._tcp_server_sockets.items():
            socket, cb = items
            debug("- _cleanup server socket connection (fd %s)" % fileno)
            self._epoll.unregister(fileno)
            socket.close()
        self._tcp_server_sockets = {}
    def cleanup_interrupts(self):
        """
        Clean up all interrupt-related sockets and interfaces. Recommended to
        use before exiting your program! After this you'll need to re-add the
        interrupt callbacks before waiting for interrupts again.
        """
        self.cleanup_tcpsockets()
        self.cleanup_interfaces()
| {
"content_hash": "b0ce2326542865b1dbe7d09a3c7c834d",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 79,
"avg_line_length": 40.30701754385965,
"alnum_prop": 0.5745375408052231,
"repo_name": "TechV/DroneOS",
"id": "cef492da811f16531ffea18d5de02e631896abf3",
"size": "14586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DroneOS/buildroot/system/skeleton/usr/lib/python2.7/RPIO/_RPIO.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "69099"
},
{
"name": "Arc",
"bytes": "257"
},
{
"name": "Bison",
"bytes": "15399"
},
{
"name": "C",
"bytes": "382560"
},
{
"name": "C++",
"bytes": "52438"
},
{
"name": "CSS",
"bytes": "81402"
},
{
"name": "JavaScript",
"bytes": "105210"
},
{
"name": "Makefile",
"bytes": "1598252"
},
{
"name": "Perl",
"bytes": "36726"
},
{
"name": "Python",
"bytes": "216857"
},
{
"name": "Shell",
"bytes": "110640"
}
],
"symlink_target": ""
} |
"""Regresssion tests for urllib"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
    """Escape char as RFC 2396 specifies.

    Returns '%' followed by the uppercase hex value of ``ord(char)``,
    zero-padded to at least two digits (e.g. ' ' -> '%20').
    """
    # The format spec zero-pads and uppercases in one step, replacing the
    # manual hex()/slice/pad sequence; chars above 0xFF still widen as
    # needed, matching the old behavior.
    return "%{:02X}".format(ord(char))
# Shortcut for testing FancyURLopener
_urlopener = None


def urlopen(url, data=None, proxies=None):
    """urlopen(url [, data]) -> open file-like object"""
    global _urlopener
    if proxies is not None:
        # An explicit proxy mapping always gets a fresh, uncached opener.
        opener = urllib.request.FancyURLopener(proxies=proxies)
    else:
        opener = _urlopener
        if not opener:
            # Lazily build and cache the shared opener, silencing the
            # deprecation warning its constructor emits.
            with support.check_warnings(
                    ('FancyURLopener style of invoking requests is deprecated.',
                    DeprecationWarning)):
                opener = urllib.request.FancyURLopener()
            _urlopener = opener
    if data is None:
        return opener.open(url)
    return opener.open(url, data)
def fakehttp(fakedata):
    """Return an HTTPConnection subclass whose socket replays *fakedata*.

    The returned class records the last payload written to its socket in
    the class attribute ``buf`` so tests can inspect request bytes.
    """
    class FakeSocket(io.BytesIO):
        # Reference count: makefile() hands out aliases of this object,
        # so only the final close() really closes the buffer.
        io_refs = 1

        def sendall(self, data):
            # Capture outgoing request bytes for later inspection.
            FakeHTTPConnection.buf = data

        def makefile(self, *args, **kwds):
            self.io_refs += 1
            return self

        def read(self, amt=None):
            return b"" if self.closed else super().read(amt)

        def readline(self, length=None):
            return b"" if self.closed else super().readline(length)

        def close(self):
            self.io_refs -= 1
            if not self.io_refs:
                super().close()

    class FakeHTTPConnection(http.client.HTTPConnection):
        # buffer to store data for verification in urlopen tests.
        buf = None
        fakesock = FakeSocket(fakedata)

        def connect(self):
            self.sock = self.fakesock

    return FakeHTTPConnection
class FakeHTTPMixin(object):
    """Mixin that swaps http.client.HTTPConnection for a canned fake.

    Call ``unfakehttp()`` to restore the real connection class.
    """

    def fakehttp(self, fakedata):
        """Install a fake HTTPConnection that replays *fakedata*."""
        self._real_connection = http.client.HTTPConnection
        http.client.HTTPConnection = fakehttp(fakedata)

    def unfakehttp(self):
        """Restore the genuine HTTPConnection class."""
        http.client.HTTPConnection = self._real_connection
class FakeFTPMixin(object):
    """Mixin that replaces urllib.request.ftpwrapper with an inert stub."""

    def fakeftp(self):
        """Install a stub ftpwrapper that serves empty streams."""
        class FakeFtpWrapper(object):
            # Accepts any connection arguments and never touches the network.
            def __init__(self, user, passwd, host, port, dirs, timeout=None,
                         persistent=True):
                pass

            def retrfile(self, file, type):
                return io.BytesIO(), 0

            def close(self):
                pass

        self._real_ftpwrapper = urllib.request.ftpwrapper
        urllib.request.ftpwrapper = FakeFtpWrapper

    def unfakeftp(self):
        """Put the genuine ftpwrapper class back."""
        urllib.request.ftpwrapper = self._real_ftpwrapper
class urlopen_FileTests(unittest.TestCase):
    """Test urlopen() opening a temporary file.
    Try to test as much functionality as possible so as to cut down on reliance
    on connecting to the Net for testing.
    """
    def setUp(self):
        # Create a temp file to use for testing
        self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
                          "ascii")
        f = open(support.TESTFN, 'wb')
        try:
            f.write(self.text)
        finally:
            f.close()
        self.pathname = support.TESTFN
        # Open a file: URL to the temp file; methods exercise this object.
        self.returned_obj = urlopen("file:%s" % self.pathname)
    def tearDown(self):
        """Shut down the open object"""
        self.returned_obj.close()
        os.remove(support.TESTFN)
    def test_interface(self):
        # Make sure object returned by urlopen() has the specified methods
        for attr in ("read", "readline", "readlines", "fileno",
                     "close", "info", "geturl", "getcode", "__iter__"):
            self.assertTrue(hasattr(self.returned_obj, attr),
                         "object returned by urlopen() lacks %s attribute" %
                         attr)
    def test_read(self):
        self.assertEqual(self.text, self.returned_obj.read())
    def test_readline(self):
        self.assertEqual(self.text, self.returned_obj.readline())
        # A second readline() past EOF must return the empty byte string.
        self.assertEqual(b'', self.returned_obj.readline(),
                         "calling readline() after exhausting the file did not"
                         " return an empty string")
    def test_readlines(self):
        lines_list = self.returned_obj.readlines()
        self.assertEqual(len(lines_list), 1,
                         "readlines() returned the wrong number of lines")
        self.assertEqual(lines_list[0], self.text,
                         "readlines() returned improper text")
    def test_fileno(self):
        # fileno() must expose a real OS-level descriptor for the file.
        file_num = self.returned_obj.fileno()
        self.assertIsInstance(file_num, int, "fileno() did not return an int")
        self.assertEqual(os.read(file_num, len(self.text)), self.text,
                         "Reading on the file descriptor returned by fileno() "
                         "did not return the expected text")
    def test_close(self):
        # Test close() by calling it here and then having it be called again
        # by the tearDown() method for the test
        self.returned_obj.close()
    def test_info(self):
        self.assertIsInstance(self.returned_obj.info(), email.message.Message)
    def test_geturl(self):
        self.assertEqual(self.returned_obj.geturl(), self.pathname)
    def test_getcode(self):
        # file: URLs carry no HTTP status, so getcode() is None.
        self.assertIsNone(self.returned_obj.getcode())
    def test_iter(self):
        # Test iterator
        # Don't need to count number of iterations since test would fail the
        # instant it returned anything beyond the first line from the
        # comparison.
        # Use the iterator in the usual implicit way to test for ticket #4608.
        for line in self.returned_obj:
            self.assertEqual(line, self.text)
    def test_relativelocalfile(self):
        # Relative file paths are rejected by urlopen().
        self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
    """Tests for proxy-related environment variable handling."""

    def setUp(self):
        # The guard records every env-var change so tearDown can undo it.
        self.env = support.EnvironmentVarGuard()
        # Start from a clean slate: drop every *proxy* variable.
        for name in list(os.environ):
            if 'proxy' in name.lower():
                self.env.unset(name)

    def tearDown(self):
        # Put all proxy related env vars back the way they were.
        self.env.__exit__()
        del self.env

    def test_getproxies_environment_keep_no_proxies(self):
        self.env.set('NO_PROXY', 'localhost')
        proxies = urllib.request.getproxies_environment()
        # Returned keys are lower-cased and stripped of the '_proxy' suffix.
        self.assertEqual('localhost', proxies['no'])
        # A comma/space separated NO_PROXY list must match each entry.
        self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
        self.assertTrue(
            urllib.request.proxy_bypass_environment('anotherdomain.com'))
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
    """Test urlopen() opening a fake http connection."""
    def check_read(self, ver):
        # Serve a minimal response for HTTP version `ver` and verify the
        # body, URL and status code round-trip through urlopen().
        self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
        try:
            fp = urlopen("http://python.org/")
            self.assertEqual(fp.readline(), b"Hello!")
            self.assertEqual(fp.readline(), b"")
            self.assertEqual(fp.geturl(), 'http://python.org/')
            self.assertEqual(fp.getcode(), 200)
        finally:
            self.unfakehttp()
    def test_url_fragment(self):
        # Issue #11703: geturl() omits fragments in the original URL.
        url = 'http://docs.python.org/library/urllib.html#OK'
        self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
        try:
            fp = urllib.request.urlopen(url)
            self.assertEqual(fp.geturl(), url)
        finally:
            self.unfakehttp()
    def test_willclose(self):
        self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
        try:
            resp = urlopen("http://www.python.org")
            self.assertTrue(resp.fp.will_close)
        finally:
            self.unfakehttp()
    def test_read_0_9(self):
        # "0.9" response accepted (but not "simple responses" without
        # a status line)
        self.check_read(b"0.9")
    def test_read_1_0(self):
        self.check_read(b"1.0")
    def test_read_1_1(self):
        self.check_read(b"1.1")
    def test_read_bogus(self):
        # urlopen() should raise OSError for many error codes.
        self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
        try:
            self.assertRaises(OSError, urlopen, "http://python.org/")
        finally:
            self.unfakehttp()
    def test_invalid_redirect(self):
        # urlopen() should raise OSError for many error codes.
        # A redirect to a file:// URL must be rejected as an HTTPError.
        self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
        try:
            self.assertRaises(urllib.error.HTTPError, urlopen,
                              "http://python.org/")
        finally:
            self.unfakehttp()
    def test_empty_socket(self):
        # urlopen() raises OSError if the underlying socket does not send any
        # data. (#1680230)
        self.fakehttp(b'')
        try:
            self.assertRaises(OSError, urlopen, "http://something")
        finally:
            self.unfakehttp()
    def test_missing_localfile(self):
        # Test for #10836
        with self.assertRaises(urllib.error.URLError) as e:
            urlopen('file://localhost/a/file/which/doesnot/exists.py')
        self.assertTrue(e.exception.filename)
        self.assertTrue(e.exception.reason)
    def test_file_notexists(self):
        # A file: URL works while the file exists and raises after removal.
        fd, tmp_file = tempfile.mkstemp()
        tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
        try:
            self.assertTrue(os.path.exists(tmp_file))
            with urlopen(tmp_fileurl) as fobj:
                self.assertTrue(fobj)
        finally:
            os.close(fd)
            os.unlink(tmp_file)
        self.assertFalse(os.path.exists(tmp_file))
        with self.assertRaises(urllib.error.URLError):
            urlopen(tmp_fileurl)
    def test_ftp_nohost(self):
        test_ftp_url = 'ftp:///path'
        with self.assertRaises(urllib.error.URLError) as e:
            urlopen(test_ftp_url)
        self.assertFalse(e.exception.filename)
        self.assertTrue(e.exception.reason)
    def test_ftp_nonexisting(self):
        with self.assertRaises(urllib.error.URLError) as e:
            urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
        self.assertFalse(e.exception.filename)
        self.assertTrue(e.exception.reason)
    @patch.object(urllib.request, 'MAXFTPCACHE', 0)
    def test_ftp_cache_pruning(self):
        # With MAXFTPCACHE patched to 0 the cached entry must be pruned.
        self.fakeftp()
        try:
            urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
            urlopen('ftp://localhost')
        finally:
            self.unfakeftp()
    def test_userpass_inurl(self):
        self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
        try:
            fp = urlopen("http://user:pass@python.org/")
            self.assertEqual(fp.readline(), b"Hello!")
            self.assertEqual(fp.readline(), b"")
            self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
            self.assertEqual(fp.getcode(), 200)
        finally:
            self.unfakehttp()
    def test_userpass_inurl_w_spaces(self):
        self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
        try:
            userpass = "a b:c d"
            url = "http://{}@python.org/".format(userpass)
            fakehttp_wrapper = http.client.HTTPConnection
            authorization = ("Authorization: Basic %s\r\n" %
                             b64encode(userpass.encode("ASCII")).decode("ASCII"))
            fp = urlopen(url)
            # The authorization header must be in place
            self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
            self.assertEqual(fp.readline(), b"Hello!")
            self.assertEqual(fp.readline(), b"")
            # the spaces are quoted in URL so no match
            self.assertNotEqual(fp.geturl(), url)
            self.assertEqual(fp.getcode(), 200)
        finally:
            self.unfakehttp()
    def test_URLopener_deprecation(self):
        # Instantiating URLopener directly must emit a DeprecationWarning.
        with support.check_warnings(('',DeprecationWarning)):
            urllib.request.URLopener()
    @unittest.skipUnless(ssl, "ssl module required")
    def test_cafile_and_context(self):
        # Passing both cafile and context is a ValueError.
        context = ssl.create_default_context()
        with self.assertRaises(ValueError):
            urllib.request.urlopen(
                "https://localhost", cafile="/nonexistent/path", context=context
            )
class urlopen_DataTests(unittest.TestCase):
    """Test urlopen() opening a data URL."""
    def setUp(self):
        # text containing URL special- and unicode-characters
        self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
        # 2x1 pixel RGB PNG image with one black and one white pixel
        self.image = (
            b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
            b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
            b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
            b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
        # Percent-encoded UTF-8 form of self.text above.
        self.text_url = (
            "data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
            "D%26%20%C3%B6%20%C3%84%20")
        self.text_url_base64 = (
            "data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
            "sJT0mIPYgxCA%3D")
        # base64 encoded data URL that contains ignorable spaces,
        # such as "\n", " ", "%0A", and "%20".
        self.image_url = (
            "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
            "QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
            "vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
        self.text_url_resp = urllib.request.urlopen(self.text_url)
        self.text_url_base64_resp = urllib.request.urlopen(
            self.text_url_base64)
        self.image_url_resp = urllib.request.urlopen(self.image_url)
    def test_interface(self):
        # Make sure object returned by urlopen() has the specified methods
        for attr in ("read", "readline", "readlines",
                     "close", "info", "geturl", "getcode", "__iter__"):
            self.assertTrue(hasattr(self.text_url_resp, attr),
                         "object returned by urlopen() lacks %s attribute" %
                         attr)
    def test_info(self):
        self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
        self.assertEqual(self.text_url_base64_resp.info().get_params(),
            [('text/plain', ''), ('charset', 'ISO-8859-1')])
        self.assertEqual(self.image_url_resp.info()['content-length'],
            str(len(self.image)))
        # An empty data URL defaults to text/plain;charset=US-ASCII.
        self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
            [('text/plain', ''), ('charset', 'US-ASCII')])
    def test_geturl(self):
        self.assertEqual(self.text_url_resp.geturl(), self.text_url)
        self.assertEqual(self.text_url_base64_resp.geturl(),
            self.text_url_base64)
        self.assertEqual(self.image_url_resp.geturl(), self.image_url)
    def test_read_text(self):
        # Body decoded with the charset declared in the URL itself.
        self.assertEqual(self.text_url_resp.read().decode(
            dict(self.text_url_resp.info().get_params())['charset']), self.text)
    def test_read_text_base64(self):
        self.assertEqual(self.text_url_base64_resp.read().decode(
            dict(self.text_url_base64_resp.info().get_params())['charset']),
            self.text)
    def test_read_image(self):
        self.assertEqual(self.image_url_resp.read(), self.image)
    def test_missing_comma(self):
        # A data URL must contain a comma separating metadata from payload.
        self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
    def test_invalid_base64_data(self):
        # missing padding character
        self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
    """Test urllib.urlretrieve() on local files"""
    def setUp(self):
        # Create a list of temporary files. Each item in the list is a file
        # name (absolute path or relative to the current working directory).
        # All files in this list will be deleted in the tearDown method. Note,
        # this only helps to makes sure temporary files get deleted, but it
        # does nothing about trying to close files that may still be open. It
        # is the responsibility of the developer to properly close files even
        # when exceptional conditions occur.
        self.tempFiles = []
        # Create a temporary file.
        self.registerFileForCleanUp(support.TESTFN)
        self.text = b'testing urllib.urlretrieve'
        try:
            FILE = open(support.TESTFN, 'wb')
            FILE.write(self.text)
            FILE.close()
        finally:
            try: FILE.close()
            except: pass
    def tearDown(self):
        # Delete the temporary files.
        for each in self.tempFiles:
            try: os.remove(each)
            except: pass
    def constructLocalFileUrl(self, filePath):
        # Build a file:// URL for filePath; skip if not UTF-8 encodable.
        filePath = os.path.abspath(filePath)
        try:
            filePath.encode("utf-8")
        except UnicodeEncodeError:
            raise unittest.SkipTest("filePath is not encodable to utf8")
        return "file://%s" % urllib.request.pathname2url(filePath)
    def createNewTempFile(self, data=b""):
        """Creates a new temporary file containing the specified data,
        registers the file for deletion during the test fixture tear down, and
        returns the absolute path of the file."""
        newFd, newFilePath = tempfile.mkstemp()
        try:
            self.registerFileForCleanUp(newFilePath)
            newFile = os.fdopen(newFd, "wb")
            newFile.write(data)
            newFile.close()
        finally:
            try: newFile.close()
            except: pass
        return newFilePath
    def registerFileForCleanUp(self, fileName):
        # Queue fileName for deletion in tearDown().
        self.tempFiles.append(fileName)
    def test_basic(self):
        # Make sure that a local file just gets its own location returned and
        # a headers value is returned.
        result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
        self.assertEqual(result[0], support.TESTFN)
        self.assertIsInstance(result[1], email.message.Message,
                              "did not get a email.message.Message instance "
                              "as second returned value")
    def test_copy(self):
        # Test that setting the filename argument works.
        second_temp = "%s.2" % support.TESTFN
        self.registerFileForCleanUp(second_temp)
        result = urllib.request.urlretrieve(self.constructLocalFileUrl(
            support.TESTFN), second_temp)
        self.assertEqual(second_temp, result[0])
        self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
                                                  "made")
        FILE = open(second_temp, 'rb')
        try:
            text = FILE.read()
            FILE.close()
        finally:
            try: FILE.close()
            except: pass
        self.assertEqual(self.text, text)
    def test_reporthook(self):
        # Make sure that the reporthook works.
        # count_holder is a mutable default used to keep the call count
        # across invocations of the hook.
        def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
            self.assertIsInstance(block_count, int)
            self.assertIsInstance(block_read_size, int)
            self.assertIsInstance(file_size, int)
            self.assertEqual(block_count, count_holder[0])
            count_holder[0] = count_holder[0] + 1
        second_temp = "%s.2" % support.TESTFN
        self.registerFileForCleanUp(second_temp)
        urllib.request.urlretrieve(
            self.constructLocalFileUrl(support.TESTFN),
            second_temp, hooktester)
    def test_reporthook_0_bytes(self):
        # Test on zero length file. Should call reporthook only 1 time.
        report = []
        def hooktester(block_count, block_read_size, file_size, _report=report):
            _report.append((block_count, block_read_size, file_size))
        srcFileName = self.createNewTempFile()
        urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
            support.TESTFN, hooktester)
        self.assertEqual(len(report), 1)
        self.assertEqual(report[0][2], 0)
    def test_reporthook_5_bytes(self):
        # Test on 5 byte file. Should call reporthook only 2 times (once when
        # the "network connection" is established and once when the block is
        # read).
        report = []
        def hooktester(block_count, block_read_size, file_size, _report=report):
            _report.append((block_count, block_read_size, file_size))
        srcFileName = self.createNewTempFile(b"x" * 5)
        urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
            support.TESTFN, hooktester)
        self.assertEqual(len(report), 2)
        self.assertEqual(report[0][2], 5)
        self.assertEqual(report[1][2], 5)
    def test_reporthook_8193_bytes(self):
        # Test on 8193 byte file. Should call reporthook only 3 times (once
        # when the "network connection" is established, once for the next 8192
        # bytes, and once for the last byte).
        report = []
        def hooktester(block_count, block_read_size, file_size, _report=report):
            _report.append((block_count, block_read_size, file_size))
        srcFileName = self.createNewTempFile(b"x" * 8193)
        urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
            support.TESTFN, hooktester)
        self.assertEqual(len(report), 3)
        self.assertEqual(report[0][2], 8193)
        self.assertEqual(report[0][1], 8192)
        self.assertEqual(report[1][1], 8192)
        self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
    """Test urllib.urlretrieve() using fake http connections"""
    def test_short_content_raises_ContentTooShortError(self):
        # Body ("FF") is shorter than the declared Content-Length of 100.
        self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1

FF
''')
        def _reporthook(par1, par2, par3):
            pass
        with self.assertRaises(urllib.error.ContentTooShortError):
            try:
                urllib.request.urlretrieve('http://example.com/',
                                           reporthook=_reporthook)
            finally:
                self.unfakehttp()
    def test_short_content_raises_ContentTooShortError_without_reporthook(self):
        # Same truncated payload, but without passing a reporthook.
        self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1

FF
''')
        with self.assertRaises(urllib.error.ContentTooShortError):
            try:
                urllib.request.urlretrieve('http://example.com/')
            finally:
                self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded by with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
    """Tests for unquote() and unquote_plus()

    See the doc string for quoting_Tests for details on quoting and such.

    """

    def test_unquoting(self):
        # Make sure unquoting of all ASCII values works
        escape_list = []
        for num in range(128):
            given = hexescape(chr(num))
            expect = chr(num)
            result = urllib.parse.unquote(given)
            self.assertEqual(expect, result,
                             "using unquote(): %r != %r" % (expect, result))
            result = urllib.parse.unquote_plus(given)
            self.assertEqual(expect, result,
                             "using unquote_plus(): %r != %r" %
                             (expect, result))
            escape_list.append(given)
        escape_string = ''.join(escape_list)
        del escape_list
        result = urllib.parse.unquote(escape_string)
        # After decoding, only the decoded '%25' contributes a literal '%'.
        self.assertEqual(result.count('%'), 1,
                         "using unquote(): not all characters escaped: "
                         "%s" % result)
        # Non-string inputs must be rejected.
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
        with support.check_warnings(('', BytesWarning), quiet=True):
            self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')

    def test_unquoting_badpercent(self):
        # Test unquoting on bad percent-escapes: malformed escapes are
        # passed through unchanged rather than raising.
        given = '%xab'
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%x'
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        given = '%'
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result, "using unquote(): %r != %r"
                         % (expect, result))
        # unquote_to_bytes
        given = '%xab'
        expect = bytes(given, 'ascii')
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        given = '%x'
        expect = bytes(given, 'ascii')
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        given = '%'
        expect = bytes(given, 'ascii')
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
        self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())

    def test_unquoting_mixed_case(self):
        # Test unquoting on mixed-case hex digits in the percent-escapes
        given = '%Ab%eA'
        expect = b'\xab\xea'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))

    def test_unquoting_parts(self):
        # Make sure unquoting works when have non-quoted characters
        # interspersed
        given = 'ab%sd' % hexescape('c')
        expect = "abcd"
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using quote(): %r != %r" % (expect, result))
        result = urllib.parse.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %r != %r" % (expect, result))

    def test_unquoting_plus(self):
        # Test difference between unquote() and unquote_plus(): only the
        # latter translates '+' to a space.
        given = "are+there+spaces..."
        expect = given
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        expect = given.replace('+', ' ')
        result = urllib.parse.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %r != %r" % (expect, result))

    def test_unquote_to_bytes(self):
        given = 'br%C3%BCckner_sapporo_20050930.doc'
        expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        # Test on a string with unescaped non-ASCII characters
        # (Technically an invalid URI; expect those characters to be UTF-8
        # encoded).
        result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
        expect = b'\xe6\xbc\xa2\xc3\xbc'    # UTF-8 for "\u6f22\u00fc"
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        # Test with a bytes as input
        given = b'%A2%D8ab%FF'
        expect = b'\xa2\xd8ab\xff'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))
        # Test with a bytes as input, with unescaped non-ASCII bytes
        # (Technically an invalid URI; expect those bytes to be preserved)
        given = b'%A2\xd8ab%FF'
        expect = b'\xa2\xd8ab\xff'
        result = urllib.parse.unquote_to_bytes(given)
        self.assertEqual(expect, result,
                         "using unquote_to_bytes(): %r != %r"
                         % (expect, result))

    def test_unquote_with_unicode(self):
        # Characters in the Latin-1 range, encoded with UTF-8
        given = 'br%C3%BCckner_sapporo_20050930.doc'
        expect = 'br\u00fcckner_sapporo_20050930.doc'
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # Characters in the Latin-1 range, encoded with None (default)
        result = urllib.parse.unquote(given, encoding=None, errors=None)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # Characters in the Latin-1 range, encoded with Latin-1
        result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
                                      encoding="latin-1")
        expect = 'br\u00fcckner_sapporo_20050930.doc'
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # Characters in BMP, encoded with UTF-8
        given = "%E6%BC%A2%E5%AD%97"
        expect = "\u6f22\u5b57"             # "Kanji"
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # Decode with UTF-8, invalid sequence
        given = "%F3%B1"
        expect = "\ufffd"                   # Replacement character
        result = urllib.parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # Decode with UTF-8, invalid sequence, replace errors
        result = urllib.parse.unquote(given, errors="replace")
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # Decode with UTF-8, invalid sequence, ignoring errors
        given = "%F3%B1"
        expect = ""
        result = urllib.parse.unquote(given, errors="ignore")
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # A mix of non-ASCII and percent-encoded characters, UTF-8
        result = urllib.parse.unquote("\u6f22%C3%BC")
        expect = '\u6f22\u00fc'
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        # A mix of non-ASCII and percent-encoded characters, Latin-1
        # (Note, the string contains non-Latin-1-representable characters)
        result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
        expect = '\u6f22\u00fc'
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace'
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII Encoding. Expect %3F with errors="replace'
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
    """Test pathname2url() and url2pathname()"""

    def test_basic(self):
        # Round-trip a simple relative path: OS separators become '/' in the
        # URL, and the URL converts back to the original path.
        expected_path = os.path.join("parts", "of", "a", "path")
        expected_url = "parts/of/a/path"
        result = urllib.request.pathname2url(expected_path)
        self.assertEqual(expected_url, result,
                         "pathname2url() failed; %s != %s" %
                         (result, expected_url))
        result = urllib.request.url2pathname(expected_url)
        # Fixed message typo: "url2pathame" -> "url2pathname".
        self.assertEqual(expected_path, result,
                         "url2pathname() failed; %s != %s" %
                         (result, expected_path))

    def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
        # url2pathname() respectively
        given = os.path.join("needs", "quot=ing", "here")
        expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
        result = urllib.request.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        # Converting the quoted URL back must restore the original path.
        expect = given
        result = urllib.request.url2pathname(result)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))
        given = os.path.join("make sure", "using_quote")
        expect = "%s/using_quote" % urllib.parse.quote("make sure")
        result = urllib.request.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        given = "make+sure/using_unquote"
        expect = os.path.join("make+sure", "using_unquote")
        result = urllib.request.url2pathname(given)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))

    @unittest.skipUnless(sys.platform == 'win32',
                         'test specific to the urllib.request.url2pathname function.')
    def test_ntpath(self):
        # All drive-letter URL spellings ('|' or ':', varying slashes)
        # normalize to 'C:\' on Windows.
        given = ('/C:/', '///C:/', '/C|//')
        expect = 'C:\\'
        for url in given:
            result = urllib.request.url2pathname(url)
            # Fixed message typo: "urllib.request..url2pathname".
            self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
                             (expect, result))
        given = '///C|/path'
        expect = 'C:\\path'
        result = urllib.request.url2pathname(given)
        self.assertEqual(expect, result,
                         'urllib.request.url2pathname() failed; %s != %s' %
                         (expect, result))
class Utility_Tests(unittest.TestCase):
    """Tests for miscellaneous utility functions in urllib."""

    def test_thishost(self):
        """urllib.request.thishost() must return a tuple (of IP addresses)."""
        host_info = urllib.request.thishost()
        self.assertIsInstance(host_info, tuple)
class URLopener_Tests(unittest.TestCase):
    """Testcase to test the open method of URLopener class."""

    def test_quoted_open(self):
        class DummyURLopener(urllib.request.URLopener):
            # Handler for the fake 'spam' scheme: echo back the URL exactly
            # as URLopener.open() passed it, so the test can inspect the
            # quoting that open() applied.
            def open_spam(self, url):
                return url
        # URLopener is deprecated; the expected DeprecationWarning is
        # asserted (and suppressed) here.
        with support.check_warnings(
                ('DummyURLopener style of invoking requests is deprecated.',
                 DeprecationWarning)):
            # The space in the path must be percent-quoted by open().
            self.assertEqual(DummyURLopener().open(
                'spam://example/ /'),'//example/%20/')

            # test the safe characters are not quoted by urlopen
            self.assertEqual(DummyURLopener().open(
                "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
                "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# These tests are just commented out, not removed.
# We can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work fine, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
    """Unit tests for urllib.request.Request."""

    def test_default_values(self):
        # Without data the request defaults to GET; supplying data (even an
        # empty mapping) flips the default method to POST.
        without_data = urllib.request.Request("http://www.python.org")
        self.assertEqual(without_data.get_method(), 'GET')
        with_data = urllib.request.Request("http://www.python.org", {})
        self.assertEqual(with_data.get_method(), 'POST')

    def test_with_method_arg(self):
        # An explicit method= argument overrides the data-based default, and
        # assigning to .method afterwards is honoured by get_method().
        url = "http://www.python.org"

        head_request = urllib.request.Request(url, method='HEAD')
        self.assertEqual(head_request.method, 'HEAD')
        self.assertEqual(head_request.get_method(), 'HEAD')

        head_with_data = urllib.request.Request(url, {}, method='HEAD')
        self.assertEqual(head_with_data.method, 'HEAD')
        self.assertEqual(head_with_data.get_method(), 'HEAD')

        get_request = urllib.request.Request(url, method='GET')
        self.assertEqual(get_request.get_method(), 'GET')
        get_request.method = 'HEAD'
        self.assertEqual(get_request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
    """Tests for url2pathname() with Windows-style expected results.

    NOTE(review): `url2pathname`/`pathname2url` are used unqualified here;
    presumably they are the NT implementations imported at module level --
    the expected values below are Windows drive-letter paths. Verify against
    the module's imports.
    """

    def test_converting_drive_letter(self):
        # Both '|' and ':' spell the drive-letter separator in file: URLs.
        self.assertEqual(url2pathname("///C|"), 'C:')
        self.assertEqual(url2pathname("///C:"), 'C:')
        self.assertEqual(url2pathname("///C|/"), 'C:\\')

    def test_converting_when_no_drive_letter(self):
        # cannot end a raw string in \
        self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
        self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')

    def test_simple_compare(self):
        self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
                         r'C:\foo\bar\spam.foo')

    def test_non_ascii_drive_letter(self):
        # A non-ASCII "drive letter" is rejected rather than converted.
        self.assertRaises(IOError, url2pathname, "///\u00e8|/")

    def test_roundtrip_url2pathname(self):
        # pathname2url() followed by url2pathname() must be the identity.
        list_of_paths = ['C:',
                         r'\\\C\test\\',
                         r'C:\foo\bar\spam.foo'
                         ]
        for path in list_of_paths:
            self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
    """Tests for pathname2url() with Windows-style input paths.

    NOTE(review): as in URL2PathNameTests, `pathname2url`/`url2pathname`
    are used unqualified; the inputs are Windows paths, so these appear to
    target the NT implementations -- verify against the module's imports.
    """

    def test_converting_drive_letter(self):
        self.assertEqual(pathname2url("C:"), '///C:')
        self.assertEqual(pathname2url("C:\\"), '///C:')

    def test_converting_when_no_drive_letter(self):
        # Leading backslashes map one-to-one onto leading slashes.
        self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
                         '/////folder/test/')
        self.assertEqual(pathname2url(r"\\folder\test" "\\"),
                         '////folder/test/')
        self.assertEqual(pathname2url(r"\folder\test" "\\"),
                         '/folder/test/')

    def test_simple_compare(self):
        self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
                         "///C:/foo/bar/spam.foo" )

    def test_long_drive_letter(self):
        # A multi-character "drive" is rejected.
        self.assertRaises(IOError, pathname2url, "XX:\\")

    def test_roundtrip_pathname2url(self):
        # url2pathname() followed by pathname2url() must be the identity.
        list_of_paths = ['///C:',
                         '/////folder/test/',
                         '///C:/foo/bar/spam.foo']
        for path in list_of_paths:
            self.assertEqual(pathname2url(url2pathname(path)), path)
# Run this module's full test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "9c7afc9100b3019b19ce64c5c4022c1c",
"timestamp": "",
"source": "github",
"line_count": 1483,
"max_line_length": 108,
"avg_line_length": 40.476736345246124,
"alnum_prop": 0.570176753794126,
"repo_name": "juanyaw/python",
"id": "58ca2a5cd84f1b236922fbd9a78852e6206e2436",
"size": "60027",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "cpython/Lib/test/test_urllib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "470920"
},
{
"name": "Batchfile",
"bytes": "35551"
},
{
"name": "C",
"bytes": "16518323"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "343272"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "254942"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "Makefile",
"bytes": "25026"
},
{
"name": "Objective-C",
"bytes": "1390263"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24911704"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from setuptools import setup

# Remove build status and move Gitter link under title for PyPi.
# Read the README with a context manager so the file handle is closed
# deterministically instead of leaking until garbage collection.
with open('README.rst') as readme_file:
    README = readme_file.read() \
        .replace('|Build Status|', '', 1) \
        .replace('|Gitter|', '', 1) \
        .replace('===\n', '===\n\n|Gitter|\n')

setup(
    name='django-cacheops',
    version='2.4.2',
    author='Alexander Schepanovski',
    author_email='suor.web@gmail.com',

    description='A slick ORM cache with automatic granular event-driven invalidation for Django.',
    long_description=README,
    url='http://github.com/Suor/django-cacheops',
    license='BSD',

    packages=[
        'cacheops',
        'cacheops.management',
        'cacheops.management.commands',
        'cacheops.templatetags'
    ],
    install_requires=[
        'django>=1.7',
        'redis>=2.9.1',
        'funcy>=1.2,<2.0',
        'six>=1.4.0',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Framework :: Django',
        'Framework :: Django :: 1.7',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],

    zip_safe=False,
    include_package_data=True
)
| {
"content_hash": "a110297ce553ee09900685bb89f4bc14",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 98,
"avg_line_length": 29.983050847457626,
"alnum_prop": 0.5692481628038439,
"repo_name": "bourivouh/django-cacheops",
"id": "15c4c82581bfee4c606f2aa74b92b64b14fdf918",
"size": "1769",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Lua",
"bytes": "2959"
},
{
"name": "Python",
"bytes": "111649"
}
],
"symlink_target": ""
} |
# Verify that wrapping an existing bytes object in a BytesIO does not copy
# the buffer: total allocated memory may grow by less than len(data) only.
try:
    import uio
    import micropython
    micropython.mem_total
except (ImportError, AttributeError):
    # Port lacks uio or micropython.mem_total -- skip this test.
    print("SKIP")
    raise SystemExit

payload = b"1234" * 256

mem_before = micropython.mem_total()
stream = uio.BytesIO(payload)  # keep a reference so the object stays alive
mem_after = micropython.mem_total()

print(mem_after - mem_before < len(payload))
| {
"content_hash": "5297373ea452ae000d02781380bdfca6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 37,
"avg_line_length": 15.631578947368421,
"alnum_prop": 0.6868686868686869,
"repo_name": "bvernoux/micropython",
"id": "3b9f141270d5aa3219bb78f1f909068ea2fe4ba4",
"size": "381",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "tests/micropython/heapalloc_bytesio2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "50694"
},
{
"name": "C",
"bytes": "19869126"
},
{
"name": "C++",
"bytes": "2489380"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "49218"
},
{
"name": "Objective-C",
"bytes": "8382"
},
{
"name": "Python",
"bytes": "856777"
},
{
"name": "Shell",
"bytes": "6229"
}
],
"symlink_target": ""
} |
import json
import uuid
from openstackclient.tests.functional import base
class NetworkTests(base.TestCase):
    """Functional tests for Network commands"""

    @classmethod
    def setUpClass(cls):
        super(NetworkTests, cls).setUpClass()
        # Query the service catalog once per class and cache the result, so
        # every test (and subclass) can check network-service availability
        # without repeating the lookup.
        cls.haz_network = cls.is_service_enabled('network')
class NetworkTagTests(NetworkTests):
    """Functional tests with tag operation

    Drives the real CLI through the full tag lifecycle (create with/without
    tags, set, unset, list filtering) for one resource type and asserts on
    the JSON output.  Concrete subclasses supply the resource noun.
    """

    # CLI resource noun (e.g. 'network'); must be set by concrete subclasses.
    base_command = None

    def test_tag_operation(self):
        # Get project IDs
        cmd_output = json.loads(self.openstack('token issue -f json '))
        auth_project_id = cmd_output['project_id']
        # Network create with no options
        name1 = self._create_resource_and_tag_check('', [])
        # Network create with tags
        name2 = self._create_resource_and_tag_check('--tag red --tag blue',
                                                    ['red', 'blue'])
        # Network create with no tag explicitly
        name3 = self._create_resource_and_tag_check('--no-tag', [])
        # 'set --tag' on an untagged resource simply adds the tags.
        self._set_resource_and_tag_check('set', name1, '--tag red --tag green',
                                         ['red', 'green'])
        list_expected = ((name1, ['red', 'green']),
                         (name2, ['red', 'blue']),
                         (name3, []))
        self._list_tag_check(auth_project_id, list_expected)
        # 'set --tag' on an already-tagged resource appends.
        self._set_resource_and_tag_check('set', name1, '--tag blue',
                                         ['red', 'green', 'blue'])
        # '--no-tag' combined with '--tag' clears first, then adds.
        self._set_resource_and_tag_check(
            'set', name1,
            '--no-tag --tag yellow --tag orange --tag purple',
            ['yellow', 'orange', 'purple'])
        # 'unset --tag' removes a single tag.
        self._set_resource_and_tag_check('unset', name1, '--tag yellow',
                                         ['orange', 'purple'])
        # 'unset --all-tag' removes every remaining tag.
        self._set_resource_and_tag_check('unset', name1, '--all-tag', [])
        # 'set --no-tag' alone also clears all tags.
        self._set_resource_and_tag_check('set', name2, '--no-tag', [])

    def _list_tag_check(self, project_id, expected):
        """Assert the long listing shows exactly the expected tags per name.

        :param project_id: project to filter the listing by
        :param expected: iterable of (resource_name, expected_tag_list)
        """
        cmd_output = json.loads(self.openstack(
            '{} list --long --project {} -f json'.format(self.base_command,
                                                         project_id)))
        for name, tags in expected:
            net = [n for n in cmd_output if n['Name'] == name][0]
            # Tag ordering is not guaranteed; compare as sets.
            self.assertEqual(set(tags), set(net['Tags']))

    def _create_resource_for_tag_test(self, name, args):
        """Create one resource with extra CLI args; return its parsed JSON."""
        return json.loads(self.openstack(
            '{} create -f json {} {}'.format(self.base_command, args, name)
        ))

    def _create_resource_and_tag_check(self, args, expected):
        """Create a uniquely named resource and verify its initial tags.

        Registers a cleanup that deletes the resource and returns the
        generated name for later assertions.
        """
        name = uuid.uuid4().hex
        cmd_output = self._create_resource_for_tag_test(name, args)
        self.addCleanup(
            self.openstack, '{} delete {}'.format(self.base_command, name))
        self.assertIsNotNone(cmd_output["id"])
        self.assertEqual(set(expected), set(cmd_output['tags']))
        return name

    def _set_resource_and_tag_check(self, command, name, args, expected):
        """Run 'set'/'unset' with args on name, then verify resulting tags."""
        cmd_output = self.openstack(
            '{} {} {} {}'.format(self.base_command, command, args, name)
        )
        # set/unset print nothing on success.
        self.assertFalse(cmd_output)
        cmd_output = json.loads(self.openstack(
            '{} show -f json {}'.format(self.base_command, name)
        ))
        self.assertEqual(set(expected), set(cmd_output['tags']))
| {
"content_hash": "b81a0d68286d4da1719ee23f51a03c00",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 40.36144578313253,
"alnum_prop": 0.5453731343283582,
"repo_name": "openstack/python-openstackclient",
"id": "2287f329309888fe6f6bd691e5b5b6ecd5264192",
"size": "3923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstackclient/tests/functional/network/v2/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "923"
},
{
"name": "Python",
"bytes": "5016301"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
from PyCat import pycat
class TestBoundaryValue:
    """Boundary-value tests for portCheck: 0 and 65535 are the valid limits."""

    def test_WeakNormal(self):
        # Both endpoints of the valid range, as int and as string.
        for port in (0, "0", 65535, "65535"):
            assert pycat.portCheck(port)

    def test_WeakRobust(self):
        # One step beyond each endpoint must be rejected.
        for port in (-1, "-1", 65536, "65536"):
            assert not pycat.portCheck(port)

    def test_StrongNormal(self):
        "Same as weak normal test"
        pass

    def test_StrongRobust(self):
        "Same as weak robust test"
        pass
class TestEquivalenceClass:
    """Equivalence-class tests: a representative valid port and invalid ones."""

    def test_WeakNormal(self):
        # A value from the middle of the valid class, as int and string.
        for port in (32768, "32768"):
            assert pycat.portCheck(port)

    def test_WeakRobust(self):
        # Negative, too large, and non-numeric inputs must be rejected.
        for port in (-32768, "-32768", 75535, "75535", "abc#$%"):
            assert not pycat.portCheck(port)

    def test_StrongNormal(self):
        "Same as weak normal test"
        pass

    def test_StrongRobust(self):
        "Same as weak robust test"
        pass
class TestEdge:
    """Edge tests just inside and just outside the boundary neighbours."""

    def test_WeakNormal(self):
        # One step inside each boundary, as int and as string.
        for port in (1, "1", 65534, "65534"):
            assert pycat.portCheck(port)

    def test_WeakRobust(self):
        # Two steps outside each boundary, plus empty and junk strings.
        for port in (-2, "-2", 65537, "65537", "", "@@@@@"):
            assert not pycat.portCheck(port)

    def test_StrongNormal(self):
        "Same as weak normal test"
        pass

    def test_StrongRobust(self):
        "Same as weak robust test"
        pass
| {
"content_hash": "d95720353d51ca7c9c4e36d41c45e399",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 44,
"avg_line_length": 23.933333333333334,
"alnum_prop": 0.6206128133704736,
"repo_name": "aweimeow/PyCat",
"id": "ba4832abdf810d35eb1bbb438dcf6be2da8e97a1",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_portcheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39030"
}
],
"symlink_target": ""
} |
from typing import Any, cast, Dict, Optional
from pyre_extensions import none_throws
from backend.common.consts.notification_type import NotificationType
from backend.common.models.district import District
from backend.common.models.notifications.notification import Notification
class DistrictPointsNotification(Notification):
    """Notification sent when a district's point totals are recalculated."""

    def __init__(self, district: District) -> None:
        self.district = district

    @classmethod
    def _type(cls) -> NotificationType:
        return NotificationType.DISTRICT_POINTS_UPDATED

    @property
    def fcm_notification(self) -> Optional[Any]:
        # Imported lazily so importing this module does not require firebase.
        from firebase_admin import messaging

        abbreviation = self.district.abbreviation.upper()
        display_name = self.district.display_name
        return messaging.Notification(
            title=f"{abbreviation} District Points Updated",
            body=f"{display_name} district point calculations have been updated.",
        )

    @property
    def data_payload(self) -> Optional[Dict[str, str]]:
        return {"district_key": self.district.key_name}

    @property
    def webhook_message_data(self) -> Optional[Dict[str, Any]]:
        # Webhook payload is the data payload plus a human-readable name.
        message_data = cast(Dict[str, Any], none_throws(self.data_payload))
        message_data["district_name"] = self.district.display_name
        return message_data
| {
"content_hash": "33bb626767d685f6c17adf904c046435",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 33.53846153846154,
"alnum_prop": 0.6735474006116208,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "59a6b12cf6eb747a3ec28e9f02d1fecaa43a8940",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/common/models/notifications/district_points.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
import datetime
from flexget.plugins.api_snep import SnepChartsConnector
from flexget.plugins.filter.charts import ChartsConnector, ChartsRelease, ChartsEntry
from tests import use_vcr
class SnepMockConnector(ChartsConnector):
    """Hand-built stand-in for the SNEP connector, returning a fixed release."""

    @property
    def organization(self):
        return 'snep'

    def retrieve_charts(self, charts_type='radio', date_interval='week', **kargs):
        """
        From http://www.snepmusique.com/tops-semaine/top-albums-fusionnes/?ye=2015&we=10
        at 2015-03-30
        :return: ChartsRelease
        """
        release = ChartsRelease()
        release.expires = datetime.date(2015, 3, 9)

        # (artist, company, title, rank, best_rank, charted_weeks)
        fixtures = (
            ("Louane", "MERCURY MUSIC GROUP / FONTANA",
             "CHAMBRE 12", 1, 1, 0),
            ("Christine and the Queens", "BECAUSE MUSIC / BECAUSE MUSIC",
             "CHALEUR HUMAINE", 2, 2, 38),
            ("Multi Interprètes", "DEF JAM RECORDINGS FRANCE / DEF JAM",
             "FIFTY SHADES OF GREY", 5, 3, 2),
            ("ZAZ", "PLAY ON / PLAY ON",
             "PARIS", 43, 2, 15),
        )
        for artist, company, title, rank, best_rank, weeks in fixtures:
            entry = ChartsEntry(
                artist=artist,
                title=title,
                rank=rank,
                best_rank=best_rank,
                charted_weeks=weeks)
            # 'company' is not a constructor argument; set it afterwards.
            entry.company = company
            release.entries.append(entry)
        return release
class TestSnepParser(object):
    """Checks the live SNEP connector output against the hand-built mock."""

    def test_organization(self):
        assert SnepChartsConnector().organization == 'snep'

    @use_vcr
    def test_parser(self):
        # Both connectors are asked for the same chart (week 10 of 2015);
        # the real connector's HTTP traffic is replayed from a VCR cassette.
        test_release = SnepChartsConnector().retrieve_charts('all_album', 'week', year=2015, week=10)
        mock_release = SnepMockConnector().retrieve_charts('all_album', 'week', year=2015, week=10)
        assert (len(test_release.entries) == 200), \
            "Expected 200 entries but parser produce %i entries." % len(test_release.entries)
        assert test_release.expires == mock_release.expires
        # Entries are rank-ordered, so index rank-1 must match the mock entry.
        for entry in mock_release.entries:
            TestSnepParser.check_charts_entry(test_release.entries[entry.rank - 1], entry)

    @staticmethod
    def check_charts_entry(unchecked, reference):
        """Kind of SnepParsedChartEntry.equals

        Field-by-field comparison; best_rank/charted_weeks only need to be
        at least as good / as long as the reference values.
        :param unchecked: ChartsEntry
        :param reference: ChartsEntry
        """
        assert unchecked is not None, "Expected a charts entry but got None"
        assert unchecked.artist == reference.artist, "Expected %s but got %s" % (reference.artist, unchecked.artist)
        assert unchecked.company == reference.company, "Expected %s but got %s" % (reference.company, unchecked.company)
        assert unchecked.title == reference.title, "Expected %s but got %s" % (reference.title, unchecked.title)
        assert unchecked.rank == reference.rank, "Expected %i but got %i" % (reference.rank, unchecked.rank)
        assert unchecked.best_rank <= reference.best_rank, "Expected better position than #%i but got #%i" % (reference.best_rank, unchecked.best_rank)
        assert unchecked.charted_weeks >= reference.charted_weeks, "Expected longer in charts than %i week(s) but got %i" % (reference.charted_weeks, unchecked.charted_weeks)
| {
"content_hash": "4cf12b7bf03ab40cf829b5c8a715bf5f",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 174,
"avg_line_length": 39.85263157894737,
"alnum_prop": 0.620972002113048,
"repo_name": "lildadou/Flexget",
"id": "f7e8959814dcd3e9e5189654d0ba44a6c49bc757",
"size": "3810",
"binary": false,
"copies": "1",
"ref": "refs/heads/music_snep",
"path": "tests/test_connector_snep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4878"
},
{
"name": "HTML",
"bytes": "26542"
},
{
"name": "JavaScript",
"bytes": "43172"
},
{
"name": "Python",
"bytes": "2526165"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# NOTE(review): the author name below contains a non-ASCII character, so a
# source-encoding declaration is required on Python 2 (harmless on Python 3).
from __future__ import unicode_literals

from setuptools import setup
import sys

# Hard runtime dependencies; the extras below are only needed for the test
# suite (installed with `pip install ppillar[test]`).
install_requires = [
    'pycrypto',
    'pyyaml',
]
extras_require = {
    'test': ['coverage', 'mock', 'nose'],
}

# Python 2.6 compatibility: argparse and unittest2 are not in its stdlib.
if sys.version_info < (2, 7, 0):
    install_requires.append('argparse')
    extras_require['test'].append('unittest2')

setup(
    name='ppillar',
    version='0.2.1',
    author='Tarjei Husøy',
    author_email='tarjei@roms.no',
    url='https://github.com/thusoy/public-pillar',
    description="A PKI-encrypted datastructure to keep secrets in the public",
    py_modules=['ppillar'],
    install_requires=install_requires,
    extras_require=extras_require,
    entry_points={
        'console_scripts': [
            'ppillar = ppillar:cli',
        ]
    },
    classifiers=[
        # 'Development Status :: 1 - Planning',
        # 'Development Status :: 2 - Pre-Alpha',
        # 'Development Status :: 3 - Alpha',
        'Development Status :: 4 - Beta',
        # 'Development Status :: 5 - Production/Stable',
        # 'Development Status :: 6 - Mature',
        # 'Development Status :: 7 - Inactive',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Security',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
)
| {
"content_hash": "050e239489d5f5b9a4a1b4c5ecacf8e2",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 30.770491803278688,
"alnum_prop": 0.5897709110282365,
"repo_name": "thusoy/public-pillar",
"id": "fd89ef5a948df955af0dcf2cfb395d5aa24007f7",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22017"
},
{
"name": "Shell",
"bytes": "2743"
}
],
"symlink_target": ""
} |
import json
import os
import re
import subprocess
import time
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Queues whose workers publish periodic stats files for deeper analysis.
normal_queues = [
    "deferred_work",
    "digest_emails",
    "email_mirror",
    "embed_links",
    "embedded_bots",
    "error_reports",
    "invites",
    "email_senders",
    "missedmessage_emails",
    "missedmessage_mobile_notifications",
    "outgoing_webhooks",
    "user_activity",
    "user_activity_interval",
    "user_presence",
]

# Nagios-style status codes and their display names.
OK = 0
WARNING = 1
CRITICAL = 2
UNKNOWN = 3
states = {
    0: "OK",
    1: "WARNING",
    2: "CRITICAL",
    3: "UNKNOWN",
}

# Per-queue limits (seconds) on how long clearing the backlog may take
# before warning resp. going critical; queues not listed use the defaults.
MAX_SECONDS_TO_CLEAR: DefaultDict[str, int] = defaultdict(
    lambda: 30,
    digest_emails=1200,
    missedmessage_mobile_notifications=120,
    embed_links=60,
)
CRITICAL_SECONDS_TO_CLEAR: DefaultDict[str, int] = defaultdict(
    lambda: 60,
    missedmessage_mobile_notifications=180,
    digest_emails=1800,
    embed_links=90,
)


def analyze_queue_stats(
    queue_name: str, stats: Dict[str, Any], queue_count_rabbitmqctl: int
) -> Dict[str, Any]:
    """Classify one queue as OK/WARNING/CRITICAL/UNKNOWN from its stats file.

    :param queue_name: name of the queue being analyzed.
    :param stats: parsed contents of the worker's stats file ({} if unusable).
    :param queue_count_rabbitmqctl: current queue depth from rabbitmqctl.
    """
    now = int(time.time())
    if stats == {}:
        return dict(status=UNKNOWN, name=queue_name, message="invalid or no stats data")

    stats_age = now - stats["update_time"]
    if stats_age > 180 and queue_count_rabbitmqctl > 10:
        # The worker has not refreshed its stats file in a while and the
        # queue has a backlog, so it is most likely stuck.
        #
        # TODO: There is a small race here: if the queue sat idle (empty)
        # for over 180 seconds and then received a sudden burst, we could
        # report CRITICAL in the tiny window before the worker wakes up and
        # rewrites the stats file.  In practice the worker logs statistics
        # immediately before processing the first event, so the window is
        # negligible.
        message = "queue appears to be stuck, last update {}, queue size {}".format(
            stats["update_time"], queue_count_rabbitmqctl
        )
        return dict(status=CRITICAL, name=queue_name, message=message)

    average_consume_time = stats["recent_average_consume_time"]
    if average_consume_time is None:
        # Freshly started queue; no estimate possible yet.  If it stays
        # stuck in this state, the update_time rule above will fire.
        return dict(status=OK, name=queue_name, message="")

    backlog_seconds = queue_count_rabbitmqctl * average_consume_time
    if backlog_seconds > MAX_SECONDS_TO_CLEAR[queue_name]:
        # Same nesting as the thresholds imply: critical wins over warning.
        if backlog_seconds > CRITICAL_SECONDS_TO_CLEAR[queue_name]:
            verdict = CRITICAL
        else:
            verdict = WARNING
        return dict(
            status=verdict,
            name=queue_name,
            message=f"clearing the backlog will take too long: {backlog_seconds}s, size: {queue_count_rabbitmqctl}",
        )

    return dict(status=OK, name=queue_name, message="")
# Plain size thresholds for queues without stats files.
WARN_COUNT_THRESHOLD_DEFAULT = 10
CRITICAL_COUNT_THRESHOLD_DEFAULT = 50


def check_other_queues(queue_counts_dict: Dict[str, int]) -> List[Dict[str, Any]]:
    """Do a simple queue size check for queues whose workers don't publish stats files."""
    results = []
    for queue_name, depth in queue_counts_dict.items():
        if queue_name in normal_queues:
            # Stats-publishing queues are handled by analyze_queue_stats().
            continue
        if depth > CRITICAL_COUNT_THRESHOLD_DEFAULT:
            verdict, message = CRITICAL, f"count critical: {depth}"
        elif depth > WARN_COUNT_THRESHOLD_DEFAULT:
            verdict, message = WARNING, f"count warning: {depth}"
        else:
            verdict, message = OK, ""
        results.append(dict(status=verdict, name=queue_name, message=message))
    return results
def check_rabbitmq_queues() -> None:
    """Check all RabbitMQ queues and print one Nagios-style status line.

    Combines `rabbitmqctl list_queues` depths with the per-queue stats files
    written by the workers, then prints "<epoch>|<code>|<state>|<detail>".
    """
    # rabbitmqctl prints "name<TAB>messages<TAB>consumers" rows.
    pattern = re.compile(r"(\w+)\t(\d+)\t(\d+)")
    if "USER" in os.environ and not os.environ["USER"] in ["root", "rabbitmq"]:
        # NOTE(review): this only warns and then continues; presumably
        # rabbitmqctl itself fails below when privileges are insufficient.
        print("This script must be run as the root or rabbitmq user")

    list_queues_output = subprocess.check_output(
        ["/usr/sbin/rabbitmqctl", "list_queues", "name", "messages", "consumers"],
        universal_newlines=True,
    )
    queue_counts_rabbitmqctl = {}
    queues_with_consumers = []
    for line in list_queues_output.split("\n"):
        line = line.strip()
        m = pattern.match(line)
        if m:
            queue = m.group(1)
            count = int(m.group(2))
            consumers = int(m.group(3))
            queue_counts_rabbitmqctl[queue] = count
            if consumers > 0 and not queue.startswith("notify_tornado"):
                queues_with_consumers.append(queue)

    # Resolve the directory containing the workers' stats files.
    queue_stats_dir = subprocess.check_output(
        [os.path.join(ZULIP_PATH, "scripts/get-django-setting"), "QUEUE_STATS_DIR"],
        universal_newlines=True,
    ).strip()
    queue_stats: Dict[str, Dict[str, Any]] = {}
    # Only known queues that currently have an active consumer are analyzed.
    queues_to_check = set(normal_queues).intersection(set(queues_with_consumers))
    for queue in queues_to_check:
        fn = queue + ".stats"
        file_path = os.path.join(queue_stats_dir, fn)
        if not os.path.exists(file_path):
            queue_stats[queue] = {}
            continue

        with open(file_path) as f:
            try:
                queue_stats[queue] = json.load(f)
            except json.decoder.JSONDecodeError:
                # The worker may be mid-write; treat as missing stats.
                queue_stats[queue] = {}

    results = []
    for queue_name, stats in queue_stats.items():
        results.append(analyze_queue_stats(queue_name, stats, queue_counts_rabbitmqctl[queue_name]))

    results.extend(check_other_queues(queue_counts_rabbitmqctl))

    # Overall status is the worst individual verdict.
    status = max(result["status"] for result in results)

    now = int(time.time())

    if status > 0:
        queue_error_template = "queue {} problem: {}:{}"
        error_message = "; ".join(
            queue_error_template.format(result["name"], states[result["status"]], result["message"])
            for result in results
            if result["status"] > 0
        )
        print(f"{now}|{status}|{states[status]}|{error_message}")
    else:
        print(f"{now}|{status}|{states[status]}|queues normal")
| {
"content_hash": "5e9f1e4172828bc06b8bc623f3d31ccf",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 120,
"avg_line_length": 34.48947368421052,
"alnum_prop": 0.6262780405920952,
"repo_name": "eeshangarg/zulip",
"id": "cae5b05f11a4985035af1fe98447f9c98189ea1a",
"size": "6553",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "scripts/lib/check_rabbitmq_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
} |
"""Collect tweets for language-specific search terms and dump the filtered
results to one JSON file per (language, term) pair."""
import json
import os

from twython import Twython

from response_filter import ResponseFilter

# BUG FIX: the original referenced APP_KEY/APP_SECRET without ever defining
# them (a NameError at runtime — the credentials were stripped from source).
# Read them from the environment instead, so they never live in the repo.
APP_KEY = os.environ['TWITTER_APP_KEY']
APP_SECRET = os.environ['TWITTER_APP_SECRET']

# Exchange the app credentials for an app-only (OAuth 2) bearer token.
twitter = Twython(APP_KEY, APP_SECRET, oauth_version=2)
ACCESS_TOKEN = twitter.obtain_access_token()
twitter = Twython(APP_KEY, access_token=ACCESS_TOKEN)

languages = ['en', 'ru', 'de']
# Search terms per language (2015/16-era politics / refugee-crisis topics).
search_params = {'en': ['#SyrianRefugees', 'berniesanders', 'donaldtrump', 'hillaryclinton', 'jebbush', '#GOPDebate', 'barackobama', 'joebiden'], 'ru': ['Сирии', 'выборы', 'Украина', 'berniesanders', 'donaldtrump', '#Сенаторы', 'Константин Добрынин', 'Аркадий Дворкович'],
                 'de': ['wahlen', 'syrien', 'krieg', 'berniesanders', 'donaldtrump', 'angelamerkel', 'flüchtlinge']}

for language in languages:
    for search_param in search_params[language]:
        # 'mixed' returns popular plus recent tweets; 100 is the API maximum.
        response = twitter.search(q=search_param, lang=language, count='100', result_type='mixed')
        response_filter = ResponseFilter()
        # NOTE(review): search terms containing '#' or spaces end up in the
        # filename verbatim — confirm that is acceptable for downstream use.
        filename = (language + '_' + search_param + '.json')
        with open(filename, 'w+') as output_file:
            json_response = response_filter.filter(response['statuses'])
            json.dump(json_response, output_file)
| {
"content_hash": "a43cfaec775b3995369b1214de496783",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 272,
"avg_line_length": 46.583333333333336,
"alnum_prop": 0.6717352415026834,
"repo_name": "mostly-cookies/InfoRetrieval01",
"id": "60d100353a25f6b1ffb59093298b4bce482bd81f",
"size": "1226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitter_scraper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2513"
}
],
"symlink_target": ""
} |
# Ansible module metadata: interface maturity and who supports the module.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_facts
short_description: Gather facts on instances of Apache CloudStack based clouds.
description:
- This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
filter:
description:
- Filter for a specific fact.
required: false
default: null
choices:
- cloudstack_service_offering
- cloudstack_availability_zone
- cloudstack_public_hostname
- cloudstack_public_ipv4
- cloudstack_local_hostname
- cloudstack_local_ipv4
- cloudstack_instance_id
- cloudstack_user_data
requirements: [ 'yaml' ]
'''
EXAMPLES = '''
# Gather all facts on instances
- name: Gather cloudstack facts
cs_facts:
# Gather specific fact on instances
- name: Gather cloudstack facts
cs_facts: filter=cloudstack_instance_id
'''
RETURN = '''
---
cloudstack_availability_zone:
description: zone the instance is deployed in.
returned: success
type: string
sample: ch-gva-2
cloudstack_instance_id:
description: UUID of the instance.
returned: success
type: string
sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_local_hostname:
description: local hostname of the instance.
returned: success
type: string
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_local_ipv4:
description: local IPv4 of the instance.
returned: success
type: string
sample: 185.19.28.35
cloudstack_public_hostname:
description: public IPv4 of the router. Same as C(cloudstack_public_ipv4).
returned: success
type: string
sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
cloudstack_public_ipv4:
description: public IPv4 of the router.
returned: success
type: string
sample: 185.19.28.35
cloudstack_service_offering:
description: service offering of the instance.
returned: success
type: string
sample: Micro 512mb 1cpu
cloudstack_user_data:
description: data of the instance provided by users.
returned: success
type: dict
sample: { "bla": "foo" }
'''
import os
try:
import yaml
has_lib_yaml = True
except ImportError:
has_lib_yaml = False
# URL templates for the CloudStack metadata service; '%s' is the server IP.
CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
CS_USERDATA_BASE_URL = "http://%s/latest/user-data"


class CloudStackFacts(object):
    """Gathers instance facts from the CloudStack metadata service.

    The metadata server is the DHCP server of the instance's network, so
    its IP is discovered by parsing the local dhclient leases file.
    """

    def __init__(self):
        self.facts = ansible_facts(module)
        # Cached IP of the metadata (DHCP) server; resolved lazily.
        self.api_ip = None
        # Map of fact name -> metadata API path component.
        self.fact_paths = {
            'cloudstack_service_offering': 'service-offering',
            'cloudstack_availability_zone': 'availability-zone',
            'cloudstack_public_hostname': 'public-hostname',
            'cloudstack_public_ipv4': 'public-ipv4',
            'cloudstack_local_hostname': 'local-hostname',
            'cloudstack_local_ipv4': 'local-ipv4',
            'cloudstack_instance_id': 'instance-id'
        }

    def run(self):
        """Return the requested fact(s) as a dict.

        Honors the module's 'filter' parameter; with no filter, every known
        fact plus the user data is gathered.
        """
        result = {}
        # Renamed from 'filter' so the builtin is not shadowed.
        fact_filter = module.params.get('filter')
        if not fact_filter:
            # iteritems() was Python-2-only; items() behaves identically
            # on both major versions.
            for key, path in self.fact_paths.items():
                result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
            result['cloudstack_user_data'] = self._get_user_data_json()
        elif fact_filter == 'cloudstack_user_data':
            result['cloudstack_user_data'] = self._get_user_data_json()
        elif fact_filter in self.fact_paths:
            result[fact_filter] = self._fetch(
                CS_METADATA_BASE_URL + "/" + self.fact_paths[fact_filter])
        return result

    def _get_user_data_json(self):
        """Fetch and parse user data; return None if missing or unparseable."""
        try:
            # This data comes from users, we try what we can to parse it...
            # NOTE(review): yaml.load() on user-supplied data is unsafe with
            # PyYAML's default loader; consider yaml.safe_load().
            return yaml.load(self._fetch(CS_USERDATA_BASE_URL))
        except Exception:
            # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return None

    def _fetch(self, path):
        """GET the given URL template against the API IP; body or None."""
        api_ip = self._get_api_ip()
        if not api_ip:
            return None
        api_url = path % api_ip
        (response, info) = fetch_url(module, api_url, force=True)
        if response:
            data = response.read()
        else:
            data = None
        return data

    def _get_dhcp_lease_file(self):
        """Return the path of the lease file."""
        default_iface = self.facts['default_ipv4']['interface']
        dhcp_lease_file_locations = [
            '/var/lib/dhcp/dhclient.%s.leases' % default_iface,      # debian / ubuntu
            '/var/lib/dhclient/dhclient-%s.leases' % default_iface,  # centos 6
            '/var/lib/dhclient/dhclient--%s.lease' % default_iface,  # centos 7
            '/var/db/dhclient.leases.%s' % default_iface,            # openbsd
        ]
        for file_path in dhcp_lease_file_locations:
            if os.path.exists(file_path):
                return file_path
        module.fail_json(msg="Could not find dhclient leases file.")

    def _get_api_ip(self):
        """Return the IP of the DHCP server."""
        if not self.api_ip:
            dhcp_lease_file = self._get_dhcp_lease_file()
            for line in open(dhcp_lease_file):
                if 'dhcp-server-identifier' in line:
                    # get IP of string "option dhcp-server-identifier 185.19.28.176;"
                    # str.translate(None, ';') only works on Python 2;
                    # replace() gives the same result on both versions.
                    line = line.replace(';', '')
                    self.api_ip = line.split()[2]
                    break
            if not self.api_ip:
                module.fail_json(msg="No dhcp-server-identifier found in leases file.")
        return self.api_ip
def main():
    """Entry point: parse module arguments, gather facts, exit with results."""
    global module
    module = AnsibleModule(
        argument_spec = dict(
            # Optional single-fact filter; None gathers every fact.
            filter = dict(default=None, choices=[
                'cloudstack_service_offering',
                'cloudstack_availability_zone',
                'cloudstack_public_hostname',
                'cloudstack_public_ipv4',
                'cloudstack_local_hostname',
                'cloudstack_local_ipv4',
                'cloudstack_instance_id',
                'cloudstack_user_data',
            ]),
        ),
        supports_check_mode=False
    )

    # yaml is needed to parse user data; fail fast when it is missing.
    if not has_lib_yaml:
        module.fail_json(msg="missing python library: yaml")

    cs_facts = CloudStackFacts().run()
    cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
    module.exit_json(**cs_facts_result)

# Ansible convention: utility helpers are star-imported at the bottom of the
# module, after the definitions that use them (names resolve at call time).
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.facts import *

if __name__ == '__main__':
    main()
| {
"content_hash": "4ed64e67238d3a530e586620bbbfa6f7",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 127,
"avg_line_length": 31.54854368932039,
"alnum_prop": 0.6117864286813356,
"repo_name": "nwiizo/workspace_2017",
"id": "6f51127df65ba04c6b23a25ad986f1f032d88161",
"size": "7242",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "ansible-modules-extras/cloud/cloudstack/cs_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
} |
"""
Space-Time animation for one dimensional cellular automata.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
##########################################################################
## Automata
##########################################################################
class Automata(object):
    """One-dimensional, two-state cellular automaton on a circular row.

    The space-time diagram is stored in ``world`` (height x width): row 0 is
    the initial condition and each iteration step computes one more row.
    """

    def __init__(self, rule, width=100, height=100, randstart=False):
        """
        :param rule: Wolfram rule number (int or numeric string), or an
            explicit 8-element successor lookup table.
        :param width: cells per row (the circular world size).
        :param height: number of time steps stored (rows of the diagram).
        :param randstart: seed row 0 randomly instead of one centered cell.
        """
        self.width = width
        self.height = height
        self.rule = rule
        self.time = 0
        self.init_world(randstart)

    @property
    def rule(self):
        return self._rule

    @rule.setter
    def rule(self, rule):
        # Accept a numeric string, a Wolfram rule number, or a ready table.
        # (The original tested 'basestring', which no longer exists on
        # Python 3.)
        if isinstance(rule, str):
            rule = int(rule)
        if isinstance(rule, int):
            # Bit i of the rule number is the successor state for
            # neighborhood value i (4*left + 2*cell + right).  Bit shifts
            # replace the original true-division arithmetic, which produced
            # a float lookup table under Python 3.
            self._rule = [(rule >> i) & 1 for i in range(8)]
        else:
            self._rule = rule

    @property
    def shape(self):
        return (self.height, self.width)

    def init_world(self, randstart=False):
        """Reset the diagram; seed row 0 randomly or with one center cell."""
        self.world = np.zeros(shape=self.shape)
        if randstart:
            # 80% chance of a live cell at each position.
            self.world[0] = np.random.choice((0, 1), self.width, p=(0.2, 0.8))
        else:
            # Floor division: the original 'width/2' yields a float index
            # under Python 3, which numpy rejects.
            self.world[0, self.width // 2] = 1
        return self.world

    def compute_states(self, state):
        """Yield the successor of every cell, wrapping around the edges."""
        N = self.width
        for j in range(N):
            left = state[(j - 1) % N]
            cell = state[j]
            right = state[(j + 1) % N]
            # int() because numpy rows hold floats.
            yield self.rule[int(4 * left + 2 * cell + right)]

    def __len__(self):
        return self.width * self.height

    def __iter__(self):
        return self

    def __next__(self):
        # Get state at current time, then increment time.
        state = self.world[self.time]
        self.time += 1
        # Halting condition: the diagram is full.
        if self.time >= self.height:
            raise StopIteration()
        # Calculate the world at this timestep.
        self.world[self.time] = np.array(list(self.compute_states(state)))
        return self.world[self.time]

    # Python 2 compatibility alias for the iterator protocol.
    next = __next__
if __name__ == '__main__':
    # Manual demo: animate rule 110 from a random initial row.
    from animation import AutomataAnimation

    #automata = Automata(110, width=1280, height=720, randstart=False)
    automata = Automata(110, width=100, height=100, randstart=True)
    animation = AutomataAnimation(automata)
    animation.show()
| {
"content_hash": "6353f47352c656e767261162a8466713",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 77,
"avg_line_length": 27.697674418604652,
"alnum_prop": 0.4966414777497901,
"repo_name": "bbengfort/cellular-automata",
"id": "9b41166fe5b7688855f2a2ca1646e6aced798937",
"size": "2656",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyca/simple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15714"
}
],
"symlink_target": ""
} |
"""
heroku.helpers
~~~~~~~~~~~~~~
This module contians the helpers.
"""
from datetime import datetime
from dateutil.parser import parse as parse_datetime
import sys
if sys.version_info > (3, 0):
    # Python 3 has no basestring; emulate it so the isinstance checks in
    # this module cover both text and byte strings.
    basestring = (str, bytes)


def is_collection(obj):
    """Tests if an object is a collection.

    Indexable objects count as collections, except strings (text or bytes),
    which are deliberately excluded.
    """
    if isinstance(obj, basestring):
        return False
    return bool(getattr(obj, '__getitem__', False))
# from kennethreitz/python-github3
def to_python(obj,
              in_dict,
              str_keys=None,
              date_keys=None,
              int_keys=None,
              object_map=None,
              bool_keys=None,
              dict_keys=None,
              **kwargs):
    """Extends a given object for API Consumption.

    :param obj: Object to extend.
    :param in_dict: Dict to extract data from.
    :param str_keys: List of in_dict keys that will be extracted as strings.
    :param date_keys: List of in_dict keys that will be extracted as datetimes.
    :param int_keys: List of in_dict keys that will be extracted as ints.
    :param object_map: Dict of {key, obj} map, for nested object results.
    :param bool_keys: List of in_dict keys that will be extracted as bools.
    :param dict_keys: List of in_dict keys that will be extracted as dicts.
    Any extra keyword arguments are copied onto the object unchanged.
    """
    d = dict()

    if str_keys:
        for in_key in str_keys:
            # Missing keys become None attributes.
            d[in_key] = in_dict.get(in_key)

    if date_keys:
        for in_key in date_keys:
            in_date = in_dict.get(in_key)
            # BUG FIX: the original caught TypeError only to re-raise it,
            # leaving an unreachable "out_date = None" behind.  The evident
            # intent was a None fallback for missing/non-string values.
            # (ValueError from unparseable strings still propagates.)
            try:
                out_date = parse_datetime(in_date)
            except TypeError:
                out_date = None
            d[in_key] = out_date

    if int_keys:
        for in_key in int_keys:
            if (in_dict is not None) and (in_dict.get(in_key) is not None):
                d[in_key] = int(in_dict.get(in_key))

    if bool_keys:
        for in_key in bool_keys:
            if in_dict.get(in_key) is not None:
                d[in_key] = bool(in_dict.get(in_key))

    if dict_keys:
        for in_key in dict_keys:
            if in_dict.get(in_key) is not None:
                d[in_key] = dict(in_dict.get(in_key))

    if object_map:
        for (k, v) in object_map.items():
            if in_dict.get(k):
                # Nested payloads are materialized via the mapped class.
                d[k] = v.new_from_dict(in_dict.get(k))

    obj.__dict__.update(d)
    obj.__dict__.update(kwargs)

    # obj._cache / obj.__cache bookkeeping was already disabled upstream.
    return obj
# from kennethreitz/python-github3
def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None):
"""Extends a given object for API Production."""
# Cast all int_keys to int()
if int_keys:
for in_key in int_keys:
if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
in_dict[in_key] = int(in_dict[in_key])
# Cast all date_keys to datetime.isoformat
if date_keys:
for in_key in date_keys:
if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
_from = in_dict[in_key]
if isinstance(_from, basestring):
dtime = parse_datetime(_from)
elif isinstance(_from, datetime):
dtime = _from
in_dict[in_key] = dtime.isoformat()
elif (in_key in in_dict) and in_dict.get(in_key, None) is None:
del in_dict[in_key]
# Remove all Nones
for k, v in in_dict.items():
if v is None:
del in_dict[k]
return in_dict
| {
"content_hash": "9697d7257509772b708a618c8b9f0d3d",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 25.713178294573645,
"alnum_prop": 0.5562255049743744,
"repo_name": "musclegenes/heroku.py-3.4",
"id": "26f45a6c2ded541ad5b30b48a03ed6d857ee5ffe",
"size": "3342",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heroku/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29213"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2014 Roland Bettinelli
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Core BPMN Package - Common
'''
from Core.Foundation.models import RootElement, BaseElement
from Core.Common.fonctions import residual_args
##########################################################
# Artifacts
AssociationDirection = ['None', 'One', 'Both']
class Artifact(BaseElement):
    '''
    Abstract base for BPMN Artifacts (Association, Group, TextAnnotation).
    Inherits the attributes and model associations of BaseElement.
    '''
    def __init__(self, id, **kwargs):
        '''
        id:str
            Unique identifier, forwarded to BaseElement.
        '''
        super(Artifact, self).__init__(id, **kwargs)
        # Flag leftover keyword arguments only when Artifact itself is
        # instantiated, never when a subclass initializer delegates here.
        if type(self).__name__ == 'Artifact':
            residual_args(self.__init__, **kwargs)
class Association(Artifact):
    '''
    Artifact connecting two BaseElements, optionally with directionality.
    Inherits the attributes and model associations of BaseElement.
    '''
    def __init__(self, id, sourceRef, targetRef, associationDirection='None', **kwargs):
        '''
        sourceRef:BaseElement
            Element the Association connects from.
        targetRef:BaseElement
            Element the Association connects to.
        associationDirection:AssociationDirection enum (default='None') {'None'|'One'|'Both'}
            Whether the Association is drawn with arrowheads: 'One' puts an
            arrowhead at the target, 'Both' at both ends.
        '''
        super(Association, self).__init__(id, **kwargs)
        self.sourceRef = sourceRef
        self.targetRef = targetRef
        if associationDirection not in AssociationDirection:
            raise Exception  # invalid direction; to be precised
        self.associationDirection = associationDirection
        if type(self).__name__ == 'Association':
            residual_args(self.__init__, **kwargs)
class Group(Artifact):
    '''
    Artifact that informally groups diagram elements visually.  A Group is
    the visual depiction of a single CategoryValue; graphical elements
    within the Group are assigned that CategoryValue.
    '''
    def __init__(self, id, **kwargs):
        '''
        categoryValueRef:CategoryValue
            The CategoryValue the Group represents.  The Category name and
            the CategoryValue, joined by ".", provide the Group label.
        '''
        super(Group, self).__init__(id, **kwargs)
        self.categoryValueRef = kwargs.pop('categoryValueRef', None)
        if type(self).__name__ == 'Group':
            residual_args(self.__init__, **kwargs)
class Category(RootElement):
    '''
    Named category grouping one or more CategoryValues.
    '''
    def __init__(self, id, name, **kwargs):
        '''
        name:str
            The descriptive name of the element.
        categoryValue:CategoryValue list
            One or more values of the Category.
        '''
        super(Category, self).__init__(id, **kwargs)
        self.name = name
        self.categoryValue = kwargs.pop('categoryValue', [])
        if self.__class__.__name__ == 'Category':
            # BUGFIX: was ``**kwags`` (typo), raising NameError whenever a
            # Category was instantiated directly.
            residual_args(self.__init__, **kwargs)
class CategoryValue(BaseElement):
    '''
    A single value of a Category, usable as the label of a Group.
    '''
    def __init__(self, id, value, **kwargs):
        '''
        value:str
            The value of the CategoryValue element.
        category:Category
            The Category that contains this CategoryValue.
        categorizedFlowElements:FlowElement list
            All elements (e.g., Events, Activities, Gateways, and
            Artifacts) within the boundaries of the Group.
        '''
        super(CategoryValue, self).__init__(id, **kwargs)
        self.value = value
        self.category = kwargs.pop('category', None)
        self.categorizedFlowElements = kwargs.pop('categorizedFlowElements', [])
        # CONSISTENCY FIX: every other concrete class in this module warns
        # about leftover kwargs; CategoryValue omitted the check.
        if self.__class__.__name__ == 'CategoryValue':
            residual_args(self.__init__, **kwargs)
class TextAnnotation(Artifact):
    '''
    Artifact carrying free text the modeler wants to show on the diagram.
    '''
    def __init__(self, id, text, textFormat='text/plain', **kwargs):
        '''
        text:str
            Text the modeler wishes to communicate to the reader of the
            Diagram.
        textFormat:str (default='text/plain')
            Format of the text; MUST follow the mimetype format.
        '''
        # BUGFIX: the original called
        # ``super(...).__init__(self, id, **kwargs)`` -- the extra ``self``
        # shifted the arguments and raised TypeError on instantiation.
        super(TextAnnotation, self).__init__(id, **kwargs)
        self.text = text
        self.textFormat = textFormat
        # CONSISTENCY FIX: add the leftover-kwargs check used by the other
        # concrete classes in this module.
        if self.__class__.__name__ == 'TextAnnotation':
            residual_args(self.__init__, **kwargs)
##########################################################
# Correlations
class CorrelationKey(BaseElement):
    '''
    Composite correlation key built from one or more CorrelationProperties,
    each acting as a partial key.  For every Message exchanged in a
    Conversation, the CorrelationProperties supply a
    CorrelationPropertyRetrievalExpression whose FormalExpression extracts
    the relevant portion of that Message's payload.
    '''
    def __init__(self, id, **kwargs):
        '''
        name:str
            Optional name of the CorrelationKey.
        correlationPropertyRef:CorrelationProperty list
            The partial keys making up this CorrelationKey.
        '''
        super(CorrelationKey, self).__init__(id, **kwargs)
        self.name = kwargs.pop('name', None)
        self.correlationPropertyRef = kwargs.pop('correlationPropertyRef', [])
        if type(self).__name__ == 'CorrelationKey':
            residual_args(self.__init__, **kwargs)
class CorrelationProperty(RootElement):
    '''
    Partial correlation key: an extraction rule over Message payloads.
    '''
    def __init__(self, id, correlationPropertyRetrievalExpression, **kwargs):
        '''
        name:str
            Name of the CorrelationProperty.
        type:str
            Type of the CorrelationProperty.
        correlationPropertyRetrievalExpression:CorrelationPropertyRetrievalExpression list (min len = 1)
            Associations of FormalExpressions (extraction paths) to the
            specific Messages occurring in this Conversation.
        '''
        super(CorrelationProperty, self).__init__(id, **kwargs)
        self.name = kwargs.pop('name', None)
        self.type = kwargs.pop('type', None)
        self.correlationPropertyRetrievalExpression = correlationPropertyRetrievalExpression
        # CONSISTENCY FIX: add the leftover-kwargs check used by the other
        # concrete classes in this module.
        if self.__class__.__name__ == 'CorrelationProperty':
            residual_args(self.__init__, **kwargs)
class CorrelationPropertyRetrievalExpression(BaseElement):
    '''
    Binds a payload-extraction FormalExpression to a specific Message.
    '''
    def __init__(self, id, messagePath, messageRef, **kwargs):
        '''
        messagePath:FormalExpression
            How to extract a CorrelationProperty from the Message payload.
        messageRef:Message
            The Message the FormalExpression extracts the property from.
        '''
        super(CorrelationPropertyRetrievalExpression, self).__init__(id, **kwargs)
        self.messagePath = messagePath
        self.messageRef = messageRef
        if type(self).__name__ == 'CorrelationPropertyRetrievalExpression':
            residual_args(self.__init__, **kwargs)
class CorrelationSubscription(BaseElement):
    '''
    Subscription of a Process to a CorrelationKey, with optional bindings
    into the Process context.
    '''
    def __init__(self, id, correlationKeyRef, **kwargs):
        '''
        correlationKeyRef:CorrelationKey
            The CorrelationKey this subscription refers to.
        correlationPropertyBinding:CorrelationPropertyBinding list
            Bindings to specific CorrelationProperties and
            FormalExpressions (extraction rules atop the Process context).
        '''
        super(CorrelationSubscription, self).__init__(id, **kwargs)
        self.correlationKeyRef = correlationKeyRef
        self.correlationPropertyBinding = kwargs.pop('correlationPropertyBinding', [])
        if type(self).__name__ == 'CorrelationSubscription':
            residual_args(self.__init__, **kwargs)
class CorrelationPropertyBinding(BaseElement):
    '''
    Binds one CorrelationProperty to an extraction rule over the Process
    context.
    '''
    def __init__(self, id, dataPath, correlationPropertyRef, **kwargs):
        '''
        dataPath:FormalExpression
            Extraction rule evaluated against the Process context.
        correlationPropertyRef:CorrelationProperty
            The CorrelationProperty this binding refers to.
        '''
        super(CorrelationPropertyBinding, self).__init__(id, **kwargs)
        self.dataPath = dataPath
        self.correlationPropertyRef = correlationPropertyRef
        if type(self).__name__ == 'CorrelationPropertyBinding':
            residual_args(self.__init__, **kwargs)
##########################################################
# Error (as Error Event)
class Error(RootElement):
    '''
    Definition of an Error, referenced by Error Events.
    '''
    def __init__(self, id, name, errorCode, **kwargs):
        '''
        name:str
            Descriptive name of the Error.
        errorCode:str
            Code attached to the Error.  End Events and Intermediate Events
            in normal flow "throw" the Error and MUST supply the code when
            the Process is executable.  A boundary Intermediate Event
            "catches": with no errorCode any Error triggers it; with one,
            only a matching Error does.
        structureRef:ItemDefinition
            Optional ItemDefinition describing the Error "payload".
        '''
        super(Error, self).__init__(id, **kwargs)
        self.name = name
        self.errorCode = errorCode
        self.structureRef = kwargs.pop('structureRef', None)
        if type(self).__name__ == 'Error':
            residual_args(self.__init__, **kwargs)
##########################################################
# Escalation
class Escalation(RootElement):
    '''
    Definition of an Escalation, referenced by Escalation Events.
    '''
    def __init__(self, id, name, escalationCode, **kwargs):
        '''
        name:str
            Descriptive name of the Escalation.
        escalationCode:str
            Code attached to the Escalation.  End Events and Intermediate
            Events in normal flow "throw" the Escalation and MUST supply
            the code when the Process is executable.  A boundary
            Intermediate Event "catches": with no escalationCode any
            Escalation triggers it; with one, only a matching Escalation
            does.
        structureRef:ItemDefinition
            Optional ItemDefinition describing the Escalation "payload".
        '''
        # BUGFIX: RootElement.__init__ was never invoked, so ``id`` (and
        # any base-class kwargs) were silently dropped.  Mirror the sibling
        # Error class, which calls super correctly.
        super(Escalation, self).__init__(id, **kwargs)
        self.name = name
        self.escalationCode = escalationCode
        self.structureRef = kwargs.pop('structureRef', None)
        if self.__class__.__name__ == 'Escalation':
            residual_args(self.__init__, **kwargs)
##########################################################
# Expressions
class Expression(BaseElement):
    '''
    Non-executable, underspecified Expression given as natural-language
    text.  It may be defined inline where used, or at the Process level and
    referenced.  Inherits BaseElement and adds no attributes of its own.
    '''
    def __init__(self, id, **kwargs):
        '''
        id:str
            Unique identifier, forwarded to BaseElement.
        '''
        super(Expression, self).__init__(id, **kwargs)
        if type(self).__name__ == 'Expression':
            residual_args(self.__init__, **kwargs)
class FormalExpression(Expression):
    '''
    Executable Expression in a specified Expression language.  The default
    language for all Expressions comes from the Definitions element
    (``expressionLanguage`` attribute) and can be overridden per
    FormalExpression via ``language``.
    '''
    def __init__(self, id, body, evaluatesToTypeRef, **kwargs):
        '''
        body:Element
            The body of the Expression.
        evaluatesToTypeRef:ItemDefinition
            Type of object this Expression returns when evaluated (e.g.,
            conditional Expressions evaluate to a boolean).
        language:str
            Optional override of the Definitions-level Expression language,
            given as a URI.
        '''
        # BUGFIX: the original called
        # ``super(...).__init__(self, id, **kwargs)`` -- the extra ``self``
        # shifted the arguments and raised TypeError on instantiation.
        super(FormalExpression, self).__init__(id, **kwargs)
        self.body = body
        self.evaluatesToTypeRef = evaluatesToTypeRef
        self.language = kwargs.pop('language', None)
        if self.__class__.__name__ == 'FormalExpression':
            residual_args(self.__init__, **kwargs)

    def _to_xml(self):
        '''
        Note that this attribute is not relevant when the XML Schema is used for
        interchange. Instead, the FormalExpression complex type supports mixed
        content. The body of the Expression would be specified as element content.
        For example:
            <formalExpression id="ID_2">
                count(../dataObject[id="CustomerRecord_1"]/emailAddress) > 0
                <evaluatesToType id="ID_3" typeRef=“xsd:boolean"/>
            </formalExpression>
        '''
        # Not implemented yet; placeholder for XML serialization.
        pass
##########################################################
# Flows
class FlowElement(BaseElement):
    '''
    Abstract super class of every element that can appear in a Process
    flow.
    '''
    def __init__(self, id, **kwargs):
        '''
        name:str
            Descriptive name of the element.
        categoryValueRef:CategoryValue list
            Category Values associated with this FlowElement.
        auditing:Auditing
            Hook for audit-related properties (Processes only).
        monitoring:Monitoring
            Hook for monitoring-related properties (Processes only).
        '''
        super(FlowElement, self).__init__(id, **kwargs)
        # Pop the optional pieces so a subclass's residual-kwargs check only
        # sees genuinely unknown arguments.
        self.name = kwargs.pop('name', None)
        self.categoryValueRef = kwargs.pop('categoryValueRef', [])
        self.auditing = kwargs.pop('auditing', None)
        self.monitoring = kwargs.pop('monitoring', None)
        if type(self).__name__ == 'FlowElement':
            residual_args(self.__init__, **kwargs)
class FlowElementsContainer(BaseElement):
    '''
    Abstract super class of the four BPMN diagram containers -- Process,
    Sub-Process, Choreography, and Sub-Choreography -- holding the
    FlowElements (Events, Gateways, Sequence Flows, Activities and
    Choreography Activities) they contain.
    '''
    def __init__(self, id, **kwargs):
        '''
        flowElements:FlowElement list
            The flow elements contained in this container.  Choreography
            Activities MUST NOT appear in a Process; Activities, Data
            Associations and Data Objects MUST NOT appear in a
            Choreography.
        laneSets:LaneSet list
            LaneSets used in the container (not used by Choreographies or
            Sub-Choreographies).
        '''
        super(FlowElementsContainer, self).__init__(id, **kwargs)
        self.flowElements = kwargs.pop('flowElements', [])
        self.laneSets = kwargs.pop('laneSets', [])
        if type(self).__name__ == 'FlowElementsContainer':
            residual_args(self.__init__, **kwargs)
GatewayDirection = ['Unspecified','Converging','Diverging','Mixed']
class Gateway(FlowElement):
    '''
    Abstract Gateway type; concrete subclasses define how the Gateway
    behaves in different situations.
    '''
    def __init__(self, id, gatewayDirection='Unspecified', **kwargs):
        '''
        gatewayDirection:GatewayDirection enum (default='Unspecified') {'Unspecified'|'Converging'|'Diverging'|'Mixed'}
            Constrains the Gateway's Sequence Flows:
                Unspecified: any number of incoming and outgoing flows.
                Converging:  multiple incoming, no more than one outgoing.
                Diverging:   multiple outgoing, no more than one incoming.
                Mixed:       multiple incoming and multiple outgoing.
        '''
        super(Gateway, self).__init__(id, **kwargs)
        if gatewayDirection in GatewayDirection:
            self.gatewayDirection = gatewayDirection
        else:
            raise Exception #to be detailed
        # BUGFIX: was ``self.__calss__`` (typo), raising AttributeError
        # whenever a Gateway was instantiated directly.
        if self.__class__.__name__ == 'Gateway':
            residual_args(self.__init__, **kwargs)
##########################################################
# Item Definition
ItemKind = ['Information','Physical']
class ItemDefinition(RootElement):
    '''
    Definition of a data item's structure.  It may reference an import for
    the concrete structure definition.  When the data structure is a
    collection, ``isCollection`` must be True; a True flag on a
    non-collection type makes the model invalid, and compliant tools may
    report that as an error.  ``itemKind`` says whether the item is a
    physical or an information item.
    '''
    def __init__(self, id, itemKind='Information', isCollection=False, **kwargs):
        '''
        itemKind:ItemKind enum (default='Information') {'Information'|'Physical'}
            Nature of the item: physical or information.
        isCollection:bool (default=False)
            True when the actual data type is a collection.
        structureRef:Element
            The concrete data structure to be used.
        import:Import
            Location and format of the data structure.  When importType is
            unspecified, the typeLanguage of the containing Definitions is
            assumed.  (Stored as ``import_``; ``import`` is a Python
            keyword.)
        '''
        super(ItemDefinition, self).__init__(id, **kwargs)
        if itemKind not in ItemKind:
            raise Exception  # tbd
        self.itemKind = itemKind
        self.isCollection = isCollection
        self.structureRef = kwargs.pop('structureRef', None)
        self.import_ = kwargs.pop('import', None)
        if type(self).__name__ == 'ItemDefinition':
            residual_args(self.__init__, **kwargs)
##########################################################
# Message
class Message(RootElement):
    '''
    A Message exchanged between Participants.
    '''
    def __init__(self, id, name, **kwargs):
        '''
        name:str
            Text description of the Message.
        itemRef:ItemDefinition
            Optional ItemDefinition describing the Message "payload".
        '''
        super(Message, self).__init__(id, **kwargs)
        self.name = name
        self.itemRef = kwargs.pop('itemRef', None)
        if type(self).__name__ == 'Message':
            residual_args(self.__init__, **kwargs)
##########################################################
# Resource
class Resource(RootElement):
    '''
    Abstract resource definition referenced by Activities -- human or
    otherwise -- without detailing how actual identities (e.g., user IDs)
    are resolved at runtime.  Several Activities may share one Resource.
    '''
    def __init__(self, id, name, **kwargs):
        '''
        name:str
            Name of the Resource.
        resourceParameters:ResourceParameter list
            Parameters needed at runtime to resolve the Resource.
        '''
        super(Resource, self).__init__(id, **kwargs)
        self.name = name
        self.resourceParameters = kwargs.pop('resourceParameters', [])
        if type(self).__name__ == 'Resource':
            residual_args(self.__init__, **kwargs)
class ResourceParameter(BaseElement): # OMG spec is inconsistent on the base (RootElement vs BaseElement)
    '''
    One parameter of the query a Resource defines to resolve actual
    resources (e.g., user IDs).
    '''
    def __init__(self, id, name, type, isRequired, **kwargs):
        '''
        name:str
            Name of the query parameter.
        type:ItemDefinition
            Type of the query parameter.  (The parameter shadows the
            builtin ``type``; kept for interface compatibility.)
        isRequired:bool
            Whether the parameter is mandatory.
        '''
        super(ResourceParameter, self).__init__(id, **kwargs)
        self.name = name
        self.type = type
        self.isRequired = isRequired
        if self.__class__.__name__ == 'ResourceParameter':
            residual_args(self.__init__, **kwargs)
##########################################################
# Sequence Flow
# A Sequence Flow is used to show the order of Flow Elements in a Process or a Choreography. Each
# Sequence Flow has only one source and only one target. The source and target MUST be from the set of the following
# Flow Elements: Events (Start, Intermediate, and End), Activities (Task and Sub-Process; for Processes),
# Choreography Activities (Choreography Task and Sub-Choreography; for Choreographies), and
# Gateways.
# A Sequence Flow can optionally define a condition Expression, indicating that the token will be passed down the
# Sequence Flow only if the Expression evaluates to true. This Expression is typically used when the source of
# the Sequence Flow is a Gateway or an Activity.
class SequenceFlow(FlowElement):
    '''
    Orders Flow Elements in a Process or Choreography.  A Sequence Flow
    has exactly one source and one target.
    '''
    def __init__(self, id, sourceRef, targetRef, **kwargs):
        '''
        sourceRef:FlowNode
            The FlowNode the Sequence Flow connects from.  In a Process
            only Activities (not Event Sub-Processes), Gateways and Events
            may be sources; in a Choreography only Choreography Activities,
            Gateways and Events.
        targetRef:FlowNode
            The FlowNode the Sequence Flow connects to, with the same
            restrictions as sourceRef.
        conditionExpression:Expression
            Optional boolean gating condition; a token traverses this flow
            only when it evaluates to True.
        isImmediate:bool
            Optional flag: when True, Activities/Choreography Activities
            outside this model may NOT occur between the connected
            elements; when False they may.  Unset defaults to False for
            non-executable Processes and Choreographies, True for
            executable Processes (where False is forbidden).
        '''
        super(SequenceFlow, self).__init__(id, **kwargs)
        self.sourceRef = sourceRef
        self.targetRef = targetRef
        self.conditionExpression = kwargs.pop('conditionExpression', None)
        self.isImmediate = kwargs.pop('isImmediate', None)
        # BUGFIX: the original compared ``self.__class__.__init__`` (a
        # method object) to the string 'conditionExpression', which is
        # always False, so the leftover-kwargs check never ran.
        if self.__class__.__name__ == 'SequenceFlow':
            residual_args(self.__init__, **kwargs)
class FlowNode(FlowElement): # note: OMG spec figures and text disagree on this inheritance
    '''
    Common source/target endpoint type for Sequence Flow associations.
    Only Gateways, Activities, Choreography Activities and Events connect
    to Sequence Flows, so only those are sub-classes of FlowNode.
    '''
    def __init__(self, id, **kwargs):
        '''
        incoming:SequenceFlow list
            Incoming Sequence Flows of the FlowNode.
        outgoing:SequenceFlow list
            Outgoing Sequence Flows of the FlowNode (ordered collection).
        '''
        super(FlowNode, self).__init__(id, **kwargs)
        self.incoming = kwargs.pop('incoming', [])
        self.outgoing = kwargs.pop('outgoing', [])
        if type(self).__name__ == 'FlowNode':
            residual_args(self.__init__, **kwargs)
##########################################################
# Entities and Organisations
class PartnerEntity(BaseElement):
    '''
    One of the possible types of Participant.
    '''
    def __init__(self, id, name, **kwargs):
        '''
        name:str
            Text description of the PartnerEntity.
        participantRef:Participant list
            How the PartnerEntity participates in Collaborations and
            Choreographies.
        '''
        super(PartnerEntity, self).__init__(id, **kwargs)
        self.name = name
        self.participantRef = kwargs.pop('participantRef', [])
        if type(self).__name__ == 'PartnerEntity':
            residual_args(self.__init__, **kwargs)
class PartnerRole(BaseElement):
    '''
    One of the possible types of Participant.
    '''
    def __init__(self, id, name, **kwargs):
        '''
        name:str
            Text description of the PartnerRole.
        participantRef:Participant list
            How the PartnerRole participates in Collaborations and
            Choreographies.
        '''
        super(PartnerRole, self).__init__(id, **kwargs)
        self.name = name
        self.participantRef = kwargs.pop('participantRef', [])
        if type(self).__name__ == 'PartnerRole':
            residual_args(self.__init__, **kwargs)
##########################################################
# Events
# An Event is something that "happens" during the course of a Process. These Events affect the flow of the Process
# and usually have a cause or an impact. The term "event" is general enough to cover many things in a Process. The start
# of an Activity, the end of an Activity, the change of state of a document, a Message that arrives, etc., all could be
# considered Events. However, BPMN has restricted the use of Events to include only those types of Events that will
# affect the sequence or timing of Activities of a Process.
##########################################################
# TBD
class CallableElement(RootElement):
    '''
    Root element with an optional name; marked "TBD" in the source, so its
    exact role is still to be defined.
    '''
    def __init__(self, id, **kwargs):
        '''
        name:str
            Optional name of the element.
        '''
        super(CallableElement, self).__init__(id, **kwargs)
        self.name = kwargs.pop('name', None)
        if type(self).__name__ == 'CallableElement':
            residual_args(self.__init__, **kwargs)
| {
"content_hash": "6974acea8ba8c154cddd094c5abd7a43",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 203,
"avg_line_length": 45.34818941504178,
"alnum_prop": 0.633507371007371,
"repo_name": "Glorfindelrb/pyBPMN20engine",
"id": "3f29f4de2ab71a38c489e656b6bd7dfe704f1645",
"size": "32564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Core/Common/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119050"
}
],
"symlink_target": ""
} |
"""
Sample code usage of the pyTournamentTracker
"""
from tournament import *
def initial_setup():
    """Wipe all recorded matches and players for a clean start."""
    # Preserve the original ordering: matches are cleared before players.
    for wipe in (delete_matches, delete_players):
        wipe()
def basic_usage():
    """
    Interactive walkthrough of the pyTournamentTracker library.

    Registers players (four defaults or a manually entered set), then
    loops: print standings, let the user report matches and view Swiss
    pairings, until the user declines to continue.
    """
    print('Sample Usage of the python Tournament Tracker')
    answer = input('Register(r) players manually or add default(d) players? (r/d): ')
    if answer.lower() == 'd':
        # Seed four fixed players so the demo can start immediately.
        p_id = register_player('Player 1')
        print('Registered: Player 1 as {}'.format(p_id))
        p_id = register_player('Player 2')
        print('Registered: Player 2 as {}'.format(p_id))
        p_id = register_player('Player 3')
        print('Registered: Player 3 as {}'.format(p_id))
        p_id = register_player('Player 4')
        print('Registered: Player 4 as {}'.format(p_id))
        print('Total Players registered: {}'.format(count_players()))
    elif answer.lower() == 'r':
        # NOTE(review): assumes the answer parses as an int -- a
        # non-numeric reply raises ValueError here.  Confirm intended.
        total_players = input('How many players would you like to register?')
        for i in range(int(total_players)):
            name = input('Player name: ')
            p_id = register_player(name)
            print('Registered: {} as {}'.format(name, p_id))
    else:
        # Any other reply aborts the demo.
        exit('Invalid command {}'.format(answer))

    print('\nNOTE: Report match by ID given.\n')

    # Outer loop: one standings/reporting round per iteration.
    while True:
        print('Initial Player standings')
        # standings rows look like (id, name, wins, games_played) given the
        # format string below.
        standings = player_standings()
        for s in standings:
            print('Player: {} ID: {} -- Games Played: {} -- Wins: {}'.format(s[1], s[0], s[3], s[2]))
        # Inner loop: report matches until the user answers anything but 'y'.
        while True:
            match_cont = input('Report match: (y/n): ')
            if match_cont.lower() == 'y':
                winner = input('Match winner: ')
                loser = input('Match loser: ')
                # NOTE(review): report_match receives a {participant: won?}
                # mapping keyed by the raw input strings (presumably player
                # IDs, per the NOTE above) -- verify against
                # tournament.report_match.
                report_match({winner: True, loser: False})
                report = swiss_pairings()
                print('Current valid pairings: ')
                for r in report:
                    print('Valid pairing: {}, ID: {} - {}, ID: {}'.format(r[1], r[0], r[3], r[2]))
            else:
                break
        print('Current Standings')
        standings = player_standings()
        for s in standings:
            print('Player: {} -- ID: {} -- Games Played: {} -- Wins: {}'.format(s[1], s[0], s[3], s[2]))
        cont = input('Continue: (y/n)')
        if cont.lower() == 'y':
            continue
        else:
            break
# Script entry point: wipe any existing tournament data, then run the
# interactive demo.
if __name__ == '__main__':
    initial_setup()
    basic_usage()
| {
"content_hash": "37cee5fb076fff0c5eb984dfc9a06e6c",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 104,
"avg_line_length": 34.63013698630137,
"alnum_prop": 0.5292721518987342,
"repo_name": "MFry/pyTournamentTracker",
"id": "06370ab6fb8c9e7400bd64b33c7efeaaaccff08e",
"size": "2550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vagrant/tournament/sample_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "39"
},
{
"name": "Python",
"bytes": "31922"
},
{
"name": "Ruby",
"bytes": "3827"
},
{
"name": "Shell",
"bytes": "3248"
}
],
"symlink_target": ""
} |
import os.path
from os.path import dirname
from datetime import datetime, timedelta, tzinfo
from time import time, gmtime, strftime
import hashlib
import logging
import requests
import urllib
import json
from urlparse import urljoin
from constance import config as c_config
import bleach
try:
from commons.urlresolvers import reverse
except ImportError, e:
from django.core.urlresolvers import reverse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from tower import ugettext_lazy as _
except ImportError, e:
from django.utils.translation import ugettext_lazy as _
try:
from PIL import Image
except ImportError:
import Image
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
# Maximum number of times a user may rename their account; consumed by
# UserProfile.change_username() below.
MAX_USERNAME_CHANGES = getattr(settings, 'PROFILE_MAX_USERNAME_CHANGES', 3)
# Target (width, height) in pixels for uploaded avatars; see scale_image().
IMG_MAX_SIZE = getattr(settings, "PROFILE_IMG_MAX_SIZE", (256, 256))
# Set up a file system for badge uploads that can be kept separate from the
# rest of /media if necessary. Lots of hackery to ensure sensible defaults.
UPLOADS_ROOT = getattr(settings, 'PROFILE_UPLOADS_ROOT',
    os.path.join(getattr(settings, 'MEDIA_ROOT', 'media/'), 'uploads'))
UPLOADS_URL = getattr(settings, 'PROFILE_UPLOADS_URL',
    urljoin(getattr(settings, 'MEDIA_URL', '/media/'), 'uploads/'))
PROFILE_UPLOADS_FS = FileSystemStorage(location=UPLOADS_ROOT,
                                       base_url=UPLOADS_URL)
def scale_image(img_upload, img_max_size):
    """Center-crop an uploaded image to the target aspect ratio, resize it
    to ``img_max_size`` and return the result as a PNG ``ContentFile``.

    Returns ``None`` when the upload cannot be opened as an image.
    """
    try:
        src = Image.open(img_upload)
    except IOError:
        return None
    src_w, src_h = src.size
    dst_w, dst_h = img_max_size
    src_ratio = float(src_w) / float(src_h)
    dst_ratio = float(dst_w) / float(dst_h)
    if src_ratio > dst_ratio:
        # Source is wider than the target: trim the left/right edges.
        crop_h = src_h
        crop_w = crop_h * dst_ratio
        left = int(float(src_w - crop_w) / 2)
        top = 0
    else:
        # Source is taller than (or matches) the target: trim top/bottom.
        crop_w = src_w
        crop_h = crop_w / dst_ratio
        left = 0
        top = int(float(src_h - crop_h) / 2)
    box = (left, top, left + int(crop_w), top + int(crop_h))
    scaled = src.crop(box).resize((dst_w, dst_h), Image.ANTIALIAS)
    if scaled.mode != "RGB":
        scaled = scaled.convert("RGB")
    buf = StringIO()
    scaled.save(buf, "PNG")
    return ContentFile(buf.getvalue())
def mk_upload_to(field_fn):
    """Return an ``upload_to`` callable for a Django file field.

    The generated path is ``<base>/<slug>/<unix_time>_<field_fn>`` where
    ``base`` and ``slug`` come from the instance's ``get_upload_meta()``.
    """
    def upload_to(instance, filename):
        base, slug = instance.get_upload_meta()
        return '%(base)s/%(slug)s/%(time_now)s_%(field_fn)s' % {
            'base': base,
            'slug': slug,
            'time_now': int(time()),
            'field_fn': field_fn,
        }
    return upload_to
class UserProfile(models.Model):
    """Per-user profile data attached 1:1 to ``auth.User``.

    Tracks display metadata (name, avatar, bio, organization, location), a
    bounded number of allowed username changes, and integration with the
    mozillians.org vouching API.
    """
    user = models.ForeignKey(User, unique=True)
    # How many renames this user has already performed; compared against
    # MAX_USERNAME_CHANGES in can_change_username().
    username_changes = models.IntegerField(default=0)
    is_confirmed = models.BooleanField(default=False)
    display_name = models.CharField(max_length=64, blank=True, null=True,
                                    unique=False)
    avatar = models.ImageField(blank=True, null=True,
                               storage=PROFILE_UPLOADS_FS,
                               upload_to=mk_upload_to('avatar.png'))
    bio = models.TextField(blank=True)
    organization = models.CharField(max_length=255, default='', blank=True)
    location = models.CharField(max_length=255, default='', blank=True)
    created = models.DateTimeField(auto_now_add=True, blank=False)
    modified = models.DateTimeField(auto_now=True, blank=False)

    class Meta:
        pass

    def __unicode__(self):
        # Prefer the chosen display name; fall back to the login name.
        return self.display_name or self.user.username

    def get_absolute_url(self):
        return reverse('profiles.profile_view',
                       kwargs={'username': self.user.username})

    def get_upload_meta(self):
        """(base, slug) pair consumed by mk_upload_to() to build paths."""
        return ("profile", self.user.username)

    def allows_edit(self, user):
        """Return True if `user` may edit this profile (owner or staff)."""
        if user == self.user:
            return True
        if user.is_staff or user.is_superuser:
            return True
        return False

    @property
    def bio_html(self):
        """Bio rendered safe for HTML display: linkified, then sanitized."""
        return bleach.clean(bleach.linkify(self.bio))

    def username_changes_remaining(self):
        return MAX_USERNAME_CHANGES - self.username_changes

    def can_change_username(self, user=None):
        return self.username_changes_remaining() > 0

    def change_username(self, username, user=None):
        """Rename the account, consuming one of the limited renames.

        Returns False when no changes remain or the name is taken (note:
        renaming to the *current* name also fails the availability check,
        since the filter matches this very user).
        """
        if not self.can_change_username(user):
            return False
        if User.objects.filter(username=username).count() > 0:
            return False
        if username != self.user.username:
            self.user.username = username
            self.user.save()
        self.username_changes += 1
        self.save()
        return True

    def clean(self):
        if self.avatar:
            scaled_file = scale_image(self.avatar.file, IMG_MAX_SIZE)
            if not scaled_file:
                # BUGFIX: ValidationError was referenced without ever being
                # imported in this module, so a broken upload raised
                # NameError instead of a form validation error.
                from django.core.exceptions import ValidationError
                raise ValidationError(_('Cannot process image'))
            self.avatar.file = scaled_file

    def is_vouched_mozillian(self):
        """Check whether this profile is associated with a vouched
        mozillians.org profile"""
        MOZILLIANS_API_BASE_URL = c_config.MOZILLIANS_API_BASE_URL
        MOZILLIANS_API_APPNAME = c_config.MOZILLIANS_API_APPNAME
        MOZILLIANS_API_KEY = c_config.MOZILLIANS_API_KEY
        MOZILLIANS_API_CACHE_KEY_PREFIX = c_config.MOZILLIANS_API_CACHE_KEY_PREFIX
        MOZILLIANS_API_CACHE_TIMEOUT = c_config.MOZILLIANS_API_CACHE_TIMEOUT
        if not MOZILLIANS_API_KEY:
            logging.warning("'MOZILLIANS_API_KEY' not set up.")
            return False
        email = self.user.email
        # /api/v1/users/?app_name=foobar&app_key=12345&email=test@example.com
        url = '%s/users/?%s' % (MOZILLIANS_API_BASE_URL, urllib.urlencode({
            'app_name': MOZILLIANS_API_APPNAME,
            'app_key': MOZILLIANS_API_KEY,
            'email': email
        }))
        # Cache the HTTP request to the API to minimize hits
        cache_key = '%s:%s' % (MOZILLIANS_API_CACHE_KEY_PREFIX,
                               hashlib.md5(url.encode('utf-8')).hexdigest())
        content = cache.get(cache_key)
        if not content:
            resp = requests.get(url)
            if resp.status_code != 200:
                logging.error("Failed request to mozillians.org API: %s" %
                              resp.status_code)
                return False
            else:
                content = resp.content
                cache.set(cache_key, content, MOZILLIANS_API_CACHE_TIMEOUT)
        try:
            content = json.loads(content)
        except ValueError:
            logging.error("Failed parsing mozillians.org response")
            return False
        # Case-insensitive email match against the candidate records.
        for obj in content.get('objects', []):
            if obj['email'].lower() == email.lower():
                return obj['is_vouched']
        return False
def autocreate_user_profile(self):
    """Ensure user profile exists when accessed"""
    # get_or_create guards against users created before profiles existed
    # (e.g. fixtures or admin-created accounts).
    profile, created = UserProfile.objects.get_or_create(
        user=User.objects.get(id=self.id),
        defaults=dict())
    return profile
# HACK: monkeypatch User.get_profile to ensure the profile exists
User.get_profile = autocreate_user_profile
# HACK: monkeypatch User.__unicode__ to use profile display_name when available
def user_display_name(self):
    # Delegates to UserProfile.__unicode__, which falls back to the username.
    return unicode(self.get_profile())
User.__unicode__ = user_display_name
# HACK: monkeypatch User.get_absolute_url() to return profile URL
def user_get_absolute_url(self):
    # Users are presented via their profile pages throughout the site.
    return self.get_profile().get_absolute_url()
User.get_absolute_url = user_get_absolute_url
| {
"content_hash": "e3cc64454c46a89b588ae0855618254a",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 82,
"avg_line_length": 32.49800796812749,
"alnum_prop": 0.637244084835111,
"repo_name": "deepankverma/badges.mozilla.org",
"id": "29c539bcbb56d0cef687e663c29f9ac46bec7e49",
"size": "8157",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "badgus/profiles/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10452"
},
{
"name": "HTML",
"bytes": "83183"
},
{
"name": "JavaScript",
"bytes": "6538"
},
{
"name": "Python",
"bytes": "407537"
},
{
"name": "Shell",
"bytes": "871"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
} |
import os
def main(j, args, params, tags, tasklet):
    """Render the 'bigpicture' macro: a centered hero image with title,
    subtitle, paragraph and optional link, all configured via HRD params."""
    page = args.page
    page.addCSS(cssContent='''
    .bigpicture{
        margin: 10px 0 15px 0;
    }
    .bigpicture-container{
        text-align: center;
    }
    .subtitle{
        margin-bottom: 10px;
        display: block;
    }
    .subtitle-paragraph{
        margin-bottom: 5px;
    }
    .bigpicture-container h1.small{
        font-size: 25px;
    }
    .bigpicture-container h1.medium{
        font-size: 30px;
    }
    .bigpicture-container h1.large{
        font-size: 35px;
    }
    .bigpicture-container h1.xlarge{
        font-size: 40px;
    }
    .subtitle.small, .subtitle-paragraph.small, .subtitle-link.small{
        font-size: 14px;
    }
    .subtitle.medium, .subtitle-paragraph.medium, .subtitle-link.medium{
        font-size: 16px;
    }
    .subtitle.large, .subtitle-paragraph.large, .subtitle-link.large{
        font-size: 18px;
    }
    ''')
    hrd = j.core.hrd.getHRD(content=args.cmdstr)
    pic = hrd.get('picture.path', '')
    # Template context; sizes default to 'medium' when not configured.
    ctx = {
        'picturePath': pic,
        'titleText': hrd.get('title.text', ''),
        'titleSize': hrd.get('title.size', 'medium'),
        'subtitleText': hrd.get('subtitle.text', ''),
        'subtitleSize': hrd.get('subtitle.size', 'medium'),
        'paragraphText': hrd.get('paragraph.text', ''),
        'paragraphSize': hrd.get('paragraph.size', 'medium'),
        'subtitleLink': hrd.get('subtitle.link', ''),
        'subtitleLinkText': hrd.get('subtitle.link.text', ''),
        'subtitleLinkSize': hrd.get('subtitle.link.size', 'medium'),
    }
    # Prefer an image shipped inside the space's .files/img directory; when
    # no such file exists, treat picture.path as a full external URL.
    space = j.core.portal.active.spacesloader.spaces[args.doc.getSpaceName()]
    imagedir = j.system.fs.joinPaths(space.model.path, '.files', 'img/')
    if os.path.isfile(imagedir + pic):
        ctx['picturePath'] = '/$$space/.files/img/' + pic
    page.addMessage('''
    <div class="bigpicture-container">
        <div class="container">
            <h1 class="title {titleSize}">{titleText}</h1>
            <div class="span10 offset1">
                <img class="bigpicture img-rounded" src="{picturePath}">
                <div class="subtitle-container">
                    <strong class="subtitle {subtitleSize}">{subtitleText}</strong>
                    <p class="subtitle-paragraph {paragraphSize}">{paragraphText}</p>
                    <a class="subtitle-link {subtitleLinkSize}" href="{subtitleLink}">{subtitleLinkText}</a>
                </div>
            </div>
        </div>
    </div>
    '''.format(**ctx))
    params.result = page
    return params
def match(j, args, params, tags, tasklet):
    """Tasklet predicate: this macro applies unconditionally."""
    return True
| {
"content_hash": "69d1feda3560d731b67bb687be5407fc",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 94,
"avg_line_length": 30.988095238095237,
"alnum_prop": 0.6699961582789089,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "c310251a2da4f52ebffef5671f6f35da7430e26d",
"size": "2603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/portalbase/macros/page/bigpicture/1_bigpicture.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
""" TF 2.0 BlenderbotSmall model."""
import random
from typing import List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
ContextManagers,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_blenderbot_small import BlenderbotSmallConfig
logger = logging.get_logger(__name__)
# Default checkpoint/config/tokenizer identifiers referenced in docstrings.
_CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
_CONFIG_FOR_DOC = "BlenderbotSmallConfig"
_TOKENIZER_FOR_DOC = "BlenderbotSmallTokenizer"
# Additive attention-mask value: added to attention logits so masked
# positions receive effectively zero probability after softmax.
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """Shift `input_ids` one position to the right to build decoder inputs.

    Each row is prefixed with `decoder_start_token_id` and its last token is
    dropped; loss-ignore placeholders (-100) are replaced by `pad_token_id`
    so the result is a valid token sequence.
    """
    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
    start_tokens = tf.fill(
        (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
    )
    shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = tf.where(
        shifted_input_ids == -100,
        tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
        shifted_input_ids,
    )
    # "Verify that `labels` has only positive values and -100"
    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
    # Make sure the assertion op is called by wrapping the result in an identity no-op
    with tf.control_dependencies([assert_gte0]):
        shifted_input_ids = tf.identity(shifted_input_ids)
    return shifted_input_ids
# Adapted from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Build the additive causal mask for uni-directional self-attention: 0 where
    attention is allowed, LARGE_NEGATIVE where a query would see the future.
    """
    batch_size = input_ids_shape[0]
    tgt_len = input_ids_shape[1]
    rows = tf.range(tgt_len)
    # Key position j is visible from query position i exactly when j <= i.
    visible = rows[None, :] <= rows[:, None]
    causal = tf.where(visible, 0.0, tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE)
    if past_key_values_length > 0:
        # Cached (past) positions are always visible: prepend a zero block.
        causal = tf.concat([tf.zeros((tgt_len, past_key_values_length)), causal], axis=-1)
    return tf.tile(causal[None, None, :, :], (batch_size, 1, 1, 1))
# Adapted from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expand a padding mask `[bsz, seq_len]` into the additive attention shape
    `[bsz, 1, tgt_seq_len, src_seq_len]` (0 = keep, LARGE_NEGATIVE = masked).
    """
    src_len = shape_list(mask)[1]
    if tgt_len is None:
        tgt_len = src_len
    one = tf.constant(1.0)
    mask = tf.cast(mask, dtype=one.dtype)
    tiled = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
    # Invert: 1 -> 0 added to the logits, 0 -> large negative added.
    return (one - tiled) * LARGE_NEGATIVE
# Copied from transformers.models.blenderbot.modeling_tf_blenderbot.TFBlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
class TFBlenderbotSmallLearnedPositionalEmbedding(tf.keras.layers.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """
    def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
        super().__init__(num_embeddings, embedding_dim, **kwargs)
    def call(
        self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: Optional[tf.Tensor] = None
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        if position_ids is None:
            # No explicit positions given: number the tokens sequentially,
            # offset by the length of any cached keys/values so positions
            # stay correct during incremental decoding.
            seq_len = input_shape[1]
            position_ids = tf.range(seq_len, delta=1, name="range")
            position_ids += past_key_values_length
        return super().call(tf.cast(position_ids, dtype=tf.int32))
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->BlenderbotSmall
class TFBlenderbotSmallAttention(tf.keras.layers.Layer):
    """Multi-headed attention from "Attention Is All You Need"""
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = tf.keras.layers.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Scale query by 1/sqrt(head_dim), per scaled dot-product attention.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
        self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
        self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
        self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        # Split heads: (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim).
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
        attention_mask: Optional[tf.Tensor] = None,
        layer_head_mask: Optional[tf.Tensor] = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
        """Input shape: Batch x Time x Channel"""
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = shape_list(hidden_states)
        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)
        # Merge batch and head dims so one batched matmul covers every head.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
        key_states = tf.reshape(key_states, proj_shape)
        value_states = tf.reshape(value_states, proj_shape)
        src_len = shape_list(key_states)[1]
        # Raw attention logits: (bsz * num_heads, tgt_len, src_len).
        attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
        tf.debugging.assert_equal(
            shape_list(attn_weights),
            [bsz * self.num_heads, tgt_len, src_len],
            message=(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {shape_list(attn_weights)}"
            ),
        )
        if attention_mask is not None:
            tf.debugging.assert_equal(
                shape_list(attention_mask),
                [bsz, 1, tgt_len, src_len],
                message=(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {shape_list(attention_mask)}"
                ),
            )
            # Additive mask (large negative on blocked positions).
            attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
            attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
        attn_weights = stable_softmax(attn_weights, axis=-1)
        if layer_head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(layer_head_mask),
                [self.num_heads],
                message=(
                    f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
                    f" {shape_list(layer_head_mask)}"
                ),
            )
            # Zero out (or scale) whole heads as requested by the head mask.
            attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
                attn_weights, (bsz, self.num_heads, tgt_len, src_len)
            )
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
        attn_probs = self.dropout(attn_weights, training=training)
        attn_output = tf.matmul(attn_probs, value_states)
        tf.debugging.assert_equal(
            shape_list(attn_output),
            [bsz * self.num_heads, tgt_len, self.head_dim],
            message=(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {shape_list(attn_output)}"
            ),
        )
        # Merge heads back: (bsz, tgt_len, embed_dim).
        attn_output = tf.transpose(
            tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
        )
        attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
        attn_output = self.out_proj(attn_output)
        # Re-split heads so callers can inspect per-head attention weights.
        attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
        return attn_output, attn_weights, past_key_value
# Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->BlenderbotSmall
class TFBlenderbotSmallEncoderLayer(tf.keras.layers.Layer):
    """One Transformer encoder block: self-attention followed by a two-layer
    feed-forward network, each with a residual connection and LayerNorm."""
    def __init__(self, config: BlenderbotSmallConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFBlenderbotSmallAttention(
            self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
        )
        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
        self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]],
        layer_head_mask: Optional[tf.Tensor],
        training: Optional[bool] = False,
    ) -> tf.Tensor:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`

        Returns:
            A `(hidden_states, self_attn_weights)` tuple.
        """
        residual = hidden_states
        hidden_states, self_attn_weights, _ = self.self_attn(
            hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
        )
        tf.debugging.assert_equal(
            shape_list(hidden_states),
            shape_list(residual),
            message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
        )
        hidden_states = self.dropout(hidden_states, training=training)
        # Residual connection + post-attention layer norm.
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Position-wise feed-forward sub-block.
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        return hidden_states, self_attn_weights
# Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->BlenderbotSmall
class TFBlenderbotSmallDecoderLayer(tf.keras.layers.Layer):
    """One Transformer decoder block: masked self-attention, cross-attention
    over the encoder output, then a feed-forward network; each sub-block has
    a residual connection followed by LayerNorm."""
    def __init__(self, config: BlenderbotSmallConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFBlenderbotSmallAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="self_attn",
            is_decoder=True,
        )
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.encoder_attn = TFBlenderbotSmallAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="encoder_attn",
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
        self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None,
        encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        layer_head_mask: Optional[tf.Tensor] = None,
        cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`tf.Tensor`):
                cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`
            cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
                `(decoder_attention_heads,)`
            past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states

        Returns:
            A `(hidden_states, self_attn_weights, cross_attn_weights,
            present_key_value)` tuple.
        """
        residual = hidden_states
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
        )
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
            )
            hidden_states = self.dropout(hidden_states, training=training)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        return (
            hidden_states,
            self_attn_weights,
            cross_attn_weights,
            present_key_value,
        )
class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel):
    """Base class wiring BlenderbotSmall models into the library's weight
    loading, initialization and SavedModel-serving machinery."""
    config_class = BlenderbotSmallConfig
    base_model_prefix = "model"
    @property
    def dummy_inputs(self):
        # Minimal encoder/decoder inputs used to build weights at init time;
        # token id 1 is treated as padding for the attention mask here.
        pad_token = 1
        input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
        decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
        dummy_inputs = {
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": tf.math.not_equal(input_ids, pad_token),
            "input_ids": input_ids,
        }
        return dummy_inputs
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
                "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
            }
        ]
    )
    # Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving
    def serving(self, inputs):
        # SavedModel serving signature: run the model, then convert outputs
        # into the serialization-friendly form.
        output = self.call(inputs)
        return self.serving_output(output)
BLENDERBOT_SMALL_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Args:
config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Usage example appended to the generation model's docstring.
# Fix: the example previously instantiated `BlenderbotSmallForConditionalGeneration`
# (the PyTorch class, which is never imported in the snippet) instead of the TF
# class `TFBlenderbotSmallForConditionalGeneration` that the import line brings in.
BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
    Conversation example::
    >>> from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration >>> mname =
    'facebook/blenderbot_small-90M' >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(mname) >>>
    tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)
    >>> UTTERANCE = "My friends are cool but they eat too many carbs." >>> print("Human: ", UTTERANCE) >>> inputs =
    tokenizer([UTTERANCE], return_tensors='tf')
    >>> reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(reply_ids,
    skip_special_tokens=True)[0]) what kind of carbs do they eat? i don't know much about carbs.
    >>> REPLY = "I'm not sure" >>> print("Human: ", REPLY) >>> NEXT_UTTERANCE = ( ... "My friends are cool but they
    eat too many carbs.</s> " ... "<s>what kind of carbs do they eat? i don't know much about carbs.</s> " ...
    "<s>I'm not sure." ... )
    >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors='tf') >>> inputs.pop("token_type_ids") >>>
    next_reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids,
    skip_special_tokens=True)[0])
"""
# Shared `call` docstring; the `{0}` placeholder is filled with the input shape
# by `add_start_docstrings_to_model_forward` (e.g. "batch_size, sequence_length").
# Fix: the `encoder_outputs` entry had its sentence fragments in a garbled order,
# and the `past_key_values` heading was missing its terminating colon.
BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`BlenderbotSmallTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.
            Indices can be obtained using [`BlenderbotSmallTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are decoder input IDs?](../glossary#decoder-input-ids)
            BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
        decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
            range `[0, config.max_position_embeddings - 1]`.
        head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        encoder_outputs (`tf.FloatTensor`, *optional*):
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states
            at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
            contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`). Set to `False` during training, `True` during generation
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
@keras_serializable
class TFBlenderbotSmallEncoder(tf.keras.layers.Layer):
    config_class = BlenderbotSmallConfig
    # NOTE(review): the string below is not the class __doc__ (it follows an
    # assignment, so it is a bare expression) — kept as informational text.
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`TFBlenderbotSmallEncoderLayer`].
    Args:
        config: BlenderbotSmallConfig
    """
    def __init__(
        self, config: BlenderbotSmallConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs
    ):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        # Probability of dropping a whole encoder layer during training (LayerDrop).
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # BART-style optional sqrt(d_model) scaling of token embeddings.
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
        self.embed_tokens = embed_tokens
        self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        # Layer names ("layers.{i}") must stay stable for checkpoint weight mapping.
        self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
    def get_embed_tokens(self):
        """Return the token embedding layer (shared with the decoder)."""
        return self.embed_tokens
    def set_embed_tokens(self, embed_tokens):
        """Replace the token embedding layer."""
        self.embed_tokens = embed_tokens
    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        """
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`BlenderbotSmallTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, `optional):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
                in the config will be used instead.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail. This argument can be used only in eager mode, in graph mode the value in the config
                will be used instead.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
                in eager mode, in graph mode the value will always be set to True.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        """
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name
            # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope`
            # is used with a name ending in `/`, that name replaces the current name scope.
            # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0)
            context = []
            if hasattr(self.embed_tokens, "load_weight_prefix"):
                context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/"))
            with ContextManagers(context):
                # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
                # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
                tf.debugging.assert_less(
                    input_ids,
                    tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype),
                    message=(
                        "input_ids must be smaller than the embedding layer's input dimension (got"
                        f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})"
                    ),
                )
                inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_shape)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        # check attention mask and invert
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask)
        else:
            # NOTE(review): this branch is a no-op — attention_mask is already None here.
            attention_mask = None
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(head_mask)[0],
                len(self.layers),
                message=(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {shape_list(head_mask)[0]}."
                ),
            )
        # encoder layers
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):  # skip the layer
                continue
            hidden_states, attn = encoder_layer(
                hidden_states,
                attention_mask,
                head_mask[idx] if head_mask is not None else None,
            )
            if output_attentions:
                all_attentions += (attn,)
        # Append the final hidden state after the last (non-skipped) layer.
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
@keras_serializable
class TFBlenderbotSmallDecoder(tf.keras.layers.Layer):
    config_class = BlenderbotSmallConfig
    # NOTE(review): the string below is not the class __doc__ (it follows an
    # assignment, so it is a bare expression) — kept as informational text.
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotSmallDecoderLayer`]
    Args:
        config: BlenderbotSmallConfig
        embed_tokens: output embedding
    """
    def __init__(
        self, config: BlenderbotSmallConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs
    ):
        super().__init__(**kwargs)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.embed_tokens = embed_tokens
        # Probability of dropping a whole decoder layer during training (LayerDrop).
        self.layerdrop = config.decoder_layerdrop
        self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        # BART-style optional sqrt(d_model) scaling of token embeddings.
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
        # Layer names ("layers.{i}") must stay stable for checkpoint weight mapping.
        self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
        self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
    def get_embed_tokens(self):
        """Return the token embedding layer (shared with the encoder)."""
        return self.embed_tokens
    def set_embed_tokens(self, embed_tokens):
        """Replace the token embedding layer."""
        self.embed_tokens = embed_tokens
    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        position_ids=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        r"""
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`BlenderbotSmallTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
                range `[0, config.max_position_embeddings - 1]`.
            encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape
                `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids`
                you can choose to directly pass an embedded representation. This is useful if you want more control
                over how to convert `input_ids` indices into associated vectors than the model's internal embedding
                lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
                in the config will be used instead.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail. This argument can be used only in eager mode, in graph mode the value in the config
                will be used instead.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
                in eager mode, in graph mode the value will always be set to True.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        """
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        # Length of the cached prefix (dimension 2 of a cached key tensor), 0 without a cache.
        past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
        if inputs_embeds is None:
            # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name
            # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope`
            # is used with a name ending in `/`, that name replaces the current name scope.
            # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0)
            context = []
            if hasattr(self.embed_tokens, "load_weight_prefix"):
                context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/"))
            with ContextManagers(context):
                # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
                # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
                tf.debugging.assert_less(
                    input_ids,
                    tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype),
                    message=(
                        "input_ids must be smaller than the embedding layer's input dimension (got"
                        f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})"
                    ),
                )
                inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        # More than one new token: need a causal mask. Exactly one (cached decoding):
        # the single token may attend to the whole prefix, so only expansion is needed.
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
        else:
            combined_attention_mask = _expand_mask(
                tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
            )
        if attention_mask is not None:
            combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
        # embed positions
        if position_ids is None:
            positions = self.embed_positions(input_shape, past_key_values_length)
        else:
            positions = self.embed_positions(input_shape, position_ids=position_ids)
        # NOTE: BlenderbotSmall applies layernorm to the token embeddings *before*
        # adding the position embeddings.
        hidden_states = self.layernorm_embedding(inputs_embeds) + positions
        hidden_states = self.dropout(hidden_states, training=training)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
        present_key_values = () if use_cache else None
        # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
        for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
            if attn_mask is not None:
                tf.debugging.assert_equal(
                    shape_list(attn_mask)[0],
                    len(self.layers),
                    message=(
                        f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
                        f" {shape_list(attn_mask)[0]}."
                    ),
                )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):
                continue
            past_key_value = past_key_values[idx] if past_key_values is not None else None
            hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
                hidden_states,
                attention_mask=combined_attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                past_key_value=past_key_value,
            )
            if use_cache:
                present_key_values += (present_key_value,)
            if output_attentions:
                all_self_attns += (layer_self_attn,)
                if encoder_hidden_states is not None:
                    all_cross_attns += (layer_cross_attn,)
        # Append the final hidden state after the last (non-skipped) layer.
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
        else:
            return TFBaseModelOutputWithPastAndCrossAttentions(
                last_hidden_state=hidden_states,
                past_key_values=present_key_values,
                hidden_states=all_hidden_states,
                attentions=all_self_attns,
                cross_attentions=all_cross_attns,
            )
@keras_serializable
class TFBlenderbotSmallMainLayer(tf.keras.layers.Layer):
    """Core encoder-decoder stack shared by the BlenderbotSmall task models.

    Owns the token embedding (`self.shared`) that is used by both the encoder
    and the decoder.
    """
    config_class = BlenderbotSmallConfig
    def __init__(self, config: BlenderbotSmallConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.shared = tf.keras.layers.Embedding(
            input_dim=config.vocab_size,
            output_dim=config.d_model,
            embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std),
            name="model.shared",
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "model.shared"
        self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name="encoder")
        self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name="decoder")
    def get_input_embeddings(self):
        """Return the shared token embedding layer."""
        return self.shared
    def set_input_embeddings(self, new_embeddings):
        """Replace the shared embedding and propagate it to encoder and decoder."""
        self.shared = new_embeddings
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared
    @unpack_inputs
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        decoder_position_ids=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs
    ):
        """Run the encoder (unless `encoder_outputs` is given), then the decoder."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                training=training,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
            encoder_outputs = TFBaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
        elif not return_dict and not isinstance(encoder_outputs, tuple):
            encoder_outputs = encoder_outputs.to_tuple()
        decoder_outputs = self.decoder(
            decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            # The encoder padding mask is reused for cross-attention.
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            # Tuple mode: decoder outputs first, then encoder outputs.
            return decoder_outputs + encoder_outputs
        return TFSeq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.",
    BLENDERBOT_SMALL_START_DOCSTRING,
)
class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
    """Headless BlenderbotSmall model: a thin wrapper around the main layer."""
    def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.model = TFBlenderbotSmallMainLayer(config, name="model")
    def get_encoder(self):
        """Return the encoder stack."""
        return self.model.encoder
    def get_decoder(self):
        """Return the decoder stack."""
        return self.model.decoder
    @unpack_inputs
    @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSeq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        decoder_input_ids: Optional[tf.Tensor] = None,
        decoder_attention_mask: Optional[tf.Tensor] = None,
        decoder_position_ids: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        decoder_head_mask: Optional[tf.Tensor] = None,
        cross_attn_head_mask: Optional[tf.Tensor] = None,
        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
        past_key_values: Optional[List[tf.Tensor]] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        decoder_inputs_embeds: Optional[tf.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs
    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
        # Pure pass-through to the shared main layer.
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        return outputs
    # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
    def serving_output(self, output):
        """Convert model outputs to static tensors for the SavedModel signature,
        honoring the config flags for which optional outputs are exposed."""
        # NOTE(review): `tf.tuple(...)[1]` keeps only the second element of the
        # per-layer cache tuples — mirrors the Bart implementation; confirm intended.
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
        return TFSeq2SeqModelOutput(
            last_hidden_state=output.last_hidden_state,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )
# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
class BiasLayer(tf.keras.layers.Layer):
    """
    Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis,
    so all weights have to be registered in a layer.
    """
    def __init__(self, shape, initializer, trainable, name, **kwargs):
        super().__init__(name=name, **kwargs)
        # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
        # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
        # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
        self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
    def call(self, x):
        """Add the stored bias to `x` (broadcasting over leading dimensions)."""
        return x + self.bias
@add_start_docstrings(
    "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
    BLENDERBOT_SMALL_START_DOCSTRING,
)
class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss):
    # Encoder/decoder token embeddings are tied to the main layer's shared embedding,
    # so checkpoint entries for the per-sub-module copies can be ignored on load.
    _keys_to_ignore_on_load_unexpected = [
        r"model.encoder.embed_tokens.weight",
        r"model.decoder.embed_tokens.weight",
    ]

    def __init__(self, config, *inputs, **kwargs):
        """Build the seq2seq backbone plus the static final-logits bias.

        Args:
            config: model configuration (provides `vocab_size` and `use_cache`).
            *inputs, **kwargs: forwarded to the TF pretrained-model base class.
        """
        super().__init__(config, *inputs, **kwargs)
        self.model = TFBlenderbotSmallMainLayer(config, name="model")
        self.use_cache = config.use_cache
        # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency.
        self.bias_layer = BiasLayer(
            name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
        )

    def get_decoder(self):
        """Return the decoder sub-module of the main layer."""
        return self.model.decoder

    def get_encoder(self):
        """Return the encoder sub-module of the main layer."""
        return self.model.encoder

    def get_output_embeddings(self):
        # Output embeddings are tied to the input embeddings: `call` computes the LM
        # logits with the transposed shared embedding matrix.
        return self.get_input_embeddings()

    def set_output_embeddings(self, value):
        # Tied embeddings — updating the output embedding updates the shared input one.
        self.set_input_embeddings(value)

    def get_bias(self):
        """Return the final-logits bias variable keyed by its serialized name."""
        return {"final_logits_bias": self.bias_layer.bias}

    def set_bias(self, value):
        # Replaces the existing layers containing bias for correct (de)serialization.
        vocab_size = value["final_logits_bias"].shape[-1]
        self.bias_layer = BiasLayer(
            name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
        )
        self.bias_layer.bias.assign(value["final_logits_bias"])

    @unpack_inputs
    @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        decoder_input_ids: Optional[tf.Tensor] = None,
        decoder_attention_mask: Optional[tf.Tensor] = None,
        decoder_position_ids: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        decoder_head_mask: Optional[tf.Tensor] = None,
        cross_attn_head_mask: Optional[tf.Tensor] = None,
        encoder_outputs: Optional[TFBaseModelOutput] = None,
        past_key_values: Optional[List[tf.Tensor]] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        decoder_inputs_embeds: Optional[tf.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[tf.Tensor] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
        r"""
        labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:
        """
        if labels is not None:
            # Map pad tokens in the labels to -100 so hf_compute_loss ignores them.
            labels = tf.where(
                labels == self.config.pad_token_id,
                tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
                labels,
            )
            # The decoder cache is only useful at generation time, never when training on labels.
            use_cache = False
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                # Teacher forcing: derive decoder inputs by shifting the labels one position right.
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        # LM logits: decoder hidden states projected through the transposed (tied) shared
        # embedding matrix, plus the static non-trainable bias (mirrors the PyTorch
        # `final_logits_bias` buffer).
        lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
        lm_logits = self.bias_layer(lm_logits)
        masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
        if not return_dict:
            # Tuple output: (loss?, logits, *remaining model outputs).
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return TFSeq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,  # index 1 of d outputs
            decoder_hidden_states=outputs.decoder_hidden_states,  # index 2 of d outputs
            decoder_attentions=outputs.decoder_attentions,  # index 3 of d outputs
            cross_attentions=outputs.cross_attentions,  # index 4 of d outputs
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,  # index 0 of encoder outputs
            encoder_hidden_states=outputs.encoder_hidden_states,  # 1 of e out
            encoder_attentions=outputs.encoder_attentions,  # 2 of e out
        )

    # Normalizes the LM output for SavedModel serving (tuples -> stacked tensors,
    # config-disabled fields -> None). Byte-synced with BART via the marker below —
    # edit the BART original and run `make fix-copies` rather than changing it here.
    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
    def serving_output(self, output):
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    # Builds the kwargs for one decoding step. With a cache, only the last decoder token
    # is fed; decoder position ids come from the attention mask (XLA), the cached
    # sequence length (eager + cache), or a fresh range (first step). Byte-synced with
    # BART via the marker below — edit the BART original and run `make fix-copies`.
    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs
    ):
        # cut decoder_input_ids if past is used
        if past is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        if decoder_attention_mask is not None:  # xla
            decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
        elif past is not None:  # no xla + past
            decoder_position_ids = past[0][0].shape[2]
        else:  # no xla + no past
            decoder_position_ids = tf.range(decoder_input_ids.shape[1])
        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "decoder_position_ids": decoder_position_ids,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    # Reorders each layer's self-attention cache (first two entries) to follow beam-search
    # hypothesis reshuffling; the remaining entries are left untouched (see comment in
    # body). Byte-synced with BART via the marker below.
    @staticmethod
    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration._reorder_cache
    def _reorder_cache(past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            # cached cross_attention states don't have to be reordered -> they are always the same
            reordered_past += (
                tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
| {
"content_hash": "eb8204ff9bea19acda2a1474da8f80dd",
"timestamp": "",
"source": "github",
"line_count": 1452,
"max_line_length": 221,
"avg_line_length": 47.74724517906336,
"alnum_prop": 0.6311355998211426,
"repo_name": "huggingface/transformers",
"id": "e6c066af1295f7582e9279e98a263f8a9b701556",
"size": "69977",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.