| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from setuptools import setup, find_packages
import os
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
setup(
name='SwampDragon-live',
version=__import__('swampdragon_live').__version__,
author='Marc Hoersken',
author_email='info@marc-hoersken.de',
packages=find_packages(),
include_package_data=True,
url='https://github.com/mback2k/swampdragon-live',
license='MIT',
description=u' '.join(__import__('swampdragon_live').__doc__.splitlines()).strip(),
install_requires=['SwampDragon', 'SwampDragon-auth'],
classifiers=[
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 3 - Alpha',
'Operating System :: OS Independent',
],
long_description=read_file('README.md'),
)
|
{
"content_hash": "83e9b9887031c5f8062e64d0c8128286",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 87,
"avg_line_length": 34.411764705882355,
"alnum_prop": 0.6299145299145299,
"repo_name": "mback2k/swampdragon-live",
"id": "b4c8c164cfbc1e214661668417155f00682c950b",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "14731"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="table.hoverlabel", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "3158efe558defb41603acb4411f7e2b4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 32.76923076923077,
"alnum_prop": 0.6126760563380281,
"repo_name": "plotly/plotly.py",
"id": "3ad225a5a0d67409539761aab1708687a064e0a3",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/table/hoverlabel/_bgcolorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from time import time
from unittest.mock import patch
import pytest
from esteid.authentication import Authenticator
from esteid.authentication.types import SessionData
from esteid.exceptions import EsteidError, SigningSessionDoesNotExist, SigningSessionExists
@pytest.fixture()
def test_session_data():
return SessionData(timestamp=11111, session_id="test", hash_value_b64="MQ==") # a very old timestamp
@patch.object(Authenticator, "AUTHENTICATION_METHODS", {})
def test_authenticator_register_subclass():
def create_authenticator():
class TestAuthenticator(Authenticator):
pass
return TestAuthenticator
authenticator_class = create_authenticator()
assert Authenticator.AUTHENTICATION_METHODS == {"test": authenticator_class}
# Registering the same authenticator again fails: class creation asserts the method is not yet registered, which is no longer true
with pytest.raises(AssertionError):
create_authenticator()
@patch.object(Authenticator, "AUTHENTICATION_METHODS", {})
def test_authenticator_select():
class MyAuthenticator(Authenticator):
pass
assert Authenticator.select_authenticator("my") is MyAuthenticator
with pytest.raises(EsteidError):
Authenticator.select_authenticator("nonexistent")
def test_authenticator_init__initial_true(test_session_data):
# Empty session: OK
session = {}
authenticator = Authenticator(session, initial=True)
assert authenticator.session_data == {}
assert authenticator.session is session
assert session == {}
# Wrong data, session is reset
wrong_data = dict(test_session_data)
wrong_data.pop("timestamp")
session = {Authenticator._SESSION_KEY: wrong_data}
authenticator = Authenticator(session, initial=True)
assert authenticator.session_data == {}
assert authenticator.session is session
assert session == {}
# Expired session data, session is reset
session = {Authenticator._SESSION_KEY: {**test_session_data}}
authenticator = Authenticator(session, initial=True)
assert authenticator.session_data == {}
assert authenticator.session is session
assert session == {}
# Some (unvalidated) session data present, not expired => error
session = {Authenticator._SESSION_KEY: {"timestamp": int(time()), "key": "value"}}
with pytest.raises(SigningSessionExists):
Authenticator(session, initial=True)
# Correct session data present, not expired => error
session = {Authenticator._SESSION_KEY: {**test_session_data, "timestamp": int(time()), "key": "value"}}
with pytest.raises(SigningSessionExists):
Authenticator(session, initial=True)
def test_authenticator_init__initial_false(test_session_data):
# Wrong data: empty
session = {}
with pytest.raises(SigningSessionDoesNotExist):
Authenticator(session, initial=False)
# Wrong data: No timestamp field
wrong_data = dict(test_session_data)
wrong_data.pop("timestamp")
session = {Authenticator._SESSION_KEY: wrong_data}
with pytest.raises(SigningSessionDoesNotExist):
Authenticator(session, initial=False)
# Expired session
session = {Authenticator._SESSION_KEY: test_session_data}
with pytest.raises(SigningSessionDoesNotExist):
Authenticator(session, initial=False)
# Session unexpired and valid => All ok
timestamp = int(time()) - Authenticator.SESSION_VALIDITY_TIMEOUT + 1
session = {Authenticator._SESSION_KEY: {**test_session_data, "timestamp": timestamp, "key": "value"}}
authenticator = Authenticator(session, initial=False)
assert authenticator.session_data == {**test_session_data, "timestamp": timestamp, "key": "value"}
assert authenticator.session is session
|
{
"content_hash": "7d4351d7b9acc4b5c6beb4cfca7db2d4",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 107,
"avg_line_length": 34.953271028037385,
"alnum_prop": 0.7195187165775401,
"repo_name": "thorgate/django-esteid",
"id": "f0f0e94017d780f520b68eda0184a2677a227048",
"size": "3740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esteid/authentication/tests/test_authenticator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "53642"
},
{
"name": "JavaScript",
"bytes": "62010"
},
{
"name": "Makefile",
"bytes": "1533"
},
{
"name": "Python",
"bytes": "268714"
}
],
"symlink_target": ""
}
|
"""This is a websocket chat example with many servers. A client can connect to
any of the servers and their messages will be received by all clients connected
to any of the servers.
Run the examples like this:
$ python examples/chat_bridge.py tcp://127.0.0.1:12345 tcp://127.0.0.1:12346
and the servers like this (changing the port for each one obviously):
$ python examples/distributed_websocket_chat.py -p tcp://127.0.0.1:12345 -s tcp://127.0.0.1:12346 7000
So all messages are published to port 12345, and the bridge device forwards
them to port 12346, where the servers subscribe to them.
"""
import os, sys
import evy
from collections import defaultdict
from evy import spawn_n, sleep
from evy import wsgi
from evy import websocket
from evy.patched import zmq
from evy.hubs import get_hub, use_hub
from uuid import uuid1
use_hub('zeromq')
ctx = zmq.Context()
class IDName(object):
def __init__ (self):
self.id = uuid1()
self.name = None
def __str__ (self):
if self.name:
return self.name
else:
return str(self.id)
def pack_message (self, msg):
return self, msg
def unpack_message (self, msg):
sender, message = msg
sender_name = 'you said' if sender.id == self.id\
else '%s says' % sender
return "%s: %s" % (sender_name, message)
participants = defaultdict(IDName)
def subscribe_and_distribute (sub_socket):
global participants
while True:
msg = sub_socket.recv_pyobj()
for ws, name_id in participants.items():
to_send = name_id.unpack_message(msg)
if to_send:
try:
ws.send(to_send)
except:
del participants[ws]
@websocket.WebSocketWSGI
def handle (ws):
global pub_socket
name_id = participants[ws]
ws.send("Connected as %s, change name with 'name: new_name'" % name_id)
try:
while True:
m = ws.wait()
if m is None:
break
if m.startswith('name:'):
old_name = str(name_id)
new_name = m.split(':', 1)[1].strip()
name_id.name = new_name
m = 'Changed name from %s' % old_name
pub_socket.send_pyobj(name_id.pack_message(m))
sleep()
finally:
del participants[ws]
def dispatch (environ, start_response):
"""Resolves to the web page or the websocket depending on the path."""
global port
if environ['PATH_INFO'] == '/chat':
return handle(environ, start_response)
else:
start_response('200 OK', [('content-type', 'text/html')])
return [open(os.path.join(
os.path.dirname(__file__),
'websocket_chat.html')).read() % dict(port = port)]
port = None
if __name__ == "__main__":
usage = 'usage: distributed_websocket_chat.py -p <pub address> -s <sub address> <port number>'
if len(sys.argv) != 6:
print usage
sys.exit(1)
pub_addr = sys.argv[2]
sub_addr = sys.argv[4]
try:
port = int(sys.argv[5])
except ValueError:
print "Error port supplied couldn't be converted to int\n", usage
sys.exit(1)
try:
pub_socket = ctx.socket(zmq.PUB)
pub_socket.connect(pub_addr)
print "Publishing to %s" % pub_addr
sub_socket = ctx.socket(zmq.SUB)
sub_socket.connect(sub_addr)
sub_socket.setsockopt(zmq.SUBSCRIBE, "")
print "Subscribing to %s" % sub_addr
except:
print "Couldn't create sockets\n", usage
sys.exit(1)
spawn_n(subscribe_and_distribute, sub_socket)
listener = evy.listen(('127.0.0.1', port))
print "\nVisit http://localhost:%s/ in your websocket-capable browser.\n" % port
wsgi.server(listener, dispatch)
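# A hedged sketch (assumption; the real examples/chat_bridge.py is not shown
# here): the bridge referred to in the module docstring only needs to bind a
# SUB socket on the publish address (12345), bind a PUB socket on the
# subscribe address (12346), and relay everything between them, e.g. with
# plain pyzmq:
#
#     import sys
#     import zmq
#
#     def bridge(pub_addr, sub_addr):
#         ctx = zmq.Context()
#         frontend = ctx.socket(zmq.SUB)    # chat servers connect their PUB here
#         frontend.bind(pub_addr)
#         frontend.setsockopt(zmq.SUBSCRIBE, b"")
#         backend = ctx.socket(zmq.PUB)     # chat servers connect their SUB here
#         backend.bind(sub_addr)
#         zmq.proxy(frontend, backend)      # blocks, forwarding all messages
#
#     if __name__ == "__main__":
#         bridge(sys.argv[1], sys.argv[2])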
|
{
"content_hash": "e0385786f95806ea790ce66d1f0f7260",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 102,
"avg_line_length": 29.78125,
"alnum_prop": 0.6012591815320042,
"repo_name": "inercia/evy",
"id": "d995c2da51927ada51b3f90072c28d4806fe30b1",
"size": "3812",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/distributed_websocket_chat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "948569"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
}
|
"""Utility functions for classifying and solving
ordinary and partial differential equations.
Contains
========
_preprocess
ode_order
_desolve
"""
from __future__ import print_function, division
from sympy.core.function import Function, Derivative, AppliedUndef
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Wild
def _preprocess(expr, func=None, hint='_Integral'):
"""Prepare expr for solving by making sure that differentiation
is done so that only func remains in unevaluated derivatives and
(if hint doesn't end with _Integral) that doit is applied to all
other derivatives. If hint is None, don't do any differentiation.
(Currently this may cause some simple differential equations to
fail.)
In case func is None, an attempt will be made to autodetect the
function to be solved for.
>>> from sympy.solvers.deutils import _preprocess
>>> from sympy import Derivative, Function, Integral, sin
>>> from sympy.abc import x, y, z
>>> f, g = map(Function, 'fg')
Apply doit to derivatives that contain more than the function
of interest:
>>> _preprocess(Derivative(f(x) + x, x))
(Derivative(f(x), x) + 1, f(x))
Do others if the differentiation variable(s) intersect with those
of the function of interest or contain the function of interest:
>>> _preprocess(Derivative(g(x), y, z), f(y))
(0, f(y))
>>> _preprocess(Derivative(f(y), z), f(y))
(0, f(y))
Do others if the hint doesn't end in '_Integral' (the default
assumes that it does):
>>> _preprocess(Derivative(g(x), y), f(x))
(Derivative(g(x), y), f(x))
>>> _preprocess(Derivative(f(x), y), f(x), hint='')
(0, f(x))
Don't do any derivatives if hint is None:
>>> eq = Derivative(f(x) + 1, x) + Derivative(f(x), y)
>>> _preprocess(eq, f(x), hint=None)
(Derivative(f(x) + 1, x) + Derivative(f(x), y), f(x))
If it's not clear what the function of interest is, it must be given:
>>> eq = Derivative(f(x) + g(x), x)
>>> _preprocess(eq, g(x))
(Derivative(f(x), x) + Derivative(g(x), x), g(x))
>>> try: _preprocess(eq)
... except ValueError: print("A ValueError was raised.")
A ValueError was raised.
"""
derivs = expr.atoms(Derivative)
if not func:
funcs = set.union(*[d.atoms(AppliedUndef) for d in derivs])
if len(funcs) != 1:
raise ValueError('The function cannot be '
'automatically detected for %s.' % expr)
func = funcs.pop()
fvars = set(func.args)
if hint is None:
return expr, func
reps = [(d, d.doit()) for d in derivs if not hint.endswith('_Integral') or
d.has(func) or set(d.variables) & fvars]
eq = expr.subs(reps)
return eq, func
def ode_order(expr, func):
"""
Returns the order of a given differential
equation with respect to func.
This function is implemented recursively.
Examples
========
>>> from sympy import Function
>>> from sympy.solvers.deutils import ode_order
>>> from sympy.abc import x
>>> f, g = map(Function, ['f', 'g'])
>>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +
... f(x).diff(x), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))
3
"""
a = Wild('a', exclude=[func])
if expr.match(a):
return 0
if isinstance(expr, Derivative):
if expr.args[0] == func:
return len(expr.variables)
else:
order = 0
for arg in expr.args[0].args:
order = max(order, ode_order(arg, func) + len(expr.variables))
return order
else:
order = 0
for arg in expr.args:
order = max(order, ode_order(arg, func))
return order
def _desolve(eq, func=None, hint="default", ics=None, simplify=True, **kwargs):
"""This is a helper function to dsolve and pdsolve in the ode
and pde modules.
If the hint provided to the function is "default", then a dict with
the following keys are returned
'func' - It provides the function for which the differential equation
has to be solved. This is useful when the expression has more than
one function in it.
'default' - The default key as returned by classifier functions in ode
and pde.py
'hint' - The hint given by the user for which the differential equation
is to be solved. If the hint given by the user is 'default',
then the value of 'hint' and 'default' is the same.
'order' - The order of the function as returned by ode_order
'match' - It returns the match as given by the classifier functions, for
the default hint.
If the hint provided to the function is not "default" and is not in
('all', 'all_Integral', 'best'), then a dict with the above mentioned keys
is returned along with the keys which are returned when dict in
classify_ode or classify_pde is set True
If the hint given is in ('all', 'all_Integral', 'best'), then this function
returns a nested dict, with the keys, being the set of classified hints
returned by classifier functions, and the values being the dict of form
as mentioned above.
Key 'eq' is a common key to all the above mentioned hints which returns an
expression if eq given by user is an Equality.
See Also
========
classify_ode(ode.py)
classify_pde(pde.py)
"""
prep = kwargs.pop('prep', True)
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
# preprocess the equation and find func if not given
if prep or func is None:
eq, func = _preprocess(eq, func)
prep = False
# type is an argument passed by the solve functions in ode and pde.py
# that identifies whether the function caller is an ordinary
# or partial differential equation. Accordingly corresponding
# changes are made in the function.
type = kwargs.get('type', None)
xi = kwargs.get('xi')
eta = kwargs.get('eta')
x0 = kwargs.get('x0', 0)
terms = kwargs.get('n')
if type == 'ode':
from sympy.solvers.ode import classify_ode, allhints
classifier = classify_ode
string = 'ODE '
dummy = ''
elif type == 'pde':
from sympy.solvers.pde import classify_pde, allhints
classifier = classify_pde
string = 'PDE '
dummy = 'p'
# Magic that should only be used internally. Prevents classify_ode from
# being called more than it needs to be by passing its results through
# recursive calls.
if kwargs.get('classify', True):
hints = classifier(eq, func, dict=True, ics=ics, xi=xi, eta=eta,
n=terms, x0=x0, prep=prep)
else:
# Here is what all this means:
#
# hint: The hint method given to _desolve() by the user.
# hints: The dictionary of hints that match the DE, along with other
# information (including the internal pass-through magic).
# default: The default hint to return, the first hint from allhints
# that matches the hint; obtained from classify_ode().
# match: Dictionary containing the match dictionary for each hint
# (the parts of the DE for solving). When going through the
# hints in "all", this holds the match string for the current
# hint.
# order: The order of the DE, as determined by ode_order().
hints = kwargs.get('hint',
{'default': hint,
hint: kwargs['match'],
'order': kwargs['order']})
if hints['order'] == 0:
raise ValueError(
str(eq) + " is not a differential equation in " + str(func))
if not hints['default']:
# classify_ode will set hints['default'] to None if no hints match.
if hint not in allhints and hint != 'default':
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints['ordered_hints'] and hint != 'default':
raise ValueError(string + str(eq) + " does not match hint " + hint)
else:
raise NotImplementedError(dummy + "solve" + ": Cannot solve " + str(eq))
if hint == 'default':
return _desolve(eq, func, ics=ics, hint=hints['default'], simplify=simplify,
prep=prep, x0=x0, classify=False, order=hints['order'],
match=hints[hints['default']], xi=xi, eta=eta, n=terms, type=type)
elif hint in ('all', 'all_Integral', 'best'):
retdict = {}
failedhints = {}
gethints = set(hints) - set(['order', 'default', 'ordered_hints'])
if hint == 'all_Integral':
for i in hints:
if i.endswith('_Integral'):
gethints.remove(i[:-len('_Integral')])
# special cases
for k in ["1st_homogeneous_coeff_best", "1st_power_series",
"lie_group", "2nd_power_series_ordinary", "2nd_power_series_regular"]:
if k in gethints:
gethints.remove(k)
for i in gethints:
sol = _desolve(eq, func, ics=ics, hint=i, x0=x0, simplify=simplify, prep=prep,
classify=False, n=terms, order=hints['order'], match=hints[i], type=type)
retdict[i] = sol
retdict['all'] = True
retdict['eq'] = eq
return retdict
elif hint not in allhints: # and hint not in ('default', 'ordered_hints'):
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints:
raise ValueError(string + str(eq) + " does not match hint " + hint)
else:
# Key added to identify the hint needed to solve the equation
hints['hint'] = hint
hints.update({'func': func, 'eq': eq})
return hints
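# A hedged usage sketch of the dict documented in the _desolve docstring above
# (illustrative only; the hint-specific keys depend on what classify_ode
# matches for the given equation):
#
#     >>> from sympy import Function, Derivative, symbols
#     >>> from sympy.solvers.deutils import _desolve
#     >>> x = symbols('x'); f = Function('f')
#     >>> info = _desolve(Derivative(f(x), x) - f(x), f(x),
#     ...                 hint='default', type='ode')
#     >>> info['func'], info['order']
#     (f(x), 1)
#     >>> all(k in info for k in ('eq', 'func', 'hint', 'default', 'order'))
#     True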
|
{
"content_hash": "c9ff22c15aaa9c4a9b830de70690af27",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 90,
"avg_line_length": 37.352059925093634,
"alnum_prop": 0.5996189712223002,
"repo_name": "kmacinnis/sympy",
"id": "4c87d0d2289ff6d435865df237589c0bed0260c3",
"size": "9973",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sympy/solvers/deutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13573973"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "1284"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""
The stacks.mutators module. This module contains mutators that modify the
behaviour of Stages.
"""
import abc
import functools
from libpb import env, event, log, make, pkg
from libpb.stacks import base
__all__ = [
"Deinstall", "MakeStage", "Packagable", "PackageInstaller", "PostFetch",
"Resolves"
]
class Deinstall(base.Stage):
"""Deinstall a port's packages before doing the stage."""
# Subclasses are not allowed to raise a job.JobStalled() exception.
def work(self):
"""Deinstall the port's package before continuing with the stage."""
# HACK: overwrite the class's self._do_stage() method with our own
try:
ds_orig = self._do_stage
# Functools.partial() should be more jit friendly than lambda
self._do_stage = functools.partial(self.__do_stage, _ds=ds_orig)
super(Deinstall, self).work()
finally:
self._do_stage = ds_orig
def __do_stage(self, _ds):
"""Issue a pkg.remove() or proceed with the stage."""
if self.port.install_status == pkg.ABSENT:
self._do_stage = _ds
self._do_stage()
else:
self.port.install_status = pkg.ABSENT
self.pid = pkg.remove(self.port).connect(self.__post_pkg_remove).pid
pkg.db.remove(self.port)
def __post_pkg_remove(self, pkg_remove):
"""Process the results from pkg.remove."""
self.pid = None
if pkg_remove.wait() == make.SUCCESS:
self._do_stage()
else:
log.error("Deinstall.__post_pkg_remove()",
"Port '%s': failed to deinstall for stage %s" %
(self.port.origin, self.name))
self._finalise(False)
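# A hedged, standalone sketch of the method-swap pattern used by
# Deinstall.work() above (class and method names below are illustrative, not
# from libpb): stash the bound method, replace it with a functools.partial
# that closes over the original, and restore it in a finally block.
#
#     import functools
#
#     class Stage(object):
#         def work(self):
#             self._do_stage()
#
#         def _do_stage(self):
#             print("real stage work")
#
#     class Wrapped(Stage):
#         def work(self):
#             try:
#                 original = self._do_stage
#                 self._do_stage = functools.partial(self.__wrapped, _ds=original)
#                 super(Wrapped, self).work()
#             finally:
#                 self._do_stage = original
#
#         def __wrapped(self, _ds):
#             print("extra step before the stage")
#             _ds()
#
#     Wrapped().work()  # -> extra step before the stage / real stage work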
class MakeStage(base.Stage):
"""A stage that requires a standard make(1) call."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def _pre_make(self):
"""Prepare and issue a make(1) command."""
pass
def _post_make(self, status): # pylint: disable-msg=R0201
"""Process the result from a make(1) command."""
return status
def _do_stage(self):
"""Run the self._pre_make() command to issue a make.target()."""
self._pre_make()
def _make_target(self, targets, **kwargs):
"""Build the requested targets."""
pmake = make.make_target(self.port, targets, **kwargs)
self.pid = pmake.connect(self.__make).pid
def __make(self, pmake):
"""Call the _post_[stage] function and finalise the stage."""
self.pid = None
status = self._post_make(pmake.wait() == make.SUCCESS)
if status is not None:
self._finalise(status)
class Packagable(base.Stage):
"""A stage depending on the packagability of a port."""
@staticmethod
def check(port):
"""Check if the port is compatible with packaging."""
return not port.attr["no_package"]
class PackageInstaller(base.Stage):
"""Install a port from a package."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def _add_pkg(self):
"""Issue a pkg.add() command."""
pass
def _do_stage(self): # pylint: disable-msg=E0202
"""Issue a pkg.add() to install the package from a repo."""
log.debug("PackageInstaller._do_stage()",
"Port '%s': building stage %s" %
(self.port.origin, self.name))
pkg_add = self._add_pkg()
# pkg_add may be False if installing `ports-mgmt/pkg` and
# env.flags["pkg_mgmt"] == "pkgng"
if pkg_add:
self.pid = pkg_add.connect(self._post_pkg_add).pid
else:
# Cannot call self._finalise from within self.work() ->
# self._do_stage()
event.post_event(self._finalise, False)
def _post_pkg_add(self, pkg_add):
"""Process the results of pkg.add()."""
self.pid = None
if pkg_add.wait() == make.SUCCESS:
log.debug("PackageInstaller._post_pkg_add()",
"Port '%s': finished stage %s" %
(self.port.origin, self.name))
if "explicit" not in self.port.flags:
pkg_change = self.pid = pkg.change(self.port, "explicit", False)
if pkg_change:
self.pid = pkg_change.connect(self._post_pkg_change).pid
return
self._finalise(True)
else:
log.error("PackageInstaller._port_pkg_add()",
"Port '%s': failed stage %s" %
(self.port.origin, self.name))
self._finalise(False)
def _post_pkg_change(self, _pkg_change):
"""Process the results of pkg.change()."""
self.pid = None
self._finalise(True)
class PostFetch(base.Stage):
"""Indicate this stage is post fetch (and complete if fetch-only)."""
def complete(self):
return env.flags["fetch_only"] or super(PostFetch, self).complete()
class Repo(base.Stage):
"""Indicate if the port is in the repository."""
@staticmethod
def check(port):
"""Check if the port is available via the repository."""
return port in pkg.repo_db
class Resolves(base.Stage):
"""A stage that resolves a port."""
def _finalise(self, status):
"""Mark the port as resolved."""
if status:
pkg.db.add(self.port)
self.port.install_status = pkg.CURRENT
self.port.dependent.status_changed()
super(Resolves, self)._finalise(status)
|
{
"content_hash": "7a08e7d5d5b71c6cffa7f30a09f678dd",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 33.05294117647059,
"alnum_prop": 0.5700302544936822,
"repo_name": "DragonSA/portbuilder",
"id": "f551e3c060f722ac31e906c84fd563ce8897ca8e",
"size": "5619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libpb/stacks/mutators.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "146688"
},
{
"name": "Shell",
"bytes": "971"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import os
import subprocess
import pytest
from flexmock import flexmock
from atomic_reactor.constants import EXPORTED_SQUASHED_IMAGE_NAME
from atomic_reactor.util import ImageName
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PostBuildPluginsRunner
from atomic_reactor.plugins.post_cp_built_image_to_nfs import CopyBuiltImageToNFSPlugin
from tests.constants import INPUT_IMAGE
from tests.fixtures import docker_tasker # noqa
class Y(object):
pass
class X(object):
image_id = INPUT_IMAGE
source = Y()
source.dockerfile_path = None
source.path = None
base_image = ImageName(repo="qwe", tag="asd")
NFS_SERVER_PATH = "server:path"
@pytest.mark.parametrize('dest_dir', [None, "test_directory"]) # noqa
def test_cp_built_image_to_nfs(tmpdir, docker_tasker, dest_dir):
mountpoint = tmpdir.join("mountpoint")
def fake_check_call(cmd):
assert cmd == [
"mount",
"-t", "nfs",
"-o", "nolock",
NFS_SERVER_PATH,
mountpoint,
]
flexmock(subprocess, check_call=fake_check_call)
workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")
workflow.builder = X()
workflow.exported_image_sequence.append({"path": os.path.join(str(tmpdir),
EXPORTED_SQUASHED_IMAGE_NAME)})
open(workflow.exported_image_sequence[-1].get("path"), 'a').close()
runner = PostBuildPluginsRunner(
docker_tasker,
workflow,
[{
'name': CopyBuiltImageToNFSPlugin.key,
'args': {
"nfs_server_path": NFS_SERVER_PATH,
"dest_dir": dest_dir,
"mountpoint": str(mountpoint),
}
}]
)
runner.run()
if dest_dir is None:
assert os.path.isfile(os.path.join(str(mountpoint), EXPORTED_SQUASHED_IMAGE_NAME))
else:
assert os.path.isfile(os.path.join(str(mountpoint), dest_dir, EXPORTED_SQUASHED_IMAGE_NAME))
|
{
"content_hash": "590161693094119bc53542ed283d7e8e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 100,
"avg_line_length": 30.026315789473685,
"alnum_prop": 0.6393514460999123,
"repo_name": "jarodwilson/atomic-reactor",
"id": "0f433def73f16d431ab84cb6722ff43c551c8cd6",
"size": "2282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_cp_built_image_to_nfs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1134730"
},
{
"name": "Shell",
"bytes": "3589"
}
],
"symlink_target": ""
}
|
__about__ = """
In addition to what is provided by the "zero" project, this project
provides thorough integration with django-user-accounts, adding
comprehensive account management functionality. It is a foundation
suitable for most sites that have user accounts.
"""
default_app_config = "ich_bau.apps.AppConfig"
|
{
"content_hash": "83f709828a930261bcb859685415a5f0",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 67,
"avg_line_length": 39.375,
"alnum_prop": 0.780952380952381,
"repo_name": "postpdm/ich_bau",
"id": "5f744ac940cc7e07a8f73194cc7966177f46f407",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ich_bau/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "59357"
},
{
"name": "Python",
"bytes": "195507"
}
],
"symlink_target": ""
}
|
class NField(object):
"""
Basic class that represents the particle density in the observer rest frame.
"""
def n(self, x, y, z):
raise NotImplementedError
def n_vec(self, ps):
raise NotImplementedError
class BKNField(NField):
"""
Class that describes the Blandford-Königl particle density, with an r**(-2)
dependence on the distance z along the jet
"""
def __init__(self, z_0=1., n_0=500.):
self.z_0 = z_0
self.n_0 = n_0
def n(self, x, y, z):
return self.n_0 * (self.z_0 / z) ** 2.
def n_vec(self, ps):
"""
:param ps:
Numpy array with shape (N, 3,) where N is the number of points.
:return:
Numpy array of values in N points.
"""
return self.n_0 * (self.z_0 / ps[:, 2]) ** 2.
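# A hedged usage sketch (not from the original module; the numbers follow
# directly from n = n_0 * (z_0 / z)**2):
#
#     >>> import numpy as np
#     >>> field = BKNField(z_0=1., n_0=500.)
#     >>> field.n(0., 0., 2.)     # 500 * (1 / 2)**2
#     125.0
#     >>> field.n_vec(np.array([[0., 0., 1.], [0., 0., 5.]]))
#     array([500.,  20.])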
|
{
"content_hash": "c6afb4f769ed2743f07352fbad37ed7f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 25.806451612903224,
"alnum_prop": 0.53625,
"repo_name": "ipashchenko/jetsim",
"id": "66273ff224f93af6c2f4f87ef8dd1fa6d5da7c85",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jetsim/nfields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61532"
}
],
"symlink_target": ""
}
|
from passphrase import *
|
{
"content_hash": "b2efed4526f6f1e41c8422c0d085e5ca",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.8,
"repo_name": "Version2beta/passphrase",
"id": "a597bc2c31498cecd491a21f7bb821b135615dd3",
"size": "25",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "passphrase/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1757"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('repeating_tasks', '0007_auto_20150224_1046'),
]
operations = [
migrations.AlterModelOptions(
name='taskhistory',
options={'ordering': ('completed_at',),
'get_latest_by': 'completed_at'},
),
migrations.AlterField(
model_name='taskhistory',
name='task',
field=models.ForeignKey(
related_name='tasks', to='repeating_tasks.Task'),
preserve_default=True,
),
]
|
{
"content_hash": "21a187bc856b4fae87112e3e8994d2a0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 26.56,
"alnum_prop": 0.5542168674698795,
"repo_name": "ojarva/home-info-display",
"id": "e24bb23780aaa9bfb2b5e197105a56cab11ec3ea",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homedisplay/repeating_tasks/migrations/0008_auto_20150307_1330.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22171"
},
{
"name": "CoffeeScript",
"bytes": "115283"
},
{
"name": "HTML",
"bytes": "51598"
},
{
"name": "JavaScript",
"bytes": "9902"
},
{
"name": "Python",
"bytes": "310675"
},
{
"name": "Shell",
"bytes": "1617"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import TemplateView
from django.contrib import admin
from apps.OASIS_Home import views
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^$', views.HomeView.as_view(), name='homePage'),
url(r'^blog/', include('apps.OASIS_Blog.urls'))
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{
"content_hash": "ad98644abe176f6226219b176fe81b58",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 33.19047619047619,
"alnum_prop": 0.7546628407460545,
"repo_name": "brotherjack/oasison-repo",
"id": "77c2e84ef59d502a1d8d1d0018cf2b065dc13b9a",
"size": "697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46686"
},
{
"name": "HTML",
"bytes": "80605"
},
{
"name": "JavaScript",
"bytes": "146647"
},
{
"name": "Python",
"bytes": "22580"
}
],
"symlink_target": ""
}
|
"""This example adds various types of targeting criteria to a given campaign.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CampaignCriterionService.mutate
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
# Initialize appropriate service.
campaign_criterion_service = client.GetService(
'CampaignCriterionService', version='v201406')
# Create locations. The IDs can be found in the documentation or retrieved
# with the LocationCriterionService.
california = {
'xsi_type': 'Location',
'id': '21137'
}
mexico = {
'xsi_type': 'Location',
'id': '2484'
}
# Create languages. The IDs can be found in the documentation or retrieved
# with the ConstantDataService.
english = {
'xsi_type': 'Language',
'id': '1000'
}
spanish = {
'xsi_type': 'Language',
'id': '1003'
}
# Create location groups. The IDs can be found in the documentation or
# retrieved with the LocationCriterionService.
florida_tier3 = {
'xsi_type': 'LocationGroups',
'matchingFunction': {
'operator': 'AND',
'lhsOperand': [{
'xsi_type': 'IncomeOperand',
# Tiers are numbered 1-10, and represent 10% segments of earners.
# For example, TIER_1 is the top 10%, TIER_2 is the 80-90%, etc.
# Tiers 6 through 10 are grouped into TIER_6_TO_10.
'tier': 'TIER_3'
}],
'rhsOperand': [{
'xsi_type': 'GeoTargetOperand',
'locations': [1015116] # Miami, FL
}]
},
}
florida_downtown = {
'xsi_type': 'LocationGroups',
'matchingFunction': {
'operator': 'AND',
'lhsOperand': [{
'xsi_type': 'PlacesOfInterestOperand',
# Other valid options: AIRPORT, UNIVERSITY.
'category': 'DOWNTOWN',
}],
'rhsOperand': [{
'xsi_type': 'GeoTargetOperand',
'locations': [1015116] # Miami, FL
}]
}
}
# Distance targeting. Area of 10 miles around targets above.
distance_10_miles = {
'xsi_type': 'LocationGroups',
'matchingFunction': {
'operator': 'IDENTITY',
'lhsOperand': [{
'xsi_type': 'LocationExtensionOperand',
'radius': {
'xsi_type': 'ConstantOperand',
'type': 'DOUBLE',
'unit': 'MILES',
'doubleValue': 10
}
}]
}
}
# Create a negative campaign criterion operation.
negative_campaign_criterion_operand = {
'xsi_type': 'NegativeCampaignCriterion',
'campaignId': campaign_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'jupiter cruise'
}
}
# Create operations
operations = []
for criterion in [california, mexico, english, spanish, florida_tier3,
florida_downtown, distance_10_miles]:
operations.append({
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'criterion': criterion
}
})
# Add the negative campaign criterion.
operations.append({
'operator': 'ADD',
'operand': negative_campaign_criterion_operand
})
# Make the mutate request.
result = campaign_criterion_service.mutate(operations)
# Display the resulting campaign criteria.
for campaign_criterion in result['value']:
print ('Campaign criterion with campaign id \'%s\', criterion id \'%s\', '
'and type \'%s\' was added.'
% (campaign_criterion['campaignId'],
campaign_criterion['criterion']['id'],
campaign_criterion['criterion']['type']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID)
|
{
"content_hash": "a848d9a3e61fa75445615e37d3157404",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 30.09090909090909,
"alnum_prop": 0.5830815709969789,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "538b56618005331a8e395620075a812544b8c452",
"size": "4921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201406/targeting/add_campaign_targeting_criteria.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
}
|
import boto3
import pprint
import argparse
import sys
from os.path import basename
from boto3.dynamodb.conditions import Key
def search_known_faceid(f):
ddb = boto3.resource('dynamodb', region_name=args.region)
table = ddb.Table('faces')
hits = {}
for i in f['FaceMatches']:
record = table.query(KeyConditionExpression=Key('id').eq(i['Face']['FaceId']))['Items']
if len(record) > 0:
if 'name' in record[0]:
if record[0]['name'] in hits:
hits[record[0]['name']] += 1
else:
hits[record[0]['name']] = 1
return hits
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--image", help="image file (png or jpg)", required=True)
parser.add_argument("-b", "--bucket", help="S3 rekognition bucket", required=True)
parser.add_argument("-c", "--collection", help="rekognition collection id", default='snerted')
parser.add_argument("-r", "--region", help="aws region", default='us-east-1')
parser.add_argument("-n", "--name", help="name of person in picture", default=None)
args = parser.parse_args()
externalImageId = basename(args.image)
s3 = boto3.client('s3')
results = s3.list_objects(Bucket=args.bucket, Prefix=externalImageId)
if 'Contents' in results:
sys.exit("ERROR: {} already exists in bucket {}".format(externalImageId, args.bucket))
else:
# first copy image up to s3
with open(args.image, 'rb') as data:
s3.upload_fileobj(data, args.bucket, args.image)
# recognize
rekognition = boto3.client('rekognition', region_name=args.region)
with open(args.image, 'rb') as image:
response = rekognition.index_faces(CollectionId=args.collection,
DetectionAttributes=["ALL"],
Image={"S3Object": {
"Bucket": args.bucket,
"Name": externalImageId
}},
ExternalImageId=externalImageId)
if len(response['FaceRecords']) == 0:
print "No faces found"
elif len(response['FaceRecords']) == 1 and args.name is not None:
dynamodb = boto3.resource('dynamodb', region_name=args.region)
table = dynamodb.Table('faces')
for i in response['FaceRecords']:
resp = table.put_item(
Item={
'id': i['Face']['FaceId'],
'name': args.name
}
)
else:
print "Face(s) Found: {} ".format(len(response['FaceRecords']))
if args.name is not None:
print("Too many faces in image, ignoring name argument")
dynamodb = boto3.resource('dynamodb', region_name=args.region)
table = dynamodb.Table('faces')
for i in response['FaceRecords']:
r = rekognition.search_faces(
CollectionId=args.collection,
FaceId=i['Face']['FaceId']
)
hits = search_known_faceid(r)
if len(hits) == 1:
resp = table.put_item(
Item={
'id': i['Face']['FaceId'],
'name': hits.keys()[0]
}
)
print("================================================================")
print(
"Found: {} Hits: {} FaceId: {}".format(hits.keys()[0],
hits.values()[0],
i['Face']['FaceId'])
)
else:
resp = table.put_item(
Item={
'id': i['Face']['FaceId']
}
)
print("================================================================")
print("Unknown:")
pprint.pprint(i)
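# A hedged invocation sketch for the argparse options defined above
# (bucket, collection and name values are hypothetical):
#
#     python absorb.py -f face.jpg -b my-rekognition-bucket -c snerted \
#         -r us-east-1 -n "Jane Doe"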
|
{
"content_hash": "d51d72c884d17c47088bad14ff8c4710",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 98,
"avg_line_length": 45.59,
"alnum_prop": 0.42904145645974995,
"repo_name": "stevewoolley/riot",
"id": "722bd1a59317ab57c24ef9e78834735f58a598de",
"size": "4582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faces/absorb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "280"
},
{
"name": "HTML",
"bytes": "92449"
},
{
"name": "JavaScript",
"bytes": "42325"
},
{
"name": "Python",
"bytes": "29633"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
}
|
import oauth2 as oauth
import yaml
# import urllib
import json
import os
import time
# import pytz
import datetime
import argparse
import re
import sys
DOMAIN = 'https://ads-api.twitter.com'
VERBOSE = 0
NON_SUB_PARAM_SEGMENTATION_TYPES = ['PLATFORMS', 'LOCATIONS', 'GENDER', 'INTERESTS', 'KEYWORDS']
def main(options):
global VERBOSE
account = options.account_id
headers = options.headers
if options.veryverbose:
VERBOSE = 2
elif options.verbose:
VERBOSE = 1
start = time.clock()
user_twurl = twurlauth()
print("Best practices stats check for :account_id %s" % account)
linesep()
now = datetime.datetime.utcnow()
start_time = datetime.datetime.utcnow() - datetime.timedelta(days=7)
start_time = start_time.replace(minute=0, second=0, microsecond=0)
end_time = datetime.datetime.utcnow()
end_time = end_time.replace(minute=0, second=0, microsecond=0)
end_time -= datetime.timedelta(seconds=1)
print('Current time:\t%s' % now)
print('Start time:\t%s' % start_time)
print('End time:\t%s' % end_time)
linesep()
# check that we have access to this :account_id
resource_path = '/0/accounts/%s' % account
data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
if len(data) == 0:
print('ERROR: Could not locate :account_id %s' % account)
sys.exit(0)
# fetch funding instruments
resource_path = '/0/accounts/%s/funding_instruments?with_deleted=true&count=1000' % account
data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
# filter funding instruments
print("Pre-filtered data:\t\t%s" % len(data))
funding_instruments = check(data, start_time, end_time)
print("Funding instruments:\t\t%s" % len(funding_instruments))
# fetch campaigns
resource_path = '/0/accounts/%s/campaigns?with_deleted=true&count=1000' % account
data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
# filter campaigns
print("Pre-filtered data:\t\t%s" % len(data))
campaigns = check(data, start_time, end_time, 'funding_instrument_id', funding_instruments)
print("Campaigns:\t\t\t%s" % len(campaigns))
# fetch line items
resource_path = '/0/accounts/%s/line_items?with_deleted=true&count=1000' % account
data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
# filter line items
print("Pre-filtered data:\t\t%s" % len(data))
line_items = check(data, start_time, end_time, 'campaign_id', campaigns)
print("Line items:\t\t\t%s" % len(line_items))
# fetch promoted_tweets
resource_path = '/0/accounts/%s/promoted_tweets?with_deleted=true&count=1000' % account
data = get_data(user_twurl, 'GET', headers, DOMAIN + resource_path)
# filter promoted_tweets
print("Pre-filtered data:\t\t%s" % len(data))
promoted_tweets = check(data, start_time, end_time, 'line_item_id', line_items)
print("Promoted Tweets:\t\t%s" % len(promoted_tweets))
total_query_count = 0
total_request_cost = 0
total_rate_limited_query_count = 0
segmented_query_count = 0
segmented_request_cost = 0
if len(line_items) > 0:
print("\tfetching stats for %s line items" % len(line_items))
(query_count,
cost_total,
rate_limited_query_count) = gather_stats(user_twurl, headers, account, 'line_items',
start_time, end_time, line_items)
total_query_count += query_count
total_request_cost += cost_total
if len(promoted_tweets) > 0:
print("\tfetching stats for %s promoted tweets" % len(promoted_tweets))
(query_count,
cost_total,
rate_limited_query_count) = gather_stats(user_twurl, headers, account, 'promoted_tweets',
start_time, end_time, promoted_tweets)
total_query_count += query_count
total_request_cost += cost_total
total_rate_limited_query_count += rate_limited_query_count
# Segmentation queries
if options.segmentation:
if len(line_items) > 0:
print("\tfetching segmentation stats for %s line items" % len(line_items))
for i in NON_SUB_PARAM_SEGMENTATION_TYPES:
(query_count,
cost_total,
rate_limited_query_count) = gather_stats(user_twurl, headers, account,
'line_items', start_time, end_time,
line_items, i)
total_query_count += query_count
total_request_cost += cost_total
segmented_query_count += query_count
segmented_request_cost += cost_total
if len(promoted_tweets) > 0:
print("\tfetching segmentation stats for %s promoted tweets" % len(promoted_tweets))
for i in NON_SUB_PARAM_SEGMENTATION_TYPES:
(query_count,
cost_total,
rate_limited_query_count) = gather_stats(user_twurl, headers, account,
'promoted_tweets', start_time, end_time,
promoted_tweets, i)
total_query_count += query_count
total_request_cost += cost_total
segmented_query_count += query_count
segmented_request_cost += cost_total
linesep()
if options.segmentation:
print("Non-Seg Stats Req Cost:\t\t%s" % (total_request_cost - segmented_request_cost))
print("Segmented Stats Req Cost:\t%s" % segmented_request_cost)
linesep()
print("Total Stats Queries:\t\t%s" % total_query_count)
print("Total Stats Request Cost:\t%s" % total_request_cost)
if VERBOSE > 0:
print("Avg Cost per Query:\t\t%s" % str(total_request_cost / total_query_count))
print("Queries Rate Limited:\t\t%s" % total_rate_limited_query_count)
linesep()
elapsed = (time.clock() - start)
print('Time elapsed:\t\t\t%s' % elapsed)
def input():
p = argparse.ArgumentParser(description='Fetch Twitter Ads Account Stats')
p.add_argument('-a', '--account', required=True, dest='account_id', help='Ads Account ID')
p.add_argument('-A', '--header', dest='headers', action='append',
help='HTTP headers to include')
p.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Verbose outputs cost avgs')
p.add_argument('-vv', '--very-verbose', dest='veryverbose', action='store_true',
help='Very verbose outputs API queries made')
p.add_argument('-s', '--segmentation', dest='segmentation', help='Pull segmentation stats',
action='store_true')
args = p.parse_args()
return args
def twurlauth():
with open(os.path.expanduser('~/.twurlrc'), 'r') as f:
contents = yaml.load(f)
f.close()
default_user = contents["configuration"]["default_profile"][0]
CONSUMER_KEY = contents["configuration"]["default_profile"][1]
CONSUMER_SECRET = contents["profiles"][default_user][CONSUMER_KEY]["consumer_secret"]
USER_OAUTH_TOKEN = contents["profiles"][default_user][CONSUMER_KEY]["token"]
USER_OAUTH_TOKEN_SECRET = contents["profiles"][default_user][CONSUMER_KEY]["secret"]
return CONSUMER_KEY, CONSUMER_SECRET, USER_OAUTH_TOKEN, USER_OAUTH_TOKEN_SECRET
def request(user_twurl, http_method, headers, url):
CONSUMER_KEY = user_twurl[0]
CONSUMER_SECRET = user_twurl[1]
USER_OAUTH_TOKEN = user_twurl[2]
USER_OAUTH_TOKEN_SECRET = user_twurl[3]
consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
token = oauth.Token(key=USER_OAUTH_TOKEN, secret=USER_OAUTH_TOKEN_SECRET)
client = oauth.Client(consumer, token)
header_list = {}
if headers:
for i in headers:
(key, value) = i.split(': ')
if key and value:
header_list[key] = value
response, content = client.request(url, method=http_method, headers=header_list)
try:
data = json.loads(content)
except:
data = None
return response, data
def get_data(user_twurl, http_method, headers, url):
data = []
res_headers, response = request(user_twurl, http_method, headers, url)
if res_headers['status'] != '200':
print('ERROR: query failed, cannot continue: %s' % url)
sys.exit(0)
if response and 'data' in response:
data += response['data']
while 'next_cursor' in response and response['next_cursor'] is not None:
cursor_url = url + '&cursor=%s' % response['next_cursor']
res_headers, response = request(user_twurl, http_method, headers, cursor_url)
if response and 'data' in response:
data += response['data']
return data
def gather_stats(user_twurl, headers, account_id, entity_type, start_time, end_time, input_entities,
segmentation=None):
entities = list(input_entities)
resource_url = DOMAIN + "/0/stats/accounts/%s/%s" % (account_id, entity_type)
param_data = (start_time.isoformat(), end_time.isoformat())
query_params = '?granularity=HOUR&start_time=%sZ&end_time=%sZ' % param_data
query_param_entity_name = re.sub(r's$', '_ids', entity_type)
if segmentation:
query_params += '&segmentation_type=%s' % segmentation
query_count = 0
cost_total = 0
rate_limited_query_count = 0
limit_exceeded_sleep = 0
while entities:
if limit_exceeded_sleep > 0:
print('\t! sleeping for %s' % limit_exceeded_sleep)
time.sleep(limit_exceeded_sleep)
limit_exceeded_sleep = 0
query_entities = []
limit = 20
if len(entities) < limit:
limit = len(entities)
for _ in range(limit):
query_entities.append(entities.pop(0))
url_entites = '&%s=%s' % (query_param_entity_name, ','.join(query_entities))
stats_url = resource_url + query_params + url_entites
res_headers, res_data = request(user_twurl, 'GET', headers, stats_url)
if 'x-request-cost' in res_headers:
cost_total += int(res_headers['x-request-cost'])
reset_at = int(res_headers['x-cost-rate-limit-reset'])
if (('x-cost-rate-limit-remaining' in res_headers and
int(res_headers['x-cost-rate-limit-remaining']) == 0) and
res_headers['status'] == '429'):
limit_exceeded_sleep = reset_at - int(time.time())
if res_headers['status'] == '200':
query_count += 1
if VERBOSE > 1:
print('VERBOSE:\tStats Query:\t%s' % stats_url)
elif res_headers['status'] == '429':
print("RATE LIMITED! adding entities back to queue")
rate_limited_query_count += 1
entities.extend(query_entities)
elif res_headers['status'] == '503':
print("TIMEOUT!")
print(stats_url)
entities.extend(query_entities)
else:
print("ERROR %s" % res_headers['status'])
print(res_headers)
sys.exit(0)
if VERBOSE > 0:
if segmentation:
print('VERBOSE:\tSegmentation type:\t%s' % segmentation)
print('VERBOSE:\tAvg cost per query:\t%s' % str(cost_total / query_count))
return query_count, cost_total, rate_limited_query_count
def check(data, start_time, end_time, filter_field=None, filter_data=[]):
d = []
if data and len(data) > 0:
for i in data:
if 'end_time' in i and i['end_time'] and format_timestamp(i['end_time']) < start_time:
continue
elif ('start_time' in i and i['start_time'] and
format_timestamp(i['start_time']) > end_time):
continue
elif i['deleted'] and format_timestamp(i['updated_at']) < start_time:
continue
elif i['paused'] and format_timestamp(i['updated_at']) < start_time:
continue
elif filter_field and i[filter_field] not in filter_data:
continue
else:
d.append(i['id'])
return d
def format_timestamp(timestamp):
return datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
def linesep():
print('-----------------------------------------------')
if __name__ == '__main__':
options = input()
main(options)
|
{
"content_hash": "72e52831da90a961873e4e905ccda179",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 100,
"avg_line_length": 36.5,
"alnum_prop": 0.595930002375485,
"repo_name": "twitterdev/ads-platform-tools",
"id": "1c34a90004311f67906c6ddffd962fb446f1d3c1",
"size": "12670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/fetch_stats.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "3330"
},
{
"name": "Python",
"bytes": "31198"
},
{
"name": "Ruby",
"bytes": "5128"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('studygroups', '0155_studygroup_unlisted'),
]
operations = [
migrations.CreateModel(
name='FacilitatorGuide',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('deleted_at', models.DateTimeField(blank=True, null=True)),
('title', models.CharField(max_length=256)),
('caption', models.CharField(max_length=512)),
('link', models.URLField()),
('image', models.ImageField(blank=True, upload_to='')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='studygroups.Course')),
('study_group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='studygroups.StudyGroup')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
{
"content_hash": "48c06c3d376e132720eb8f7229b1bdde",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 137,
"avg_line_length": 44.57575757575758,
"alnum_prop": 0.5955132562882393,
"repo_name": "p2pu/learning-circles",
"id": "98680f6b3ece5e8e15ecf0847264789026d2f63a",
"size": "1521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "studygroups/migrations/0156_facilitatorguide.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "2110"
},
{
"name": "HTML",
"bytes": "222765"
},
{
"name": "JavaScript",
"bytes": "202138"
},
{
"name": "Python",
"bytes": "859945"
},
{
"name": "SCSS",
"bytes": "122949"
},
{
"name": "Shell",
"bytes": "808"
}
],
"symlink_target": ""
}
|
""" define extension dtypes """
import re
import numpy as np
from pandas import compat
from pandas.core.dtypes.generic import ABCIndexClass
class ExtensionDtype(object):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
THIS IS NOT A REAL NUMPY DTYPE
"""
name = None
names = None
type = None
subdtype = None
kind = None
str = None
num = 100
shape = tuple()
itemsize = 8
base = None
isbuiltin = 0
isnative = 0
_metadata = []
_cache = {}
def __unicode__(self):
return self.name
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
def __hash__(self):
raise NotImplementedError("sub-classes should implement an __hash__ "
"method")
def __eq__(self, other):
raise NotImplementedError("sub-classes should implement an __eq__ "
"method")
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
# pickle support; we don't want to pickle the cache
return {k: getattr(self, k, None) for k in self._metadata}
@classmethod
def reset_cache(cls):
""" clear the cache """
cls._cache = {}
@classmethod
def is_dtype(cls, dtype):
""" Return a boolean if the passed type is an actual dtype that
we can match (via string or type)
"""
if hasattr(dtype, 'dtype'):
dtype = dtype.dtype
if isinstance(dtype, np.dtype):
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
try:
return cls.construct_from_string(dtype) is not None
except:
return False
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
"""
pass
class CategoricalDtype(ExtensionDtype):
"""
Type for categorical data with the categories and orderedness
.. versionchanged:: 0.21.0
Parameters
----------
categories : sequence, optional
Must be unique, and must not contain any nulls.
ordered : bool, default False
Notes
-----
This class is useful for specifying the type of a ``Categorical``
independent of the values. See :ref:`categorical.categoricaldtype`
for more.
Examples
--------
>>> t = CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
0 a
1 b
2 a
3 NaN
dtype: category
Categories (2, object): [b < a]
See Also
--------
Categorical
"""
# TODO: Document public vs. private API
name = 'category'
type = CategoricalDtypeType
kind = 'O'
str = '|O08'
base = np.dtype('O')
_metadata = ['categories', 'ordered']
_cache = {}
def __init__(self, categories=None, ordered=False):
self._finalize(categories, ordered, fastpath=False)
@classmethod
def _from_fastpath(cls, categories=None, ordered=False):
self = cls.__new__(cls)
self._finalize(categories, ordered, fastpath=True)
return self
@classmethod
def _from_categorical_dtype(cls, dtype, categories=None, ordered=None):
if categories is ordered is None:
return dtype
if categories is None:
categories = dtype.categories
if ordered is None:
ordered = dtype.ordered
return cls(categories, ordered)
def _finalize(self, categories, ordered, fastpath=False):
from pandas.core.indexes.base import Index
if ordered is None:
ordered = False
if categories is not None:
categories = Index(categories, tupleize_cols=False)
# validation
self._validate_categories(categories)
self._validate_ordered(ordered)
self._categories = categories
self._ordered = ordered
def __setstate__(self, state):
self._categories = state.pop('categories', None)
self._ordered = state.pop('ordered', False)
def __hash__(self):
# _hash_categories returns a uint64, so use the negative
# space for when we have unknown categories to avoid a conflict
if self.categories is None:
if self.ordered:
return -1
else:
return -2
# We *do* want to include the real self.ordered here
return int(self._hash_categories(self.categories, self.ordered))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
if not (hasattr(other, 'ordered') and hasattr(other, 'categories')):
return False
elif self.categories is None or other.categories is None:
# We're forced into a suboptimal corner thanks to math and
# backwards compatibility. We require that `CDT(...) == 'category'`
# for all CDTs **including** `CDT(None, ...)`. Therefore, *all*
# CDT(., .) = CDT(None, False) and *all*
# CDT(., .) = CDT(None, True).
return True
elif self.ordered:
return other.ordered and self.categories.equals(other.categories)
elif other.ordered:
return False
else:
# both unordered; this could probably be optimized / cached
return hash(self) == hash(other)
def __unicode__(self):
tpl = u'CategoricalDtype(categories={}ordered={})'
if self.categories is None:
data = u"None, "
else:
data = self.categories._format_data(name=self.__class__.__name__)
return tpl.format(data, self.ordered)
@staticmethod
def _hash_categories(categories, ordered=True):
from pandas.core.util.hashing import (
hash_array, _combine_hash_arrays, hash_tuples
)
if len(categories) and isinstance(categories[0], tuple):
            # assumes that if any individual category is a tuple, then all are.
            # At the moment we don't want to support only some of the
            # categories being tuples.
categories = list(categories) # breaks if a np.array of categories
cat_array = hash_tuples(categories)
else:
if categories.dtype == 'O':
types = [type(x) for x in categories]
if not len(set(types)) == 1:
# TODO: hash_array doesn't handle mixed types. It casts
# everything to a str first, which means we treat
# {'1', '2'} the same as {'1', 2}
# find a better solution
cat_array = np.array([hash(x) for x in categories])
hashed = hash((tuple(categories), ordered))
return hashed
cat_array = hash_array(np.asarray(categories), categorize=False)
if ordered:
cat_array = np.vstack([
cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)
])
else:
cat_array = [cat_array]
hashed = _combine_hash_arrays(iter(cat_array),
num_items=len(cat_array))
if len(hashed) == 0:
# bug in Numpy<1.12 for length 0 arrays. Just return the correct
# value of 0
return 0
else:
return np.bitwise_xor.reduce(hashed)
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if
it's not possible """
try:
if string == 'category':
return cls()
        except Exception:
pass
raise TypeError("cannot construct a CategoricalDtype")
@staticmethod
def _validate_ordered(ordered):
"""
Validates that we have a valid ordered parameter. If
it is not a boolean, a TypeError will be raised.
Parameters
----------
ordered : object
The parameter to be verified.
Raises
------
TypeError
If 'ordered' is not a boolean.
"""
from pandas.core.dtypes.common import is_bool
if not is_bool(ordered):
raise TypeError("'ordered' must either be 'True' or 'False'")
@staticmethod
def _validate_categories(categories, fastpath=False):
"""
Validates that we have good categories
Parameters
----------
categories : array-like
fastpath : bool
Whether to skip nan and uniqueness checks
Returns
-------
categories : Index
"""
from pandas import Index
if not isinstance(categories, ABCIndexClass):
categories = Index(categories)
if not fastpath:
if categories.hasnans:
                raise ValueError('Categorical categories cannot be null')
if not categories.is_unique:
raise ValueError('Categorical categories must be unique')
return categories
@property
def categories(self):
"""
An ``Index`` containing the unique categories allowed.
"""
return self._categories
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self._ordered
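# --- Illustrative sketch (editorial addition, not part of the upstream pandas
# module): how the equality rules documented in ``CategoricalDtype.__eq__``
# play out in practice. The helper name ``_demo_categorical_dtype_equality``
# is hypothetical and the function is never called.
def _demo_categorical_dtype_equality():
    unordered_ab = CategoricalDtype(['a', 'b'], ordered=False)
    unordered_ba = CategoricalDtype(['b', 'a'], ordered=False)
    ordered_ab = CategoricalDtype(['a', 'b'], ordered=True)
    assert unordered_ab == 'category'      # every CDT matches the string name
    assert unordered_ab == unordered_ba    # unordered: category order ignored
    assert ordered_ab != unordered_ba      # ordered requires matching order
    assert ordered_ab == CategoricalDtype(['a', 'b'], ordered=True)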
class DatetimeTZDtypeType(type):
"""
the type of DatetimeTZDtype, this metaclass determines subclass ability
"""
pass
class DatetimeTZDtype(ExtensionDtype):
"""
A np.dtype duck-typed class, suitable for holding a custom datetime with tz
dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of
np.datetime64[ns]
"""
type = DatetimeTZDtypeType
kind = 'M'
str = '|M8[ns]'
num = 101
base = np.dtype('M8[ns]')
_metadata = ['unit', 'tz']
    _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
_cache = {}
def __new__(cls, unit=None, tz=None):
""" Create a new unit if needed, otherwise return from the cache
Parameters
----------
unit : string unit that this represents, currently must be 'ns'
tz : string tz that this represents
"""
if isinstance(unit, DatetimeTZDtype):
unit, tz = unit.unit, unit.tz
elif unit is None:
# we are called as an empty constructor
# generally for pickle compat
return object.__new__(cls)
elif tz is None:
# we were passed a string that we can construct
try:
m = cls._match.search(unit)
if m is not None:
unit = m.groupdict()['unit']
tz = m.groupdict()['tz']
            except Exception:
raise ValueError("could not construct DatetimeTZDtype")
elif isinstance(unit, compat.string_types):
if unit != 'ns':
raise ValueError("DatetimeTZDtype only supports ns units")
unit = unit
tz = tz
if tz is None:
raise ValueError("DatetimeTZDtype constructor must have a tz "
"supplied")
# hash with the actual tz if we can
        # some cannot be hashed, so stringify
try:
key = (unit, tz)
hash(key)
except TypeError:
key = (unit, str(tz))
# set/retrieve from cache
try:
return cls._cache[key]
except KeyError:
u = object.__new__(cls)
u.unit = unit
u.tz = tz
cls._cache[key] = u
return u
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if
it's not possible
"""
try:
return cls(unit=string)
except ValueError:
raise TypeError("could not construct DatetimeTZDtype")
def __unicode__(self):
# format the tz
return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz)
@property
def name(self):
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return (isinstance(other, DatetimeTZDtype) and
self.unit == other.unit and
str(self.tz) == str(other.tz))
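# --- Illustrative sketch (editorial addition, not part of the upstream pandas
# module): constructing a DatetimeTZDtype from a string and observing the
# per-(unit, tz) cache. The helper name is hypothetical and never called.
def _demo_datetime_tz_dtype():
    utc_a = DatetimeTZDtype('datetime64[ns, UTC]')   # parsed via the _match regex
    utc_b = DatetimeTZDtype(unit='ns', tz='UTC')
    assert utc_a is utc_b                            # both resolve to the same cached instance
    assert utc_a.name == 'datetime64[ns, UTC]'
    assert DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') is utc_a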
class PeriodDtypeType(type):
"""
the type of PeriodDtype, this metaclass determines subclass ability
"""
pass
class PeriodDtype(ExtensionDtype):
    """
    A Period duck-typed class, suitable for holding a period with freq dtype.
    THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.int64.
    """
    __metaclass__ = PeriodDtypeType
type = PeriodDtypeType
kind = 'O'
str = '|O08'
base = np.dtype('O')
num = 102
_metadata = ['freq']
    _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
_cache = {}
def __new__(cls, freq=None):
"""
Parameters
----------
freq : frequency
"""
if isinstance(freq, PeriodDtype):
return freq
elif freq is None:
# empty constructor for pickle compat
return object.__new__(cls)
from pandas.tseries.offsets import DateOffset
if not isinstance(freq, DateOffset):
freq = cls._parse_dtype_strict(freq)
try:
return cls._cache[freq.freqstr]
except KeyError:
u = object.__new__(cls)
u.freq = freq
cls._cache[freq.freqstr] = u
return u
@classmethod
def _parse_dtype_strict(cls, freq):
if isinstance(freq, compat.string_types):
if freq.startswith('period[') or freq.startswith('Period['):
m = cls._match.search(freq)
if m is not None:
freq = m.group('freq')
from pandas.tseries.frequencies import to_offset
freq = to_offset(freq)
if freq is not None:
return freq
raise ValueError("could not construct PeriodDtype")
@classmethod
def construct_from_string(cls, string):
"""
attempt to construct this type from a string, raise a TypeError
        if it's not possible
"""
from pandas.tseries.offsets import DateOffset
if isinstance(string, (compat.string_types, DateOffset)):
# avoid tuple to be regarded as freq
try:
return cls(freq=string)
except ValueError:
pass
raise TypeError("could not construct PeriodDtype")
def __unicode__(self):
return "period[{freq}]".format(freq=self.freq.freqstr)
@property
def name(self):
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name or other == self.name.title()
return isinstance(other, PeriodDtype) and self.freq == other.freq
@classmethod
def is_dtype(cls, dtype):
"""
        Return a boolean if the passed type is an actual dtype that we
can match (via string or type)
"""
if isinstance(dtype, compat.string_types):
            # PeriodDtype can be instantiated from a freq string like "U",
            # but doesn't regard a bare freq string like "U" as a dtype.
if dtype.startswith('period[') or dtype.startswith('Period['):
try:
if cls._parse_dtype_strict(dtype) is not None:
return True
else:
return False
except ValueError:
return False
else:
return False
return super(PeriodDtype, cls).is_dtype(dtype)
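# --- Illustrative sketch (editorial addition, not part of the upstream pandas
# module): which string forms PeriodDtype accepts and rejects, as described in
# is_dtype above. The helper name is hypothetical and never called.
def _demo_period_dtype():
    daily = PeriodDtype('period[D]')
    assert daily.name == 'period[D]'
    assert daily == 'period[D]' and daily == 'Period[D]'
    assert PeriodDtype.is_dtype('period[D]')
    assert not PeriodDtype.is_dtype('D')   # a bare frequency string is not a dtype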
class IntervalDtypeType(type):
"""
the type of IntervalDtype, this metaclass determines subclass ability
"""
pass
class IntervalDtype(ExtensionDtype):
    """
    An Interval duck-typed class, suitable for holding an interval.
    THIS IS NOT A REAL NUMPY DTYPE
    """
    __metaclass__ = IntervalDtypeType
type = IntervalDtypeType
kind = None
str = '|O08'
base = np.dtype('O')
num = 103
_metadata = ['subtype']
    _match = re.compile(r"(I|i)nterval\[(?P<subtype>.+)\]")
_cache = {}
def __new__(cls, subtype=None):
"""
Parameters
----------
subtype : the dtype of the Interval
"""
if isinstance(subtype, IntervalDtype):
return subtype
elif subtype is None:
# we are called as an empty constructor
# generally for pickle compat
u = object.__new__(cls)
u.subtype = None
return u
elif (isinstance(subtype, compat.string_types) and
subtype == 'interval'):
subtype = ''
else:
if isinstance(subtype, compat.string_types):
m = cls._match.search(subtype)
if m is not None:
subtype = m.group('subtype')
from pandas.core.dtypes.common import pandas_dtype
try:
subtype = pandas_dtype(subtype)
except TypeError:
raise ValueError("could not construct IntervalDtype")
if subtype is None:
u = object.__new__(cls)
u.subtype = None
return u
try:
return cls._cache[str(subtype)]
except KeyError:
u = object.__new__(cls)
u.subtype = subtype
cls._cache[str(subtype)] = u
return u
@classmethod
def construct_from_string(cls, string):
"""
attempt to construct this type from a string, raise a TypeError
        if it's not possible
"""
if isinstance(string, compat.string_types):
try:
return cls(string)
except ValueError:
pass
raise TypeError("could not construct IntervalDtype")
def __unicode__(self):
if self.subtype is None:
return "interval"
return "interval[{subtype}]".format(subtype=self.subtype)
@property
def name(self):
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name or other == self.name.title()
return (isinstance(other, IntervalDtype) and
self.subtype == other.subtype)
@classmethod
def is_dtype(cls, dtype):
"""
        Return a boolean if the passed type is an actual dtype that we
can match (via string or type)
"""
if isinstance(dtype, compat.string_types):
if dtype.lower().startswith('interval'):
try:
if cls.construct_from_string(dtype) is not None:
return True
else:
return False
except ValueError:
return False
else:
return False
return super(IntervalDtype, cls).is_dtype(dtype)
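# --- Illustrative sketch (editorial addition, not part of the upstream pandas
# module): IntervalDtype construction and caching by subtype. The helper name
# is hypothetical and never called.
def _demo_interval_dtype():
    by_string = IntervalDtype('interval[int64]')
    assert by_string.name == 'interval[int64]'
    assert IntervalDtype('int64') is by_string   # instances are cached per subtype
    assert IntervalDtype.is_dtype('interval[int64]')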
|
{
"content_hash": "b316f5e96fe07537157ea4a1c06bc4b0",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 79,
"avg_line_length": 29.53602305475504,
"alnum_prop": 0.5519075031710411,
"repo_name": "Winand/pandas",
"id": "d2487905caced2bdb6fa05f68599afa149d371dd",
"size": "20498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/dtypes/dtypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4071"
},
{
"name": "C",
"bytes": "493226"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2972"
},
{
"name": "Python",
"bytes": "12199454"
},
{
"name": "R",
"bytes": "1177"
},
{
"name": "Shell",
"bytes": "23114"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
from apsw import SQLError
import os
from pandas.util.testing import assert_frame_equal
import pytest
import random
import tempfile
import bayeslite
from bayeslite.exception import BayesLiteException as BLE
from bdbcontrib import parallel
from bdbcontrib.bql_utils import cursor_to_df
import test_bql_utils
def test_estimate_pairwise_similarity():
"""
Tests basic estimate pairwise similarity functionality against
existing BQL estimate queries.
"""
with tempfile.NamedTemporaryFile(suffix='.bdb') as bdb_file:
bdb = bayeslite.bayesdb_open(bdb_file.name)
with tempfile.NamedTemporaryFile() as temp:
temp.write(test_bql_utils.csv_data)
temp.seek(0)
bayeslite.bayesdb_read_csv_file(
bdb, 't', temp.name, header=True, create=True)
bdb.execute('''
CREATE GENERATOR t_cc FOR t USING crosscat (
GUESS(*),
id IGNORE
)
''')
bdb.execute('INITIALIZE 3 MODELS FOR t_cc')
bdb.execute('ANALYZE t_cc MODELS 0-2 FOR 10 ITERATIONS WAIT')
# How to properly use the estimate_pairwise_similarity function.
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc'
)
            # Should complain with a bad `cores` value
with pytest.raises(BLE):
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc', cores=0
)
# Should complain if overwrite flag is not set, but t_similarity
# exists
with pytest.raises(SQLError):
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc'
)
# Should complain if model and table don't exist
with pytest.raises(SQLError):
parallel.estimate_pairwise_similarity(
bdb_file.name, 'foo', 'foo_cc'
)
# Should complain if bdb_file doesn't exist
with tempfile.NamedTemporaryFile() as does_not_exist:
with pytest.raises(SQLError):
parallel.estimate_pairwise_similarity(
does_not_exist.name, 't', 't_cc'
)
# Should run fine if overwrite flag is set
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc', overwrite=True
)
# Should be able to specify another table name
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc', sim_table='t_similarity_2'
)
parallel_sim = cursor_to_df(
bdb.execute('SELECT * FROM t_similarity ORDER BY rowid0, rowid1')
)
parallel_sim_2 = cursor_to_df(
bdb.execute('SELECT * FROM t_similarity_2 ORDER BY rowid0, rowid1')
)
# Results may be returned out of order. So we sort the values,
# as above, and we reorder the numeric index
parallel_sim.index = range(parallel_sim.shape[0])
parallel_sim_2.index = range(parallel_sim_2.shape[0])
# The data from two successive parallel pairwise estimates should be
# identical to each other...
assert_frame_equal(
parallel_sim, parallel_sim_2, check_column_type=True)
# ...and to a standard estimate pairwise similarity.
std_sim = cursor_to_df(
bdb.execute('ESTIMATE SIMILARITY FROM PAIRWISE t_cc')
)
assert_frame_equal(std_sim, parallel_sim, check_column_type=True)
def _bigger_csv_data(n=30):
"""
Bigger, but not *too* big, csv data to test batch uploading without
requiring tons of time for a non-parallelized estimate query
"""
data = [
'id,one,two,three,four'
]
for i in xrange(n):
data.append('{},{},{},{},{}'.format(
i,
random.randrange(0, 6),
random.randrange(0, 6),
random.randrange(0, 6),
random.choice(['one', 'two', 'three', 'four', 'five'])
))
return '\n'.join(data)
def test_estimate_pairwise_similarity_long():
"""
Tests larger queries that need to be broken into batch inserts of 500
values each, as well as the N parameter.
"""
with tempfile.NamedTemporaryFile(suffix='.bdb') as bdb_file:
bdb = bayeslite.bayesdb_open(bdb_file.name)
with tempfile.NamedTemporaryFile() as temp:
# n = 40 -> 40**2 -> 1600 rows total
temp.write(_bigger_csv_data(40))
temp.seek(0)
bayeslite.bayesdb_read_csv_file(
bdb, 't', temp.name, header=True, create=True)
bdb.execute('''
CREATE GENERATOR t_cc FOR t USING crosscat (
GUESS(*),
id IGNORE
)
''')
bdb.execute('INITIALIZE 3 MODELS FOR t_cc')
bdb.execute('ANALYZE t_cc MODELS 0-2 FOR 10 ITERATIONS WAIT')
# test N = 0
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc', N=0
)
assert cursor_to_df(
bdb.execute('SELECT * FROM t_similarity')
).shape == (0, 0)
# test other values of N
for N in [1, 2, 10, 20, 40]:
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc', N=N, overwrite=True
)
assert cursor_to_df(
bdb.execute('SELECT * FROM t_similarity')
).shape == (N**2, 3)
# N too high should fail
with pytest.raises(BLE):
parallel.estimate_pairwise_similarity(
bdb_file.name, 't', 't_cc', N=41, overwrite=True
)
parallel_sim = cursor_to_df(
bdb.execute('SELECT * FROM t_similarity ORDER BY rowid0, rowid1')
)
parallel_sim.index = range(parallel_sim.shape[0])
std_sim = cursor_to_df(
bdb.execute('ESTIMATE SIMILARITY FROM PAIRWISE t_cc')
)
assert_frame_equal(std_sim, parallel_sim, check_column_type=True)
|
{
"content_hash": "e316026a3c9f13a4dfc6e3705a1028bd",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 34.70520231213873,
"alnum_prop": 0.5781145902731513,
"repo_name": "probcomp/bdbcontrib",
"id": "1e110c6558ac93950cfe5a297d966e98bafe2edf",
"size": "6659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parallel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "709"
},
{
"name": "Jupyter Notebook",
"bytes": "55896"
},
{
"name": "Makefile",
"bytes": "2016"
},
{
"name": "Python",
"bytes": "406288"
},
{
"name": "Shell",
"bytes": "1578"
}
],
"symlink_target": ""
}
|
import json
from sqlalchemy import types as sa_types
class JSONEncodedDict(sa_types.TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = sa_types.Text
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is None:
return {}
return json.loads(value)
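# --- Illustrative sketch (editorial addition, not part of the original
# module): attaching JSONEncodedDict to a declaratively mapped column. The
# model and column names are hypothetical; the helper is never called.
def _example_json_column_usage():
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    base = declarative_base()
    class Event(base):
        __tablename__ = 'events'
        id = Column(Integer, primary_key=True)
        # Stored as JSON text; NULL values are read back as an empty dict.
        payload = Column(JSONEncodedDict, default=dict)
    return Event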
|
{
"content_hash": "027604e546825fca49a7ceaa7ae2cfe2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 24.894736842105264,
"alnum_prop": 0.6469344608879493,
"repo_name": "andreykurilin/mlm",
"id": "5596f08a2c26200769914b7cf40fda4686de0c00",
"size": "1071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlm/db/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1071"
},
{
"name": "HTML",
"bytes": "4308"
},
{
"name": "Python",
"bytes": "42917"
}
],
"symlink_target": ""
}
|
"""Support for LaCrosse sensor components."""
from datetime import timedelta
import logging
import pylacrosse
from serial import SerialException
import voluptuous as vol
from homeassistant.components.sensor import ENTITY_ID_FORMAT, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_ID,
CONF_NAME,
CONF_SENSORS,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_BAUD = "baud"
CONF_DATARATE = "datarate"
CONF_EXPIRE_AFTER = "expire_after"
CONF_FREQUENCY = "frequency"
CONF_JEELINK_LED = "led"
CONF_TOGGLE_INTERVAL = "toggle_interval"
CONF_TOGGLE_MASK = "toggle_mask"
DEFAULT_DEVICE = "/dev/ttyUSB0"
DEFAULT_BAUD = "57600"
DEFAULT_EXPIRE_AFTER = 300
TYPES = ["battery", "humidity", "temperature"]
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_TYPE): vol.In(TYPES),
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
vol.Optional(CONF_BAUD, default=DEFAULT_BAUD): cv.string,
vol.Optional(CONF_DATARATE): cv.positive_int,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
vol.Optional(CONF_FREQUENCY): cv.positive_int,
vol.Optional(CONF_JEELINK_LED): cv.boolean,
vol.Optional(CONF_TOGGLE_INTERVAL): cv.positive_int,
vol.Optional(CONF_TOGGLE_MASK): cv.positive_int,
}
)
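# --- Illustrative sketch (editorial addition, not part of the original
# integration): a minimal configuration.yaml entry matching the schema above.
# The sensor slug, id and intervals below are hypothetical.
#
# sensor:
#   - platform: lacrosse
#     device: /dev/ttyUSB0
#     baud: 57600
#     sensors:
#       living_room:
#         id: 5
#         type: temperature
#         expire_after: 600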
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LaCrosse sensors."""
usb_device = config.get(CONF_DEVICE)
baud = int(config.get(CONF_BAUD))
expire_after = config.get(CONF_EXPIRE_AFTER)
_LOGGER.debug("%s %s", usb_device, baud)
try:
lacrosse = pylacrosse.LaCrosse(usb_device, baud)
lacrosse.open()
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return False
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda event: lacrosse.close())
if CONF_JEELINK_LED in config:
lacrosse.led_mode_state(config.get(CONF_JEELINK_LED))
if CONF_FREQUENCY in config:
lacrosse.set_frequency(config.get(CONF_FREQUENCY))
if CONF_DATARATE in config:
lacrosse.set_datarate(config.get(CONF_DATARATE))
if CONF_TOGGLE_INTERVAL in config:
lacrosse.set_toggle_interval(config.get(CONF_TOGGLE_INTERVAL))
if CONF_TOGGLE_MASK in config:
lacrosse.set_toggle_mask(config.get(CONF_TOGGLE_MASK))
lacrosse.start_scan()
sensors = []
for device, device_config in config[CONF_SENSORS].items():
_LOGGER.debug("%s %s", device, device_config)
typ = device_config.get(CONF_TYPE)
sensor_class = TYPE_CLASSES[typ]
name = device_config.get(CONF_NAME, device)
sensors.append(
sensor_class(hass, lacrosse, device, name, expire_after, device_config)
)
add_entities(sensors)
class LaCrosseSensor(Entity):
"""Implementation of a Lacrosse sensor."""
_temperature = None
_humidity = None
_low_battery = None
_new_battery = None
def __init__(self, hass, lacrosse, device_id, name, expire_after, config):
"""Initialize the sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._config = config
self._name = name
self._value = None
self._expire_after = expire_after
self._expiration_trigger = None
lacrosse.register_callback(
int(self._config["id"]), self._callback_lacrosse, None
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {
"low_battery": self._low_battery,
"new_battery": self._new_battery,
}
return attributes
    def _callback_lacrosse(self, lacrosse_sensor, user_data):
        """Handle the callback from pylacrosse when new sensor values arrive."""
if self._expire_after is not None and self._expire_after > 0:
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = dt_util.utcnow() + timedelta(seconds=self._expire_after)
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self.value_is_expired, expiration_at
)
self._temperature = lacrosse_sensor.temperature
self._humidity = lacrosse_sensor.humidity
self._low_battery = lacrosse_sensor.low_battery
self._new_battery = lacrosse_sensor.new_battery
@callback
def value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._value = None
self.async_write_ha_state()
class LaCrosseTemperature(LaCrosseSensor):
"""Implementation of a Lacrosse temperature sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def state(self):
"""Return the state of the sensor."""
return self._temperature
class LaCrosseHumidity(LaCrosseSensor):
"""Implementation of a Lacrosse humidity sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return PERCENTAGE
@property
def state(self):
"""Return the state of the sensor."""
return self._humidity
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:water-percent"
class LaCrosseBattery(LaCrosseSensor):
"""Implementation of a Lacrosse battery sensor."""
@property
def state(self):
"""Return the state of the sensor."""
if self._low_battery is None:
state = None
elif self._low_battery is True:
state = "low"
else:
state = "ok"
return state
@property
def icon(self):
"""Icon to use in the frontend."""
if self._low_battery is None:
icon = "mdi:battery-unknown"
elif self._low_battery is True:
icon = "mdi:battery-alert"
else:
icon = "mdi:battery"
return icon
TYPE_CLASSES = {
"temperature": LaCrosseTemperature,
"humidity": LaCrosseHumidity,
"battery": LaCrosseBattery,
}
|
{
"content_hash": "d9778a3e480e45f2e17ed0de17a1b226",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 84,
"avg_line_length": 29.575,
"alnum_prop": 0.6380670611439843,
"repo_name": "partofthething/home-assistant",
"id": "f65c792ddb0e95a9d1370c8e423e8c1533e1b0b0",
"size": "7098",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/lacrosse/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
from case_script import TestPythonScript
import os
import shutil
class ResnetTest(TestPythonScript):
@property
def script(self):
return '../examples/ResNet/imagenet-resnet.py'
def test(self):
self.assertSurvive(self.script, args=['--data .',
'--gpu 0', '--fake', '--data_format NHWC'], timeout=10)
def tearDown(self):
super(ResnetTest, self).tearDown()
if os.path.isdir('ilsvrc'):
shutil.rmtree('ilsvrc')
|
{
"content_hash": "5168232f1ce881d8f891805c1b113db4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 101,
"avg_line_length": 27.157894736842106,
"alnum_prop": 0.5813953488372093,
"repo_name": "haamoon/tensorpack",
"id": "b49e0dfa50f0b238ec586719cd8f010ccac030a7",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_resnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "4039"
},
{
"name": "Makefile",
"bytes": "1566"
},
{
"name": "Python",
"bytes": "574087"
},
{
"name": "Shell",
"bytes": "2281"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import copy
import re
import sys
from io import BytesIO
from itertools import chain
from django.conf import settings
from django.core import signing
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import ImmutableList, MultiValueDict
from django.utils.encoding import (
escape_uri_path, force_bytes, force_str, force_text, iri_to_uri,
)
from django.utils.http import is_same_domain
from django.utils.six.moves.urllib.parse import (
parse_qsl, quote, urlencode, urljoin, urlsplit,
)
RAISE_ERROR = object()
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
    FILES, etc.
"""
pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self._post_parse_error = False
def __repr__(self):
if self.method is None or not self.get_full_path():
return force_str('<%s>' % self.__class__.__name__)
return force_str(
'<%s: %s %r>' % (self.__class__.__name__, self.method, force_str(self.get_full_path()))
)
def _get_raw_host(self):
"""
Return the HTTP host using the environment or request headers. Skip
allowed hosts protection, so may return an insecure host.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = self.get_port()
if server_port != ('443' if self.is_secure() else '80'):
host = '%s:%s' % (host, server_port)
return host
def get_host(self):
"""Return the HTTP host using the environment or request headers."""
host = self._get_raw_host()
# Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
allowed_hosts = settings.ALLOWED_HOSTS
if settings.DEBUG and not allowed_hosts:
allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
domain, port = split_domain_port(host)
if domain and validate_host(domain, allowed_hosts):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += " The domain name provided is not valid according to RFC 1034/1035."
raise DisallowedHost(msg)
def get_port(self):
"""Return the port number for the request as a string."""
if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
port = self.META['HTTP_X_FORWARDED_PORT']
else:
port = self.META['SERVER_PORT']
return str(port)
def get_full_path(self, force_append_slash=False):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s%s' % (
escape_uri_path(self.path),
'/' if force_append_slash and not self.path.endswith('/') else '',
('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempts to return a signed cookie. If the signature fails or the
cookie has expired, raises an exception... unless you provide the
default argument in which case that value will be returned instead.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
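    # Illustrative sketch (editorial addition, not part of the original
    # module): typical use of get_signed_cookie in a view. The cookie name,
    # salt and max_age below are hypothetical.
    #
    #     hint = request.get_signed_cookie('session_hint', default=None,
    #                                      salt='hint', max_age=3600)
    #     # hint is None if the cookie is missing, expired or tampered with.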
def get_raw_uri(self):
"""
Return an absolute URI from variables available in this request. Skip
allowed hosts protection, so may return insecure URI.
"""
return '{scheme}://{host}{path}'.format(
scheme=self.scheme,
host=self._get_raw_host(),
path=self.get_full_path(),
)
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, the absolute URI is
        built on ``request.get_full_path()``. If the location is absolute, it
        is simply converted to an RFC 3987 compliant URI and returned. If the
        location is relative or scheme-relative (i.e., ``//example.com/``), it
        is urljoined to a base URL constructed from the request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = '//%s' % self.get_full_path()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
host=self.get_host(),
path=self.path)
# Join the constructed URL with the provided location, which will
# allow the provided ``location`` to apply query strings to the
# base path as well as override the host, if it begins with //
location = urljoin(current_uri, location)
return iri_to_uri(location)
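    # Illustrative sketch (editorial addition, not part of the original
    # module): how build_absolute_uri resolves locations, assuming the current
    # request is for https://example.com/base/path?x=1 (hypothetical values).
    #
    #     request.build_absolute_uri()           -> 'https://example.com/base/path?x=1'
    #     request.build_absolute_uri('/other/')  -> 'https://example.com/other/'
    #     request.build_absolute_uri('sub/')     -> 'https://example.com/base/sub/'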
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Returns 'http' by
default.
"""
return 'http'
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
)
if self.META.get(header) == value:
return 'https'
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise RawPostDataException("You cannot access body after reading from request's data stream")
try:
self._body = self.read()
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_body'):
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
if hasattr(self, '_body'):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
# Mark that an error occurred. This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
else:
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
def close(self):
if hasattr(self, '_files'):
for f in chain.from_iterable(l[1] for l in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
    # These are both reset in __init__, but are specified here at the class
    # level so that unpickling will have valid values.
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super(QueryDict, self).__init__()
if not encoding:
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
if six.PY3:
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode('iso-8859-1')
for key, value in parse_qsl(query_string or '',
keep_blank_values=True,
encoding=encoding):
self.appendlist(key, value)
else:
for key, value in parse_qsl(query_string or '',
keep_blank_values=True):
try:
value = value.decode(encoding)
except UnicodeDecodeError:
value = value.decode('iso-8859-1')
self.appendlist(force_text(key, encoding, errors='replace'),
value)
self._mutable = mutable
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in six.iterlists(self):
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in six.iterlists(self):
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super(QueryDict, self).setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super(QueryDict, self).setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super(QueryDict, self).pop(key, *args)
def popitem(self):
self._assert_mutable()
return super(QueryDict, self).popitem()
def clear(self):
self._assert_mutable()
super(QueryDict, self).clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super(QueryDict, self).setdefault(key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict('', mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = force_bytes(safe, self.encoding)
encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
encode = lambda k, v: urlencode({k: v})
for k, list_ in self.lists():
k = force_bytes(k, self.encoding)
output.extend(encode(k, force_bytes(v, self.encoding))
for v in list_)
return '&'.join(output)
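# Illustrative sketch (editorial addition, not part of the original module):
# basic QueryDict behaviour for repeated keys and immutability.
#
#     q = QueryDict('a=1&a=2&b=3')
#     q['a']            -> '2'            (item access returns the last value)
#     q.getlist('a')    -> ['1', '2']
#     q['a'] = '4'      -> AttributeError (immutable unless mutable=True)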
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, bytes):
return six.text_type(s, encoding, 'replace')
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lower-cased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return '', ''
if host[-1] == ']':
# It's an IPv6 address without a port.
return host, ''
bits = host.rsplit(':', 1)
if len(bits) == 2:
return tuple(bits)
return bits[0], ''
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lower-cased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
host = host[:-1] if host.endswith('.') else host
for pattern in allowed_hosts:
if pattern == '*' or is_same_domain(host, pattern):
return True
return False
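# Illustrative sketch (editorial addition, not part of the original module):
# how split_domain_port and validate_host combine. The host values are
# hypothetical and the helper is never called.
def _demo_host_validation():
    domain, port = split_domain_port('www.example.com:8000')
    assert (domain, port) == ('www.example.com', '8000')
    assert validate_host(domain, ['.example.com'])   # leading dot matches subdomains
    assert validate_host(domain, ['*'])              # wildcard matches anything
    assert not validate_host(domain, ['example.org'])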
|
{
"content_hash": "ddeb923ea7438747532f736460cb1530",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 110,
"avg_line_length": 36.7797833935018,
"alnum_prop": 0.5887318413820181,
"repo_name": "ntuecon/server",
"id": "cec2add58e2709fd8e755d3c5458e706eb7646c9",
"size": "20376",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pyenv/Lib/site-packages/django/http/request.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2209"
},
{
"name": "Batchfile",
"bytes": "1509"
},
{
"name": "C",
"bytes": "504013"
},
{
"name": "C++",
"bytes": "96440"
},
{
"name": "CSS",
"bytes": "133288"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "150026"
},
{
"name": "JavaScript",
"bytes": "243314"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "27048260"
},
{
"name": "Shell",
"bytes": "47820"
},
{
"name": "Tcl",
"bytes": "1237796"
},
{
"name": "Visual Basic",
"bytes": "949"
},
{
"name": "XSLT",
"bytes": "2113"
}
],
"symlink_target": ""
}
|
"""Tags for Django template system that help generating QR codes."""
from typing import Optional, Any, Union
from django import template
from qr_code.qrcode.maker import make_qr_code_with_args, make_qr_code_url_with_args
from qr_code.qrcode.utils import (
make_google_play_text,
make_tel_text,
make_sms_text,
make_youtube_text,
WifiConfig,
ContactDetail,
Coordinates,
EpcData,
VCard,
Email,
MeCard,
VEvent,
)
register = template.Library()
def _make_app_qr_code_from_obj_or_kwargs(
obj_or_kwargs, expected_cls, embedded: bool, qr_code_args: dict, extra_qr_code_args: Optional[dict] = None, force_text: bool = True
) -> str:
if isinstance(obj_or_kwargs, expected_cls):
obj = obj_or_kwargs
else:
# For compatibility with existing views and templates, try to build from dict.
obj = expected_cls(**obj_or_kwargs)
final_args = {**qr_code_args}
if extra_qr_code_args:
final_args.update(extra_qr_code_args)
if embedded:
return make_qr_code_with_args(obj.make_qr_code_data(), qr_code_args=final_args, force_text=force_text)
else:
return make_qr_code_url_with_args(obj.make_qr_code_data(), qr_code_args=final_args, force_text=force_text)
def _make_google_maps_qr_code(embedded: bool, **kwargs) -> str:
if "coordinates" in kwargs:
coordinates = kwargs.pop("coordinates")
else:
coordinates = Coordinates(kwargs.pop("latitude"), kwargs.pop("longitude"))
if embedded:
return make_qr_code_with_args(coordinates.make_google_maps_text(), qr_code_args=kwargs)
else:
return make_qr_code_url_with_args(coordinates.make_google_maps_text(), qr_code_args=kwargs)
def _make_geolocation_qr_code(embedded: bool, **kwargs) -> str:
if "coordinates" in kwargs:
coordinates = kwargs.pop("coordinates")
else:
coordinates = Coordinates(kwargs.pop("latitude"), kwargs.pop("longitude"), kwargs.pop("altitude"))
if embedded:
return make_qr_code_with_args(coordinates.make_geolocation_text(), qr_code_args=kwargs)
else:
return make_qr_code_url_with_args(coordinates.make_geolocation_text(), qr_code_args=kwargs)
@register.simple_tag()
def qr_from_text(text: str, **kwargs) -> str:
return make_qr_code_with_args(data=text, qr_code_args=kwargs)
@register.simple_tag()
def qr_from_data(data: Any, **kwargs) -> str:
return make_qr_code_with_args(data=data, qr_code_args=kwargs, force_text=False)
@register.simple_tag()
def qr_for_email(email: Union[str, Email], **kwargs) -> str:
if isinstance(email, str):
        # Handle the simple case where the e-mail is simply the electronic address.
email = Email(to=email)
return _make_app_qr_code_from_obj_or_kwargs(email, Email, embedded=True, qr_code_args=kwargs)
@register.simple_tag()
def qr_for_tel(phone_number: Any, **kwargs) -> str:
return make_qr_code_with_args(make_tel_text(phone_number), qr_code_args=kwargs)
@register.simple_tag()
def qr_for_sms(phone_number: Any, **kwargs) -> str:
return make_qr_code_with_args(make_sms_text(phone_number), qr_code_args=kwargs)
@register.simple_tag()
def qr_for_geolocation(**kwargs) -> str:
"""Accepts a *'coordinates'* keyword argument or a triplet *'latitude'*, *'longitude'*, and *'altitude'*."""
return _make_geolocation_qr_code(embedded=True, **kwargs)
@register.simple_tag()
def qr_for_google_maps(**kwargs) -> str:
"""Accepts a *'coordinates'* keyword argument or a pair *'latitude'* and *'longitude'*."""
return _make_google_maps_qr_code(embedded=True, **kwargs)
@register.simple_tag()
def qr_for_youtube(video_id: str, **kwargs) -> str:
return make_qr_code_with_args(make_youtube_text(video_id), qr_code_args=kwargs)
@register.simple_tag()
def qr_for_google_play(package_id: str, **kwargs) -> str:
return make_qr_code_with_args(make_google_play_text(package_id), qr_code_args=kwargs)
@register.simple_tag()
def qr_for_contact(contact_detail, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(contact_detail, ContactDetail, embedded=True, qr_code_args=kwargs)
@register.simple_tag()
def qr_for_vcard(vcard, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(vcard, VCard, embedded=True, qr_code_args=kwargs)
@register.simple_tag()
def qr_for_mecard(mecard, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(mecard, MeCard, embedded=True, qr_code_args=kwargs)
@register.simple_tag()
def qr_for_wifi(wifi_config, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(wifi_config, WifiConfig, embedded=True, qr_code_args=kwargs)
@register.simple_tag()
def qr_for_epc(epc_data, **kwargs) -> str:
extra = dict(
error_correction="M",
boost_error=False,
micro=False,
encoding="utf-8",
)
return _make_app_qr_code_from_obj_or_kwargs(
epc_data, EpcData, embedded=True, qr_code_args=kwargs, extra_qr_code_args=extra, force_text=False
)
@register.simple_tag()
def qr_for_event(event, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(event, VEvent, embedded=True, qr_code_args=kwargs)
@register.simple_tag()
def qr_url_from_text(text: str, **kwargs) -> str:
return make_qr_code_url_with_args(data=text, qr_code_args=kwargs)
@register.simple_tag()
def qr_url_from_data(data: Any, **kwargs) -> str:
return make_qr_code_url_with_args(data=data, qr_code_args=kwargs, force_text=False)
@register.simple_tag()
def qr_url_for_email(email: Union[str, Email], **kwargs) -> str:
if isinstance(email, str):
        # Handle the simple case where the e-mail is simply the electronic address.
email = Email(to=email)
return _make_app_qr_code_from_obj_or_kwargs(email, Email, embedded=False, qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_tel(phone_number: Any, **kwargs) -> str:
return make_qr_code_url_with_args(make_tel_text(phone_number), qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_sms(phone_number: Any, **kwargs) -> str:
return make_qr_code_url_with_args(make_sms_text(phone_number), qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_geolocation(**kwargs) -> str:
"""Accepts a *'coordinates'* keyword argument or a triplet *'latitude'*, *'longitude'*, and *'altitude'*."""
return _make_geolocation_qr_code(embedded=False, **kwargs)
@register.simple_tag()
def qr_url_for_google_maps(**kwargs) -> str:
"""Accepts a *'coordinates'* keyword argument or a pair *'latitude'* and *'longitude'*."""
return _make_google_maps_qr_code(embedded=False, **kwargs)
@register.simple_tag()
def qr_url_for_youtube(video_id: str, **kwargs) -> str:
return make_qr_code_url_with_args(make_youtube_text(video_id), qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_google_play(package_id: str, **kwargs) -> str:
return make_qr_code_url_with_args(make_google_play_text(package_id), qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_contact(contact_detail, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(contact_detail, ContactDetail, embedded=False, qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_vcard(vcard, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(vcard, VCard, embedded=False, qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_mecard(mecard, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(mecard, MeCard, embedded=False, qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_wifi(wifi_config, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(wifi_config, WifiConfig, embedded=False, qr_code_args=kwargs)
@register.simple_tag()
def qr_url_for_epc(epc_data, **kwargs) -> str:
extra = dict(
error_correction="M",
boost_error=False,
micro=False,
encoding="utf-8",
)
return _make_app_qr_code_from_obj_or_kwargs(
epc_data, EpcData, embedded=False, qr_code_args=kwargs, extra_qr_code_args=extra, force_text=False
)
@register.simple_tag()
def qr_url_for_event(event, **kwargs) -> str:
return _make_app_qr_code_from_obj_or_kwargs(event, VEvent, embedded=False, qr_code_args=kwargs)
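# Illustrative sketch (editorial addition, not part of the original module):
# how a few of these tags might appear in a Django template. The context
# values and the qr_code_args options shown (size, image_format) are examples
# only and may vary by library version.
#
#     {% load qr_code %}
#     {% qr_from_text "Hello World!" size="m" %}
#     {% qr_url_from_text contact_message image_format="png" %}
#     {% qr_for_email "foo@example.com" %}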
|
{
"content_hash": "84649aa061e50b9149e1d0d1959b4bad",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 135,
"avg_line_length": 34.72573839662447,
"alnum_prop": 0.685176184690158,
"repo_name": "dprog-philippe-docourt/django-qr-code",
"id": "4239d1af2ff76893fc29865b71602b678d5a6710",
"size": "8230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qr_code/templatetags/qr_code.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "28762"
},
{
"name": "Python",
"bytes": "204625"
},
{
"name": "Shell",
"bytes": "3307"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class HobbiesConfig(AppConfig):
name = 'hobbies'
|
{
"content_hash": "17b7cac2df92017e36ffd4c666abffa6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "pollitosabroson/retoglobal",
"id": "bc3f4579baf511addc6ba7f7c70d2fe293e47ccc",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hobbies/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18900"
}
],
"symlink_target": ""
}
|
"""Check possible unbalanced tuple unpacking """
from __future__ import absolute_import
from functional.unpacking import unpack
# pylint: disable=using-constant-test, useless-object-inheritance
def do_stuff():
"""This is not right."""
first, second = 1, 2, 3 # [unbalanced-tuple-unpacking]
return first + second
def do_stuff1():
"""This is not right."""
first, second = [1, 2, 3] # [unbalanced-tuple-unpacking]
return first + second
def do_stuff2():
"""This is not right."""
(first, second) = 1, 2, 3 # [unbalanced-tuple-unpacking]
return first + second
def do_stuff3():
"""This is not right."""
first, second = range(100)
return first + second
def do_stuff4():
""" This is right """
first, second = 1, 2
return first + second
def do_stuff5():
""" This is also right """
first, second = (1, 2)
return first + second
def do_stuff6():
""" This is right """
(first, second) = (1, 2)
return first + second
def temp():
""" This is not weird """
if True:
return [1, 2]
return [2, 3, 4]
def do_stuff7():
""" This is not right, but we're not sure """
first, second = temp()
return first + second
def temp2():
""" This is weird, but correct """
if True:
return (1, 2)
if True:
return (2, 3)
return (4, 5)
def do_stuff8():
""" This is correct """
first, second = temp2()
return first + second
def do_stuff9():
""" This is not correct """
first, second = unpack() # [unbalanced-tuple-unpacking]
return first + second
class UnbalancedUnpacking(object):
""" Test unbalanced tuple unpacking in instance attributes. """
# pylint: disable=attribute-defined-outside-init, invalid-name, too-few-public-methods
def test(self):
""" unpacking in instance attributes """
# we're not sure if temp() returns two or three values
# so we shouldn't emit an error
self.a, self.b = temp()
self.a, self.b = temp2()
self.a, self.b = unpack() # [unbalanced-tuple-unpacking]
def issue329(*args):
""" Don't emit unbalanced tuple unpacking if the
rhs of the assignment is a variable-length argument,
because we don't know the actual length of the tuple.
"""
first, second, third = args
return first, second, third
def test_decimal():
"""Test a false positive with decimal.Decimal.as_tuple
See astroid https://bitbucket.org/logilab/astroid/issues/92/
"""
from decimal import Decimal
dec = Decimal(2)
first, second, third = dec.as_tuple()
return first, second, third
def test_issue_559():
"""Test that we don't have a false positive wrt to issue #559."""
from ctypes import c_int
root_x, root_y, win_x, win_y = [c_int()] * 4
return root_x, root_y, win_x, win_y
|
{
"content_hash": "8b8fdadd36f6f61f5709843d0682a2ed",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 90,
"avg_line_length": 26.35185185185185,
"alnum_prop": 0.6159522136331693,
"repo_name": "ekwoodrich/python-dvrip",
"id": "afa217f40cbe57d7f6318af82d3c721b445fb489",
"size": "2846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/lib/python3.5/site-packages/pylint/test/functional/unbalanced_tuple_unpacking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5706"
}
],
"symlink_target": ""
}
|
""" Module for text processing.
"""
import nltk.tokenize
import re
from barbante.utils.profiling import profile
import barbante.utils.logging as barbante_logger
log = barbante_logger.get_logger(__name__)
def get_stems(tokens, language):
    """ Returns the stems of the given tokens.
    :param tokens: A list of tokens.
    :param language: The language of the tokens.
    :returns: A list of stems, in the same order as the corresponding entries in the *tokens* list.
"""
result = []
try:
stemmer = nltk.stem.SnowballStemmer(language)
except ValueError:
return tokens
for token in tokens:
try:
stem = stemmer.stem(token)
result += [stem]
except Exception as err:
log.error("Error while stemming {0} term [{1}]: {2}".format(language, token, err))
return result
@profile
def calculate_tf(lang="", doc=""):
    """ Returns a map with all non-stopwords and their respective frequencies.
Ex: {"work": 1, "going": 1}
"""
tf_by_stem = {}
# Cleaning document
doc = re.sub(" +", " ", doc).lower()
tokens = remove_stopwords(tokenize(doc), lang, min_len=3, max_len=30)
stems = get_stems(tokens, lang)
for stem in stems:
tf_by_stem[stem] = tf_by_stem.get(stem, 0) + 1
return tf_by_stem
@profile
def calculate_tf_from_stems(stems):
""" Returns a map with the stems respective frequencies.
Ex: {"work": 1, "going": 1}
"""
tf_by_stem = {}
for stem in stems:
tf_by_stem[stem] = tf_by_stem.get(stem, 0) + 1
return tf_by_stem
def remove_stopwords(tokens, language, min_len=1, max_len=30):
""" Removes the stopwords from a list of terms.
:param tokens: A list of tokens.
:param language: The language of the terms.
:param min_len: The minimum size to be considered when removing stopwords.
:param max_len: The maximum size to be considered when removing stopwords.
:returns: A list of tokens free of stopwords.
"""
try:
stopwords = set(nltk.corpus.stopwords.words(language))
except Exception:
return tokens
stopwords.add("amp")
stopwords.add("quot")
stopwords.add("href")
stopwords.add("http")
stopwords.add("://")
stopwords.add(".&#")
try:
result = [w for w in tokens if w not in stopwords and
min_len <= len(w) <= max_len]
except IOError:
return
except AttributeError:
return
except TypeError as error:
raise TypeError(
"barbante.utils.text.remove_stopwords: {0}".format(error))
return result
def parse_text_to_stems(language, text, min_length=3):
""" Parse a text attribute performing cleanup, tokenization, stemmization and removal of stop-words.
:param language: The text language, relevant for stemmization.
:param text: The text to be stemmized.
:param min_length: The minimum number of characters that a word must have; otherwise it is discarded.
:returns: A list of terms.
"""
text = re.sub(" +", " ", text).lower()
tokens = tokenize(text)
stems = get_stems(tokens, language)
return remove_stopwords(stems, language, min_length)
def tokenize(text):
""" Returns a list with all words (tokens) in *text*.
:param text: The text to be tokenized.
:returns: A list of tokens.
See also: http://www.nltk.org/api/nltk.tokenize.html
"""
if type(text) is not str:
raise TypeError("barbante.utils.text.tokenize: text must be a string.")
invalid_characters = set()
invalid_characters.add("\n")
invalid_characters.add("\t")
invalid_characters.add(".")
invalid_characters.add(",")
invalid_characters.add("?")
invalid_characters.add("!")
invalid_characters.add("$")
invalid_characters.add('"')
# We may want to replace each invalid character with a reserved mnemonic.
text_as_list = [c for c in text if c not in invalid_characters]
text = ''.join(text_as_list)
text = text.strip()
tokens = nltk.tokenize.WordPunctTokenizer().tokenize(text)
return tokens
def count_common_terms(list1, list2):
""" Returns the number of common terms in two lists of terms.
"""
if list1 is None or list2 is None:
return 0
return len(set(list1) & set(list2))
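# Quick usage sketch (illustrative only; assumes the NLTK stopword corpus and
# Snowball stemmer data are available for the chosen language):
def _example_usage():
    doc = "The workers are going to work"
    tf_by_stem = calculate_tf(lang="english", doc=doc)
    stems = parse_text_to_stems("english", doc)
    print(tf_by_stem, count_common_terms(stems, list(tf_by_stem.keys())))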
|
{
"content_hash": "22f39e2cb9391a332529ea0e5f1f36cb",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 109,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.6288120163859808,
"repo_name": "hypermindr/barbante",
"id": "6540a539258deb1aa2112f8c4931bedf720e18b7",
"size": "4394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barbante/utils/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "728872"
}
],
"symlink_target": ""
}
|
import datetime
import gzip
import os
import unittest
import numpy as np
import pytz
from cerebralcortex.data_processor.signalprocessing.alignment import timestamp_correct
from cerebralcortex.data_processor.signalprocessing.ecg import rr_interval_update, compute_moving_window_int, \
check_peak, compute_r_peaks, remove_close_peaks, confirm_peaks, compute_rr_intervals
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
class TestRPeakDetect(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestRPeakDetect, cls).setUpClass()
tz = pytz.timezone('US/Eastern')
cls.ecg = []
cls._fs = 64.0
with gzip.open(os.path.join(os.path.dirname(__file__), 'res/ecg.csv.gz'), 'rt') as f:
for l in f:
values = list(map(int, l.split(',')))
cls.ecg.append(
DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
cls.ecg_datastream = DataStream(None, None)
cls.ecg_datastream.data = cls.ecg
def test_rr_interval_update(self):
rpeak_temp1 = [i for i in range(0, 100, 10)]
rr_ave = 4.5
self.assertEqual(rr_interval_update(rpeak_temp1, rr_ave), 10.0)
def test_rr_interval_update_small(self):
rpeak_temp1 = [i for i in range(0, 100, 100)]
rr_ave = 4.5
self.assertEqual(rr_interval_update(rpeak_temp1, rr_ave), 4.5)
def test_rr_interval_update_min_size(self):
rpeak_temp1 = [i for i in range(0, 100, 10)]
rr_ave = 4.5
self.assertEqual(rr_interval_update(rpeak_temp1, rr_ave, min_size=4), 10)
self.assertEqual(rr_interval_update(rpeak_temp1, rr_ave, min_size=1), 10)
self.assertEqual(rr_interval_update(rpeak_temp1, rr_ave, min_size=10), 9)
self.assertEqual(rr_interval_update(rpeak_temp1, rr_ave, min_size=25), 4.5)
def test_compute_moving_window_int(self):
sample = np.array([i for i in range(1, 40, 5)])
fs = 64
blackman_win_len = np.ceil(fs / 5)
result = [0.1877978, 0.32752854, 0.52515934, 0.754176, 0.94976418, 1.03957192, 0.9830406, 0.79712449]
self.assertAlmostEqual(sum(compute_moving_window_int(sample, fs, blackman_win_len)), sum(result))
def test_check_peak(self):
data = [0, 1, 2, 1, 0]
self.assertTrue(check_peak(data)) # TODO: Change these to datapoints
data = [0, 1, 0, 1, 0]
self.assertFalse(check_peak(data)) # TODO: Change these to datapoints
data = [0, 1, 2, 3, 4, 3, 2, 1]
self.assertTrue(check_peak(data)) # TODO: Change these to datapoints
data = [0, 1]
self.assertFalse(check_peak(data)) # TODO: Change these to datapoints
def test_detect_rpeak(self, threshold: float = .5):
sample = np.array([i.sample for i in self.ecg])
blackman_win_len = np.ceil(self._fs / 5)
y = compute_moving_window_int(sample, self._fs, blackman_win_len)
peak_location_values = [(i, y[i]) for i in range(2, len(y) - 1) if check_peak(y[i - 2:i + 3])]
peak_location = [i[0] for i in peak_location_values]
running_rr_avg = sum(np.diff(peak_location)) / (len(peak_location) - 1)
rpeak_temp1 = compute_r_peaks(threshold, running_rr_avg, y, peak_location_values)
first_index_file = os.path.join(os.path.dirname(__file__), 'res/testmatlab_firstindex.csv')
peak_index1_from_data = np.genfromtxt(first_index_file, delimiter=',')
test_result = (len(list(set(rpeak_temp1) & set(peak_index1_from_data - 1))) * 100) / len(rpeak_temp1)
self.assertGreaterEqual(test_result, 99, 'Peaks after adaptive threshold is less than a 99 percent match')
rpeak_temp2 = remove_close_peaks(rpeak_temp1, sample, self._fs)
second_index_file = os.path.join(os.path.dirname(__file__), 'res/testmatlab_secondindex.csv')
peak_index2_from_data = np.genfromtxt(second_index_file, delimiter=',')
test_result = (len(list(set(rpeak_temp2) & set(peak_index2_from_data - 1))) * 100) / len(rpeak_temp2)
self.assertGreaterEqual(test_result, 99, 'Peaks after removing close peaks is less than a 99 percent match')
index = confirm_peaks(rpeak_temp2, sample, self._fs)
final_index_file = os.path.join(os.path.dirname(__file__), 'res/testmatlab_finalindex.csv')
peak_index3_from_data = np.genfromtxt(final_index_file, delimiter=',')
test_result = (len(list(set(index) & set(peak_index3_from_data - 1))) * 100) / len(index)
self.assertGreaterEqual(test_result, 99, 'Peaks after confirmation is less than a 99 percent match')
def test_ecgprocessing_timestamp_correction(self):
ecg_corrected = timestamp_correct(self.ecg_datastream, self._fs)
rr_datastream_from_raw = compute_rr_intervals(self.ecg_datastream, fs=self._fs)
rr_datastream_from_corrected = compute_rr_intervals(ecg_corrected, fs=self._fs)
test_result = (len(rr_datastream_from_corrected.data) * 100) / len(rr_datastream_from_raw.data)
self.assertGreaterEqual(test_result, 99)
if __name__ == '__main__':
unittest.main()
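# Standalone sketch of the same pipeline outside unittest (the data source is
# hypothetical; mirrors setUpClass and test_ecgprocessing_timestamp_correction):
#   stream = DataStream(None, None)
#   stream.data = ecg_datapoints  # list of DataPoint(timestamp, sample) at 64 Hz
#   rr_stream = compute_rr_intervals(timestamp_correct(stream, 64.0), fs=64.0)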
|
{
"content_hash": "6508f1cfb5f53147b4979a9ba0015927",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 116,
"avg_line_length": 49.386792452830186,
"alnum_prop": 0.6529130850047755,
"repo_name": "nasirali1/CerebralCortex",
"id": "625c3eabc55c08847067229f4f94f075413bbc24",
"size": "6580",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cerebralcortex/data_processor/test/test_ecg.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "74"
},
{
"name": "Python",
"bytes": "371276"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
}
|
import os
import shutil
import paths
VALID_EXTENSIONS = [
'.png',
'.jpg',
'.jpeg',
'.tga',
]
VALID_EXTENSIONS_WITHOUT_DOT = map(lambda ext: ext[1:], VALID_EXTENSIONS)
def is_valid_extension(extension):
"""Returns True is `extension` is a valid image extension to be used with
custom Steam grid images. There are only 4 such extensions - `.png`, `.jpg`,
`.jpeg`, and `.tga`.
    This function will return True even if the parameter `extension` does not
include the leading '.'"""
return extension in VALID_EXTENSIONS or \
extension in VALID_EXTENSIONS_WITHOUT_DOT
def _valid_custom_image_paths(user_context, app_id):
parent_dir = paths.custom_images_directory(user_context)
possible_filenames = map(lambda ext: str(app_id) + ext, VALID_EXTENSIONS)
return map(lambda f: os.path.join(parent_dir, f), possible_filenames)
def has_custom_image(user_context, app_id):
"""Returns True if there exists a custom image for app_id."""
possible_paths = _valid_custom_image_paths(user_context, app_id)
return any(map(os.path.exists, possible_paths))
def get_custom_image(user_context, app_id):
"""Returns the custom image associated with a given app. If there are
multiple candidate images on disk, one is chosen arbitrarily."""
possible_paths = _valid_custom_image_paths(user_context, app_id)
existing_images = filter(os.path.exists, possible_paths)
if len(existing_images) > 0:
return existing_images[0]
def set_custom_image(user_context, app_id, image_path):
"""Sets the custom image for `app_id` to be the image located at
`image_path`. If there already exists a custom image for `app_id` it will
    be deleted. Returns True if setting the image was successful."""
if image_path is None:
return False
if not os.path.exists(image_path):
return False
(root, ext) = os.path.splitext(image_path)
if not is_valid_extension(ext):
# TODO: Maybe log that this happened?
return False
    # If we don't remove the old image then there's no guarantee that Steam will
# show our new image when it launches.
if has_custom_image(user_context, app_id):
img = get_custom_image(user_context, app_id)
assert(img is not None)
os.remove(img)
# Set the new image
parent_dir = paths.custom_images_directory(user_context)
    new_path = os.path.join(parent_dir, str(app_id) + ext)
shutil.copyfile(image_path, new_path)
return True
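# Usage sketch (user_context is assumed to come from pysteam's user helpers;
# the app id and image path below are hypothetical):
#   if not has_custom_image(user_context, 440):
#       set_custom_image(user_context, 440, '/tmp/440.png')
#   print get_custom_image(user_context, 440)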
|
{
"content_hash": "055472437aa2bd807bd70f50ca42e7d7",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 35.38235294117647,
"alnum_prop": 0.7111388196176226,
"repo_name": "scottrice/pysteam",
"id": "3295b413aea1880b0ffe06c5004760dc3ac58806",
"size": "2425",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pysteam/grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56532"
}
],
"symlink_target": ""
}
|
import json
import urllib2
from django.contrib import admin
from books.models import Book
from data.models import Data
domain = "http://smartebook.zmapp.com:9026"
# Sync book details
def sync_book(bid, cm):
    # Do not update info for books that are already completed
if Book.objects.filter(id=bid, status=1).count() == 0:
page = urllib2.urlopen("http://wap.cmread.com/r/p/viewdata.jsp?bid=%s&cm=%s&vt=9" % (bid, cm))
data = page.read()
try:
result = json.loads(data, encoding="utf-8")
print result
update = Book.objects.filter(id=bid).count() != 0
book = Book()
book.pk = int(bid)
book.name = result['showName']
book.brief = result['brief']
book.desc = result['desc']
book.cover_url = result['bigCoverLogo']
book.cover_url_small = result['smallCoverLogo']
book.status = result['status']
book.first_cid = result['firstChpaterCid']
book.last_cid = result['lastChapterCid']
book.chapter_size = result['chapterSize']
book.score = result['score']
book.word_size = result['wordSize']
book.click_amount = result['clickValue']
book.kw = result['kw']
book.price = int(float(result['price']) * 100)
book.charge_mode = result['chargeMode']
if update:
book.save(force_update=update, update_fields=(
'name', 'brief', 'desc', 'cover_url', 'cover_url_small', 'status', 'first_cid', 'last_cid',
'chapter_size', 'score', 'word_size', 'click_amount', 'kw', 'price', 'charge_mode'))
else:
book.save(force_insert=True)
return True
except Exception, e:
print e.message
return False
# Sync the bookshelf
def sync_bookshelf():
url = "%s/smart_book/get_bookshelf" % domain
page = urllib2.urlopen(url)
result = json.loads(page.read())
print result
books = result['bookshelf']
update_count = 0
for index, b in enumerate(books):
if sync_book(b['book_id'], 'zm'):
update_count += 1
return len(books), update_count
@admin.register(Data)
class DataAdmin(admin.ModelAdmin):
"""
    Data
"""
list_display = ['name']
actions = ['sync_data']
def sync_data(self, request, queryset):
total, update = sync_bookshelf()
message = "总共%d本,同步了%d本" % (total, update)
print message
self.message_user(request, message)
sync_data.short_description = "同步所选的 数据"
admin.site.disable_action('delete_selected')
|
{
"content_hash": "33ccb27273028c514cd1c45ea01abb85",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 111,
"avg_line_length": 29.795454545454547,
"alnum_prop": 0.5671243325705568,
"repo_name": "lnybrave/zzbook",
"id": "d0ef021963320d71393e8a0e64c06c24381a8ed7",
"size": "2741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "546"
},
{
"name": "Python",
"bytes": "112060"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, unicode_literals
__doc__ = """
Creates parenthesized letters and numbers: one.paren, two.paren, three.paren, four.paren, five.paren, six.paren, seven.paren, eight.paren, nine.paren, one_zero.paren, one_one.paren, one_two.paren, one_three.paren, one_four.paren, one_five.paren, one_six.paren, one_seven.paren, one_eight.paren, one_nine.paren, two_zero.paren, a.paren, b.paren, c.paren, d.paren, e.paren, f.paren, g.paren, h.paren, i.paren, j.paren, k.paren, l.paren, m.paren, n.paren, o.paren, p.paren, q.paren, r.paren, s.paren, t.paren, u.paren, v.paren, w.paren, x.paren, y.paren, z.paren.
"""
import math
from Foundation import NSPoint
distanceBetweenComponents = 95.0
parenShiftForLetters = 40.0
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
parenGlyphs = [
"one.paren", "two.paren", "three.paren", "four.paren", "five.paren", "six.paren", "seven.paren", "eight.paren", "nine.paren", "one_zero.paren", "one_one.paren",
"one_two.paren", "one_three.paren", "one_four.paren", "one_five.paren", "one_six.paren", "one_seven.paren", "one_eight.paren", "one_nine.paren", "two_zero.paren", "a.paren",
"b.paren", "c.paren", "d.paren", "e.paren", "f.paren", "g.paren", "h.paren", "i.paren", "j.paren", "k.paren", "l.paren", "m.paren", "n.paren", "o.paren", "p.paren", "q.paren",
"r.paren", "s.paren", "t.paren", "u.paren", "v.paren", "w.paren", "x.paren", "y.paren", "z.paren"
]
def measureLayerAtHeightFromLeftOrRight(thisLayer, height, leftSide=True):
thisLayer = thisLayer.copyDecomposedLayer()
try:
leftX = thisLayer.bounds.origin.x
rightX = leftX + thisLayer.bounds.size.width
y = height
returnIndex = 1
if not leftSide:
returnIndex = -2
measurements = thisLayer.intersectionsBetweenPoints(NSPoint(leftX, y), NSPoint(rightX, y))
if len(measurements) > 2:
measurement = measurements[returnIndex].pointValue().x
if leftSide:
distance = measurement - leftX
else:
distance = rightX - measurement
return distance
else:
return None
except:
return None
def minDistanceBetweenTwoLayers(comp1, comp2, interval=5.0):
topY = min(comp1.bounds.origin.y + comp1.bounds.size.height, comp2.bounds.origin.y + comp2.bounds.size.height)
bottomY = max(comp1.bounds.origin.y, comp2.bounds.origin.y)
distance = topY - bottomY
minDist = None
for i in range(int(distance / interval)):
height = bottomY + i * interval
left = measureLayerAtHeightFromLeftOrRight(comp1, height, leftSide=False)
right = measureLayerAtHeightFromLeftOrRight(comp2, height, leftSide=True)
try: # avoid gaps like in i or j
total = left + right
if minDist == None or minDist > total:
minDist = total
except:
print("None!", minDist, height, comp1.parent.name, left, comp2.parent.name, right)
pass
return minDist
def placeComponentsAtDistance(thisLayer, comp1, comp2, interval=5.0, distance=10.0):
thisMaster = thisLayer.associatedFontMaster()
masterID = thisMaster.id
original1 = comp1.component.layers[masterID]
original2 = comp2.component.layers[masterID]
minDist = minDistanceBetweenTwoLayers(original1, original2, interval=interval)
if minDist != None:
comp2shift = distance - minDist
addedSBs = original1.RSB + original2.LSB
comp2.x = comp1.x + original1.width - addedSBs + comp2shift
def transform(shiftX=0.0, shiftY=0.0, rotate=0.0, skew=0.0, scale=1.0):
"""
Returns an NSAffineTransform object for transforming layers.
Apply an NSAffineTransform t object like this:
Layer.transform_checkForSelection_doComponents_(t,False,True)
Access its transformation matrix like this:
tMatrix = t.transformStruct() # returns the 6-float tuple
Apply the matrix tuple like this:
Layer.applyTransform(tMatrix)
Component.applyTransform(tMatrix)
Path.applyTransform(tMatrix)
Chain multiple NSAffineTransform objects t1, t2 like this:
t1.appendTransform_(t2)
"""
myTransform = NSAffineTransform.transform()
if rotate:
myTransform.rotateByDegrees_(rotate)
if scale != 1.0:
myTransform.scaleBy_(scale)
if not (shiftX == 0.0 and shiftY == 0.0):
myTransform.translateXBy_yBy_(shiftX, shiftY)
if skew:
skewStruct = NSAffineTransformStruct()
skewStruct.m11 = 1.0
skewStruct.m22 = 1.0
skewStruct.m21 = math.tan(math.radians(skew))
skewTransform = NSAffineTransform.transform()
skewTransform.setTransformStruct_(skewStruct)
myTransform.appendTransform_(skewTransform)
return myTransform
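# Example sketch (assumes the Glyphs macro environment, where a GSLayer such as
# thisLayer is at hand):
#   exampleTransform = transform(shiftX=20.0, shiftY=-10.0, scale=0.9)
#   thisLayer.applyTransform(exampleTransform.transformStruct())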
def unsuffixed(name):
if "." in name:
return name[:name.find(".")]
else:
return name
def process(thisGlyph):
parts = ["parenleft"] + unsuffixed(thisGlyph.name).split("_") + ["parenright"]
maxWidth = thisFont.upm
thisGlyph.leftMetricsKey = None
thisGlyph.rightMetricsKey = None
print("-".join(parts))
for thisLayer in thisGlyph.layers:
thisLayer.clear()
for i, part in enumerate(parts):
ucName = "%s.case" % part
lfName = "%s.lf" % part
if thisGlyph.glyphInfo.subCategory == "Uppercase" or thisGlyph.glyphInfo.category == "Number":
if thisFont.glyphs[ucName]:
part = ucName
elif thisFont.glyphs[lfName]:
part = lfName
comp = GSComponent(part)
thisLayer.components.append(comp)
if i > 0:
placeComponentsAtDistance(thisLayer, thisLayer.components[i - 1], comp, distance=distanceBetweenComponents)
#thisLayer.decomposeComponents()
maxWidth = max(thisLayer.bounds.size.width * 0.97, maxWidth)
return maxWidth
def postprocess(thisGlyph, scale, shiftUp):
for thisLayer in thisGlyph.layers:
#thisLayer.decomposeComponents()
#for thisComp in thisLayer.components:
# thisComp.makeDisableAlignment()
scaleDown = transform(scale=scale).transformStruct()
thisLayer.applyTransform(scaleDown)
thisLayer.applyTransform(shiftUp)
lsb = (thisFont.upm - thisLayer.bounds.size.width) // 2.0
thisLayer.LSB = lsb
thisLayer.width = thisFont.upm
if thisLayer.components[1].component.category == "Letter":
thisLayer.components[0].x -= parenShiftForLetters
thisLayer.components[2].x += parenShiftForLetters
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
try:
maxWidth = 0.0
for name in parenGlyphs:
thisGlyph = thisFont.glyphs[name]
if not thisGlyph:
thisGlyph = GSGlyph()
thisGlyph.name = name
thisFont.glyphs.append(thisGlyph)
print("Processing %s" % thisGlyph.name)
# thisGlyph.beginUndo() # undo grouping causes crashes
maxWidth = max(maxWidth, process(thisGlyph))
print(maxWidth)
# thisGlyph.endUndo() # undo grouping causes crashes
print(maxWidth)
scale = (thisFont.upm / maxWidth) * 0.95
yShift = transform(shiftY=thisFont.upm * 0.08).transformStruct()
for name in parenGlyphs:
thisGlyph = thisFont.glyphs[name]
#print "Post-processing %s" % thisGlyph.name
postprocess(thisGlyph, scale, yShift)
except Exception as e:
Glyphs.showMacroWindow()
print("\n⚠️ Script Error:\n")
import traceback
print(traceback.format_exc())
print()
raise e
finally:
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
|
{
"content_hash": "399ffc46199792144737df368b98504c",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 560,
"avg_line_length": 38.68681318681319,
"alnum_prop": 0.7308620934526345,
"repo_name": "mekkablue/Glyphs-Scripts",
"id": "2f0d9052d53c1a8d6079721bfcc69d871f8d7b85",
"size": "7108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Build Glyphs/Build Parenthesized Glyphs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2142474"
}
],
"symlink_target": ""
}
|
import wave
import numpy as np
import struct
import audioop
import sys
SAMPLE_RATE = 24000 # Output/test data sample rate
Q_FACTOR = 1 # Additional linear quantization (for testing only)
LIMIT = 20 # Number of files
MIN_DURATION = 4.0 # Minimum duration in seconds
def preprocess(oldFileName):
    # preprocess takes in the name of a .wav file (oldFileName) and returns
    # a u-law downsampled version of the file
    file = wave.open(oldFileName, "rb")
num_channels = file.getnchannels()
sample_rate = file.getframerate()
sample_width = file.getsampwidth()
num_frames = file.getnframes()
#Grab the bytes from our WAV file
raw_frames = file.readframes(num_frames)
file.close()
total_samples = num_frames * num_channels
if sample_rate != SAMPLE_RATE:
u_law = audioop.ratecv(raw_frames, sample_width, num_channels, sample_rate, SAMPLE_RATE, None)
u_law = audioop.lin2ulaw(u_law[0], sample_width)
else:
u_law = audioop.lin2ulaw(raw_frames, sample_width)
u_law = list(u_law)
u_law = [ord(x)//Q_FACTOR for x in u_law]
return np.asarray(u_law)
def postprocess(data, newFileName, oldFileName):
#postprocess converts a numpy array of a u-law quantized .wav file
#into an actual file
#parameters are the data array, the name for the output file, and the original file name
# data is the u-law quantized sample
u_law = data
u_law = [chr(x) for x in u_law]
u_law = ''.join(u_law)
inputFile = wave.open(oldFileName, "rb")
sample_width = inputFile.getsampwidth()
num_channels = inputFile.getnchannels()
inputFile.close()
original = audioop.ulaw2lin(u_law, sample_width)
print "output data size: " + str(len(original))
output = wave.open(newFileName,'w')
output.setparams((num_channels, sample_width, SAMPLE_RATE, 0, 'NONE', 'not compressed'))
output.writeframes(original)
output.close()
if __name__ == "__main__":
if len(sys.argv) <= 1:
print "Must supply WAV path"
exit(-1)
wavfile_path = sys.argv[1]
data = preprocess(wavfile_path)
    postprocess(data, 'test.wav', wavfile_path)  # output file name chosen for this example
|
{
"content_hash": "8209ce8e89de90da30decc5a337557df",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 96,
"avg_line_length": 28.18918918918919,
"alnum_prop": 0.6907957813998082,
"repo_name": "YaleDataScience/wavenet",
"id": "c324941218e0be9b7e60f858dd8322a7f4f88b6e",
"size": "2086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/processing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56516"
},
{
"name": "Python",
"bytes": "2086"
}
],
"symlink_target": ""
}
|
from requests.adapters import HTTPAdapter
from collections.abc import Mapping, Sequence
from types import FunctionType
from tqdm import tqdm
import time
import sys
import gc
class PoolTest(object):
def __init__(self, worker_count: int):
self.worker_count = worker_count
self.pool = self.init_pool(worker_count)
self.compute_resource = self.init_compute_resource()
self.network_resource = self.init_network_resource()
def init_pool(self, worker_count: int) -> object:
raise NotImplementedError("{} does not implement init_pool"
.format(self.__class__.__name__))
def destroy_pool(self):
pass
def map(self, work_func: FunctionType, inputs: Sequence) -> Sequence:
raise NotImplementedError("{} does not implement map"
.format(self.__class__.__name__))
def init_compute_resource(self) -> object:
from cmath import sqrt
return sqrt
def init_network_resource(self) -> object:
import requests
return requests.Session
@staticmethod
def do_compute_work(args) -> None:
compute_resource, num, *_ = args
sqrt = compute_resource
sqrt(sqrt(sqrt(num)))
@staticmethod
def do_network_work(args) -> None:
network_resource, *_ = args
Session = network_resource
with Session() as s:
adapter = HTTPAdapter(max_retries=3)
s.mount('http://', adapter)
s.get('http://localhost:8080/')
def run_compute_test(self, jobs: int, trials: int,
show_progress: bool=False) -> Mapping:
return self._run_test(self.do_compute_work, self.compute_resource,
jobs, trials, show_progress=show_progress)
def run_network_test(self, jobs: int, trials: int,
show_progress: bool=False) -> Mapping:
return self._run_test(self.do_network_work, self.network_resource,
jobs, trials, show_progress=show_progress)
def _run_test(self, work_func: FunctionType, work_resource: object,
jobs: int, trials: int,
show_progress: bool=False) -> Mapping:
results = {
'jobs': jobs,
'trials': trials,
'time': [],
'blocks': [],
}
# Forcibly evaluate the inputs to prevent time/resources taken up later
inputs = list(zip(
[work_resource] * jobs,
range(jobs)
))
trial_iter = range(trials)
if show_progress is True and trials > 2:
trial_iter = tqdm(trial_iter, desc='trials')
gc.collect()
for _ in trial_iter:
# Run trial of pool map function and measure it
gc.collect()
blocks_start = sys.getallocatedblocks()
time_start = time.time()
list(self.map(work_func, inputs))
time_end = time.time()
results['time'].append(time_end - time_start)
# Get allocated blocks before garbage collection to show peak usage
blocks_end = sys.getallocatedblocks()
results['blocks'].append(blocks_end - blocks_start)
return results
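# Minimal concrete subclass sketch (hypothetical; not part of the original
# benchmark suite): backs PoolTest with the standard library multiprocessing.Pool.
class MultiprocessingPoolTest(PoolTest):
    def init_pool(self, worker_count: int) -> object:
        from multiprocessing import Pool
        return Pool(worker_count)
    def map(self, work_func: FunctionType, inputs: Sequence) -> Sequence:
        return self.pool.map(work_func, inputs)
    def destroy_pool(self):
        self.pool.close()
        self.pool.join()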
|
{
"content_hash": "b866a3a2515ca6e164af8eaa66b8822a",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 36.56666666666667,
"alnum_prop": 0.5755089638407779,
"repo_name": "JohnStarich/python-pool-performance",
"id": "d83a4bf4abc4187e8123837a1c3ea6367d058ef2",
"size": "3291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pools/pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18592"
}
],
"symlink_target": ""
}
|
import sys
import petsc4py
petsc4py.init(sys.argv)
import numpy as np
# from scipy.io import loadmat
from src import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic
from petsc4py import PETSc
from src.geo import *
from time import time
import pickle
def save_vtk(problem: sf.StokesFlowProblem):
t0 = time()
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
problem_kwargs = problem.get_kwargs()
fileHandle = problem_kwargs['fileHandle']
nth = problem_kwargs['nth']
nh = problem_kwargs['nh']
ch = problem_kwargs['ch']
rh1 = problem_kwargs['rh1']
rh2 = problem_kwargs['rh2']
ph = problem_kwargs['ph']
rU = problem_kwargs['rU']
n_helix_check = problem_kwargs['n_helix_check']
velocity_err = 0
# bgeo = geo()
# bnodesHeadle = problem_kwargs['bnodesHeadle']
# matname = problem_kwargs['matname']
# bgeo.mat_nodes(filename=matname, mat_handle=bnodesHeadle)
# belemsHeadle = problem_kwargs['belemsHeadle']
# bgeo.mat_elmes(filename=matname, mat_handle=belemsHeadle, elemtype='tetra')
# problem.vtk_tetra(fileHandle + '_Velocity', bgeo)
problem.vtk_obj(fileHandle)
helix_geo_check = supHelix() # force geo
# dth = 2 * np.pi / nth * 0.7
# dth = 2 * np.pi * 0.061 * np.sqrt(ch)
dth = np.max((2 * np.pi / nth * 0.8, 2 * np.pi * 0.061 * np.sqrt(ch)))
B = ph / (2 * np.pi)
helix_geo_check.create_deltatheta(dth=dth, radius=rh2, R=rh1, B=B, n_c=ch, epsilon=0, with_cover=1)
helix_check = sf.StokesFlowObj()
helix_check.set_data(helix_geo_check, helix_geo_check)
    ang_helix = 2 * np.pi / nh  # the angle between two neighbouring helices.
norm = np.array((0, 0, 1))
for i0 in range(nh):
t_obj = helix_check.copy()
theta = i0 * ang_helix
t_obj.node_rotation(norm=norm, theta=theta)
t_obj.set_velocity(np.ones_like(t_obj.get_u_nodes()))
t_obj.set_rigid_velocity(np.array((0, 0, 0, 0, 0, rU)))
t_obj.set_name('helix_Check_%d' % i0)
velocity_err = velocity_err + problem.vtk_check('%s_Check_%d' % (fileHandle, i0), t_obj)[0]
velocity_err = velocity_err / nh
t1 = time()
PETSc.Sys.Print('velocity error is: %f' % velocity_err)
PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
return True
def get_problem_kwargs(**main_kwargs):
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'helixInPipe')
prbHeadle = OptDB.getString('prbHeadle', 'construct07')
    nth = OptDB.getInt('nth', 2)  # number of helix nodes
    nh = OptDB.getInt('nh', 1)  # total number of helices
hfct = OptDB.getReal('hfct', 1) # helix axis line factor, put more nodes near both tops
eh = OptDB.getReal('eh', -0.5) # epsilon of helix
ch = OptDB.getReal('ch', 0.1) # cycles of helix
rh1 = OptDB.getReal('rh1', 0.6) # radius of helix
rh2 = OptDB.getReal('rh2', 0.1) # radius of helix
ph = OptDB.getReal('ph', 0.2 * np.pi) # helix pitch
rU = OptDB.getReal('rU', 1) # rotation velocity
matname = OptDB.getString('mat', 'body1')
bnodesHeadle = OptDB.getString('bnodes', 'bnodes') # body nodes, for vtu output
belemsHeadle = OptDB.getString('belems', 'belems') # body tetrahedron mesh, for vtu output
solve_method = OptDB.getString('s', 'gmres')
precondition_method = OptDB.getString('g', 'none')
plot = OptDB.getBool('plot', False)
matrix_method = OptDB.getString('sm', 'pf_stokesletsInPipe')
restart = OptDB.getBool('restart', False)
twoPara_n = OptDB.getInt('tp_n', 1)
legendre_m = OptDB.getInt('legendre_m', 3)
legendre_k = OptDB.getInt('legendre_k', 2)
n_helix_check = OptDB.getInt('n_helix_check', 2000)
n_node_threshold = OptDB.getInt('n_threshold', 10000)
getConvergenceHistory = OptDB.getBool('getConvergenceHistory', False)
pickProblem = OptDB.getBool('pickProblem', False)
plot_geo = OptDB.getBool('plot_geo', False)
if prbHeadle[-9:] != '_pick.bin':
prbHeadle = prbHeadle + '_pick.bin'
problem_kwargs = {
'name': 'helixInPipe',
'matrix_method': matrix_method,
'nth': nth,
'nh': nh,
'hfct': hfct,
'eh': eh,
'ch': ch,
'rh1': rh1,
'rh2': rh2,
'ph': ph,
'rU': rU,
'matname': matname,
'bnodesHeadle': bnodesHeadle,
'belemsHeadle': belemsHeadle,
'solve_method': solve_method,
'precondition_method': precondition_method,
'plot': plot,
'fileHandle': fileHandle,
'prbHeadle': prbHeadle,
'twoPara_n': twoPara_n,
'legendre_m': legendre_m,
'legendre_k': legendre_k,
'restart': restart,
'n_helix_check': n_helix_check,
'n_node_threshold': n_node_threshold,
'getConvergenceHistory': getConvergenceHistory,
'pickProblem': pickProblem,
'plot_geo': plot_geo,
}
for key in main_kwargs:
problem_kwargs[key] = main_kwargs[key]
return problem_kwargs
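# Illustrative command line (values are hypothetical; the option names map to
# the OptDB lookups above), e.g. when run under MPI:
#   mpirun -n 4 python helixInPipe.py -f helixInPipe -prbHeadle construct07 \
#       -nth 30 -nh 2 -ch 1.5 -rh1 0.6 -rh2 0.1 -ph 0.6283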
def print_case_info(**problem_kwargs):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
size = comm.Get_size()
fileHandle = problem_kwargs['fileHandle']
prbHeadle = problem_kwargs['prbHeadle']
matrix_method = problem_kwargs['matrix_method']
nth = problem_kwargs['nth']
nh = problem_kwargs['nh']
hfct = problem_kwargs['hfct']
eh = problem_kwargs['eh']
ch = problem_kwargs['ch']
rh1 = problem_kwargs['rh1']
rh2 = problem_kwargs['rh2']
ph = problem_kwargs['ph']
if rank == 0:
PETSc.Sys.Print('Case information: ')
# PETSc.Sys.Print(' pipe length: %f, pipe radius: %f' % (lp, rp))
PETSc.Sys.Print(' helix radius: %f and %f, helix pitch: %f, helix cycle: %f' % (rh1, rh2, ph, ch))
PETSc.Sys.Print(' nth, nh, hfct and epsilon of helix are %d, %d, %f and %f, ' % (nth, nh, hfct, eh))
err_msg = "Only 'rs_stokesletsInPipe', 'tp_rs_stokesletsInPipe', 'lg_rs_stokesletsInPipe', and 'pf_stokesletsInPipe' methods are accept for this main code. "
acceptType = ('rs_stokesletsInPipe', 'tp_rs_stokesletsInPipe', 'lg_rs_stokesletsInPipe', 'pf_stokesletsInPipe')
assert matrix_method in acceptType, err_msg
if matrix_method in 'rs_stokesletsInPipe':
PETSc.Sys.Print(' create matrix method: %s, ' % matrix_method)
elif matrix_method in 'tp_rs_stokesletsInPipe':
twoPara_n = problem_kwargs['twoPara_n']
PETSc.Sys.Print(' create matrix method: %s, order: %d'
% (matrix_method, twoPara_n))
elif matrix_method in 'lg_rs_stokesletsInPipe':
legendre_m = problem_kwargs['legendre_m']
legendre_k = problem_kwargs['legendre_k']
PETSc.Sys.Print(' create matrix method: %s, m: %d, k: %d, p: %d'
% (matrix_method, legendre_m, legendre_k, (legendre_m + 2 * legendre_k + 1)))
elif matrix_method in 'pf_stokesletsInPipe':
PETSc.Sys.Print(' create matrix method: %s ' % matrix_method)
else:
raise Exception('set how to print matrix method please. ')
solve_method = problem_kwargs['solve_method']
precondition_method = problem_kwargs['precondition_method']
PETSc.Sys.Print(' solve method: %s, precondition method: %s'
% (solve_method, precondition_method))
t_headle = '_pick.bin'
prbHeadle = prbHeadle if prbHeadle[-len(t_headle):] == t_headle else prbHeadle + t_headle
PETSc.Sys.Print(' read problem from: ' + prbHeadle)
PETSc.Sys.Print(' output file headle: ' + fileHandle)
PETSc.Sys.Print('MPI size: %d' % size)
# @profile
def main_fun(**main_kwargs):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
problem_kwargs = get_problem_kwargs(**main_kwargs)
fileHandle = problem_kwargs['fileHandle']
prbHeadle = problem_kwargs['prbHeadle']
if not problem_kwargs['restart']:
print_case_info(**problem_kwargs)
matrix_method = problem_kwargs['matrix_method']
# create helix
nth = problem_kwargs['nth']
nh = problem_kwargs['nh']
hfct = problem_kwargs['hfct']
eh = problem_kwargs['eh']
ch = problem_kwargs['ch']
rh1 = problem_kwargs['rh1']
rh2 = problem_kwargs['rh2']
ph = problem_kwargs['ph']
rU = problem_kwargs['rU']
B = ph / (2 * np.pi)
vhgeo = supHelix() # velocity node geo of helix
dth = 2 * np.pi / nth
fhgeo = vhgeo.create_deltatheta(dth=dth, radius=rh2, R=rh1, B=B, n_c=ch, epsilon=eh, with_cover=1, factor=hfct)
# vhgeo.show_nodes()
# vhgeo.show_velocity(length_factor=0.01)
vhobj = obj_dic[matrix_method]()
vhobj_kwargs = {'name': 'helix_0', }
vhobj.set_data(fhgeo, vhgeo, **vhobj_kwargs)
# load problem, solved force at (or outside) the pipe prepared.
t_headle = '_pick.bin'
prbHeadle = prbHeadle if prbHeadle[-len(t_headle):] == t_headle else prbHeadle + t_headle
with open(prbHeadle, 'rb') as input:
unpick = pickle.Unpickler(input)
problem = unpick.load()
problem.unpick_myself()
problem.set_kwargs(**problem_kwargs)
        ang_helix = 2 * np.pi / nh  # the angle between two neighbouring helices.
norm = np.array((0, 0, 1))
for i0 in range(nh):
t_obj = vhobj.copy()
theta = i0 * ang_helix
t_obj.node_rotation(norm=norm, theta=theta)
t_obj.set_velocity(np.ones_like(t_obj.get_u_nodes()))
t_obj.set_rigid_velocity(np.array((0, 0, 0, 0, 0, rU)))
t_obj.set_name('helix_%d' % i0)
problem.add_obj(t_obj)
problem.print_info()
if problem_kwargs['plot_geo']:
problem.show_f_u_nodes()
problem.show_velocity(length_factor=0.001)
problem.create_matrix()
residualNorm = problem.solve()
# # debug
# problem.saveM_ASCII('%s_M.txt' % fileHandle)
if problem_kwargs['pickProblem']:
problem.pickmyself(fileHandle)
force_helix = vhobj.get_force_z()
PETSc.Sys.Print('---->>>Resultant at z axis is %f' % (np.sum(force_helix) / (6 * np.pi * rh1)))
save_vtk(problem)
else:
t_headle = '_pick.bin'
        fileHandle = fileHandle if fileHandle[-len(t_headle):] == t_headle else fileHandle + t_headle
with open(fileHandle, 'rb') as input:
unpick = pickle.Unpickler(input)
problem = unpick.load()
problem.unpick_myself()
residualNorm = problem.get_residualNorm()
problem_kwargs1 = get_problem_kwargs(**main_kwargs)
problem_kwargs = problem.get_kwargs()
problem_kwargs['matname'] = problem_kwargs1['matname']
problem_kwargs['bnodesHeadle'] = problem_kwargs1['bnodesHeadle']
problem_kwargs['belemsHeadle'] = problem_kwargs1['belemsHeadle']
problem.set_kwargs(**problem_kwargs)
print_case_info(**problem_kwargs)
problem.print_info()
save_vtk(problem)
return problem, residualNorm
# @profile
def view_matrix(m, **kwargs):
args = {
'vmin': None,
'vmax': None,
'title': ' ',
'cmap': None
}
for key, value in args.items():
if key in kwargs:
args[key] = kwargs[key]
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
cax = ax.matshow(m,
origin='lower',
vmin=args['vmin'],
vmax=args['vmax'],
cmap=plt.get_cmap(args['cmap']))
fig.colorbar(cax)
plt.title(args['title'])
plt.show()
if __name__ == '__main__':
main_fun()
|
{
"content_hash": "ced099f340cb24f058c84714766f167b",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 165,
"avg_line_length": 39.693811074918564,
"alnum_prop": 0.5778762514360741,
"repo_name": "pcmagic/stokes_flow",
"id": "c258fb0b6a19cd72c5cc0f055b34e54fca2912f4",
"size": "12596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helixInPipe/helixInPipe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32833"
},
{
"name": "C++",
"bytes": "221"
},
{
"name": "CSS",
"bytes": "1645"
},
{
"name": "Fortran",
"bytes": "12772"
},
{
"name": "Gnuplot",
"bytes": "2957"
},
{
"name": "HTML",
"bytes": "22464"
},
{
"name": "JavaScript",
"bytes": "9553"
},
{
"name": "Jupyter Notebook",
"bytes": "326253745"
},
{
"name": "MATLAB",
"bytes": "82969"
},
{
"name": "Makefile",
"bytes": "6488"
},
{
"name": "Mathematica",
"bytes": "765914"
},
{
"name": "Objective-C",
"bytes": "793"
},
{
"name": "Python",
"bytes": "1404660"
}
],
"symlink_target": ""
}
|
import re
import subprocess
import sys
from collections import OrderedDict
from .source import Source
from .configuration import Configuration
import io
import codecs
try:
# Python 2.x and 3.x support for checking string types
basestring
unicode
except NameError:
basestring = str
unicode = str
class PDFKit(object):
"""
Main class that does all generation routine.
:param url_or_file: str - either a URL, a path to a file or a string containing HTML
to convert
:param type_: str - either 'url', 'file' or 'string'
:param options: dict (optional) with wkhtmltopdf options, with or w/o '--'
:param toc: dict (optional) - toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: str (optional) - url/filename with a cover html page
:param configuration: (optional) instance of pdfkit.configuration.Configuration()
"""
class ImproperSourceError(Exception):
"""Wrong source type for stylesheets"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def __init__(self, url_or_file, type_, options=None, toc=None, cover=None,
css=None, configuration=None, cover_first=False, verbose=False):
self.source = Source(url_or_file, type_)
self.configuration = (Configuration() if configuration is None
else configuration)
try:
self.wkhtmltopdf = self.configuration.wkhtmltopdf.decode('utf-8')
except AttributeError:
self.wkhtmltopdf = self.configuration.wkhtmltopdf
self.options = OrderedDict()
if self.source.isString():
self.options.update(self._find_options_in_meta(url_or_file))
self.environ = self.configuration.environ
if options is not None:
self.options.update(options)
self.toc = {} if toc is None else toc
self.cover = cover
self.cover_first = cover_first
self.verbose = verbose
self.css = css
self.stylesheets = []
def _genargs(self, opts):
"""
Generator of args parts based on options specification.
        Note: Empty parts will be filtered out in the _command generator
"""
for optkey, optval in self._normalize_options(opts):
yield optkey
if isinstance(optval, (list, tuple)):
assert len(optval) == 2 and optval[0] and optval[1], 'Option value can only be either a string or a (tuple, list) of 2 items'
yield optval[0]
yield optval[1]
else:
yield optval
def _command(self, path=None):
"""
Generator of all command parts
"""
if self.css:
self._prepend_css(self.css)
yield self.wkhtmltopdf
if not self.verbose:
self.options.update({'--quiet': ''})
for argpart in self._genargs(self.options):
if argpart:
yield argpart
if self.cover and self.cover_first:
yield 'cover'
yield self.cover
if self.toc:
yield 'toc'
for argpart in self._genargs(self.toc):
if argpart:
yield argpart
if self.cover and not self.cover_first:
yield 'cover'
yield self.cover
# If the source is a string then we will pipe it into wkhtmltopdf
# If the source is file-like then we will read from it and pipe it in
if self.source.isString() or self.source.isFileObj():
yield '-'
else:
if isinstance(self.source.source, basestring):
yield self.source.to_s()
else:
for s in self.source.source:
yield s
# If output_path evaluates to False append '-' to end of args
# and wkhtmltopdf will pass generated PDF to stdout
if path:
yield path
else:
yield '-'
def command(self, path=None):
return list(self._command(path))
@staticmethod
def handle_error(exit_code, stderr):
if exit_code == 0:
return
stderr_lines = stderr.splitlines()
# Sometimes wkhtmltopdf will exit with non-zero
# even if it finishes generation.
        # It will display 'Done' in the second-to-last line
if len(stderr_lines) > 1 and stderr.splitlines()[-2].strip() == 'Done':
return
if 'cannot connect to X server' in stderr:
raise IOError('%s\n'
'You will need to run wkhtmltopdf within a "virtual" X server.\n'
'Go to the link below for more information\n'
'https://github.com/JazzCore/python-pdfkit/wiki/Using-wkhtmltopdf-without-X-server' % stderr)
if 'Error' in stderr:
raise IOError('wkhtmltopdf reported an error:\n' + stderr)
error_msg = stderr or 'Unknown Error'
raise IOError("wkhtmltopdf exited with non-zero code {0}. error:\n{1}".format(exit_code, error_msg))
def to_pdf(self, path=None):
args = self.command(path)
if sys.platform == 'win32':
#hide cmd window
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
result = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environ,
startupinfo=startupinfo
)
else:
result = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environ
)
# If the source is a string then we will pipe it into wkhtmltopdf.
# If we want to add custom CSS to file then we read input file to
# string and prepend css to it and then pass it to stdin.
# This is a workaround for a bug in wkhtmltopdf (look closely in README)
if self.source.isString() or (self.source.isFile() and self.css):
input = self.source.to_s().encode('utf-8')
elif self.source.isFileObj():
input = self.source.source.read().encode('utf-8')
else:
input = None
stdout, stderr = result.communicate(input=input)
stderr = stderr or stdout or b""
stderr = stderr.decode('utf-8', errors='replace')
exit_code = result.returncode
self.handle_error(exit_code, stderr)
# Since wkhtmltopdf sends its output to stderr we will capture it
# and properly send to stdout
if '--quiet' not in args:
sys.stdout.write(stderr)
if not path:
return stdout
try:
with codecs.open(path, encoding='utf-8') as f:
# read 4 bytes to get PDF signature '%PDF'
text = f.read(4)
if text == '':
                    raise IOError('Command failed: %s\n'
                                  'Check wkhtmltopdf output without \'quiet\' '
                                  'option' % ' '.join(args))
return True
except (IOError, OSError) as e:
            raise IOError('Command failed: %s\n'
                          'Check wkhtmltopdf output without \'quiet\' option\n'
                          '%s ' % (' '.join(args), e))
def _normalize_options(self, options):
""" Generator of 2-tuples (option-key, option-value).
        When an option's value is a list, generate one 2-tuple per list item.
:param options: dict {option name: value}
returns:
iterator (option-key, option-value)
- option names lower cased and prepended with
'--' if necessary. Non-empty values cast to str
"""
for key, value in list(options.items()):
if '--' not in key:
normalized_key = '--%s' % self._normalize_arg(key)
else:
normalized_key = self._normalize_arg(key)
if isinstance(value, (list, tuple)):
for optval in value:
yield (normalized_key, optval)
else:
normalized_value = '' if isinstance(value,bool) else value
yield (normalized_key, unicode(normalized_value) if value else value)
def _normalize_arg(self, arg):
return arg.lower()
def _style_tag_for(self, stylesheet):
return "<style>%s</style>" % stylesheet
def _prepend_css(self, path):
if self.source.isUrl() or isinstance(self.source.source, list):
raise self.ImproperSourceError('CSS files can be added only to a single '
'file or string')
if not isinstance(path, list):
path = [path]
css_data = []
for p in path:
with codecs.open(p, encoding="UTF-8") as f:
css_data.append(f.read())
css_data = "\n".join(css_data)
if self.source.isFile():
with codecs.open(self.source.to_s(), encoding="UTF-8") as f:
inp = f.read()
self.source = Source(
inp.replace('</head>', self._style_tag_for(css_data) + '</head>'),
'string')
elif self.source.isString():
if '</head>' in self.source.to_s():
self.source.source = self.source.to_s().replace(
'</head>', self._style_tag_for(css_data) + '</head>')
else:
self.source.source = self._style_tag_for(css_data) + self.source.to_s()
def _find_options_in_meta(self, content):
"""Reads 'content' and extracts options encoded in HTML meta tags
:param content: str or file-like object - contains HTML to parse
returns:
dict: {config option: value}
"""
if (isinstance(content, io.IOBase)
or content.__class__.__name__ == 'StreamReaderWriter'):
content = content.read()
found = {}
for x in re.findall('<meta [^>]*>', content):
if re.search('name=["\']%s' % self.configuration.meta_tag_prefix, x):
name = re.findall('name=["\']%s([^"\']*)' %
self.configuration.meta_tag_prefix, x)[0]
found[name] = re.findall('content=["\']([^"\']*)', x)[0]
return found
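# Usage sketch (assumes the wkhtmltopdf binary is installed and reachable via
# the default configuration; the HTML snippet and output file name are
# illustrative):
if __name__ == '__main__':
    kit = PDFKit('<h1>Hello</h1>', 'string', options={'page-size': 'A4'})
    kit.to_pdf('example.pdf')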
|
{
"content_hash": "11468e9d30699980d9b7121fe6e3fa7f",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 141,
"avg_line_length": 35.35643564356435,
"alnum_prop": 0.5511061327359283,
"repo_name": "JazzCore/python-pdfkit",
"id": "784aad08d0caf7eb0c9a8a631c243c40bee13e1f",
"size": "10737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdfkit/pdfkit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32965"
},
{
"name": "Shell",
"bytes": "400"
}
],
"symlink_target": ""
}
|
import os
import sys
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../src"))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.viewcode",
"numpydoc",
"sphinx.ext.autosummary",
"doctest",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"svgutils"
copyright = u"2011, Bartosz Telenczuk"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"nosidebar": False}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "svgutilsdoc"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"svgutils.tex",
u"svgutils Documentation",
u"Bartosz Telenczuk",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "svgutils", u"svgutils Documentation", [u"Bartosz Telenczuk"], 1)
]
|
{
"content_hash": "8e796c742def055f8e8215c1fc99b83b",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 80,
"avg_line_length": 31.636363636363637,
"alnum_prop": 0.6966954022988506,
"repo_name": "btel/svg_utils",
"id": "9302dc378d3588ca4ada4acb784203ff46866143",
"size": "7379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31174"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class TrackedResource(Resource):
"""The Resource definition.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
    :param location: The Geo-location where the resource lives
:type location: str
:param tags: Resource tags
:type tags: dict
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, location, tags=None):
super(TrackedResource, self).__init__()
self.location = location
self.tags = tags
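# Usage sketch (values are illustrative; id, name and type are read-only and
# populated by the service, not by the caller):
#   resource = TrackedResource(location='westus', tags={'env': 'dev'})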
|
{
"content_hash": "cba9efe3fd27b3fb2f73934afa87badf",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 27.4,
"alnum_prop": 0.5529197080291971,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "fe840d11b2b019cdf8d05f62208b32c55501f493",
"size": "1570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-servicebus/azure/mgmt/servicebus/models/tracked_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
}
|
from ratelimit import limits, RateLimitException
from tests import unittest, clock
class TestDecorator(unittest.TestCase):
@limits(calls=1, period=10, clock=clock)
def increment(self):
'''
Increment the counter at most once every 10 seconds.
'''
self.count += 1
@limits(calls=1, period=10, clock=clock, raise_on_limit=False)
def increment_no_exception(self):
'''
        Increment the counter at most once every 10 seconds, but without
        raising an exception when reaching the limit.
'''
self.count += 1
def setUp(self):
self.count = 0
clock.increment(10)
def test_increment(self):
self.increment()
self.assertEqual(self.count, 1)
def test_exception(self):
self.increment()
self.assertRaises(RateLimitException, self.increment)
def test_reset(self):
self.increment()
clock.increment(10)
self.increment()
self.assertEqual(self.count, 2)
def test_no_exception(self):
self.increment_no_exception()
self.increment_no_exception()
self.assertEqual(self.count, 1)
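# Illustrative use of the decorator outside the test harness (the function below
# is hypothetical; with the real clock, exceeding the limit raises
# RateLimitException until the period has elapsed):
@limits(calls=15, period=900)
def _example_call_api():
    """Hypothetical API call: at most 15 calls per 15 minutes."""
    pass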
|
{
"content_hash": "ca1547a859c06f2f9bd7df357b9d7b65",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 26.363636363636363,
"alnum_prop": 0.6241379310344828,
"repo_name": "tomasbasham/ratelimit",
"id": "ceefdfdd636e1c2315929021d57463eee7fcb899",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/decorator_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8426"
}
],
"symlink_target": ""
}
|
from robot.errors import DataError
from robot.utils import is_dict_like
from .argumentvalidator import ArgumentValidator
class ArgumentResolver(object):
def __init__(self, argspec, resolve_named=True,
resolve_variables_until=None, dict_to_kwargs=False):
self._named_resolver = NamedArgumentResolver(argspec) \
if resolve_named else NullNamedArgumentResolver()
self._variable_replacer = VariableReplacer(resolve_variables_until)
self._dict_to_kwargs = DictToKwargs(argspec, dict_to_kwargs)
self._argument_validator = ArgumentValidator(argspec)
def resolve(self, arguments, variables=None):
positional, named = self._named_resolver.resolve(arguments)
positional, named = self._variable_replacer.replace(positional, named,
variables)
positional, named = self._dict_to_kwargs.handle(positional, named)
self._argument_validator.validate(positional, named,
dryrun=not variables)
return positional, named
class NamedArgumentResolver(object):
def __init__(self, argspec):
self._argspec = argspec
def resolve(self, arguments):
positional = []
named = {}
for arg in arguments:
if self._is_named(arg):
self._add_named(arg, named)
elif named:
self._raise_positional_after_named()
else:
positional.append(arg)
return positional, named
def _is_named(self, arg):
if not isinstance(arg, basestring) or '=' not in arg:
return False
name = arg.split('=')[0]
if self._is_escaped(name):
return False
if not self._argspec.supports_named:
return self._argspec.kwargs
return name in self._argspec.positional or self._argspec.kwargs
def _is_escaped(self, name):
return name.endswith('\\')
def _add_named(self, arg, named):
name, value = arg.split('=', 1)
name = self._convert_to_str_if_possible(name)
if name in named:
self._raise_multiple_values(name)
named[name] = value
def _convert_to_str_if_possible(self, name):
# Python 2.5 doesn't handle Unicode kwargs at all, so we will try to
# support it by converting to str if possible
try:
return str(name)
except UnicodeError:
return name
def _raise_multiple_values(self, name):
raise DataError("%s '%s' got multiple values for argument '%s'."
% (self._argspec.type, self._argspec.name, name))
def _raise_positional_after_named(self):
raise DataError("%s '%s' got positional argument after named arguments."
% (self._argspec.type, self._argspec.name))
class NullNamedArgumentResolver(object):
def resolve(self, arguments):
return arguments, {}
class DictToKwargs(object):
def __init__(self, argspec, enabled=False):
self._maxargs = argspec.maxargs
self._enabled = enabled and bool(argspec.kwargs)
def handle(self, positional, named):
if self._enabled and self._extra_arg_has_kwargs(positional, named):
named = positional.pop()
return positional, named
def _extra_arg_has_kwargs(self, positional, named):
if named or len(positional) != self._maxargs + 1:
return False
return is_dict_like(positional[-1], allow_java=True)
class VariableReplacer(object):
def __init__(self, resolve_until=None):
self._resolve_until = resolve_until
def replace(self, positional, named, variables=None):
# `variables` is None in dry-run mode and when using Libdoc
if variables:
positional = variables.replace_list(positional, self._resolve_until)
named = dict((name, variables.replace_scalar(value))
for name, value in named.items())
else:
positional = list(positional)
return positional, named
|
{
"content_hash": "fe027ef5bc3242d77431b39f83f5005a",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 80,
"avg_line_length": 35.427350427350426,
"alnum_prop": 0.6086851628468034,
"repo_name": "fiuba08/robotframework",
"id": "378ef1ba999bbe2db5da66d77c53b24b2f77d72c",
"size": "4753",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/robot/running/arguments/argumentresolver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "16539"
},
{
"name": "HTML",
"bytes": "1011996"
},
{
"name": "Java",
"bytes": "58737"
},
{
"name": "JavaScript",
"bytes": "159003"
},
{
"name": "Python",
"bytes": "1992779"
},
{
"name": "RobotFramework",
"bytes": "4288"
},
{
"name": "Shell",
"bytes": "883"
}
],
"symlink_target": ""
}
|
from papermill.cli import papermill
if __name__ == '__main__':
papermill()
|
{
"content_hash": "f5bf945d70957b044a3e53120ab3b8f5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 35,
"avg_line_length": 20,
"alnum_prop": 0.625,
"repo_name": "nteract/papermill",
"id": "c386c2ff143222504f39d3bf85bc254f4bf799a8",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "papermill/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "46018"
},
{
"name": "Python",
"bytes": "248159"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
}
|
import uuid
from django import template
from django.forms.widgets import Media
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from settings import PROGRESSBARUPLOAD_INCLUDE_JQUERY
register = template.Library()
@register.simple_tag
def progress_bar():
"""
    progress_bar simple tag.
    Returns the HTML5 <progress> tag used to display the progress bar, plus
    the URL of the AJAX view that js/progress_bar.js polls for upload
    progress.
"""
progress_bar_tag = '<progress id="progressBar" ' \
'data-progress_bar_uuid="%s" value="0" max="100" ' \
'hidden></progress><div id="progressText"></div>' % (uuid.uuid4())
upload_progress_url = '<script>upload_progress_url = "%s"</script>' \
% (reverse('upload_progress'))
return mark_safe(progress_bar_tag + upload_progress_url)
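# Illustrative usage (added note, not from the original source): in a Django
# template these tags would typically be used as
#   {% load progress_bar %} ... {% progress_bar %} {% progress_bar_media %}
# The load name "progress_bar" is assumed from this module's filename.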
@register.simple_tag
def progress_bar_media():
"""
    progress_bar_media simple tag.
    Returns the rendered <script> tags for the JavaScript used by progress_bar.
"""
if PROGRESSBARUPLOAD_INCLUDE_JQUERY:
js = ["http://code.jquery.com/jquery-1.8.3.min.js",]
else:
js = []
js.append("js/progress_bar.js")
m = Media(js=js)
return m.render()
|
{
"content_hash": "2235346f969ecd7705b0bcb5dd192c10",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 28.022727272727273,
"alnum_prop": 0.6682887266828873,
"repo_name": "foobarbecue/afterflight",
"id": "f75a2262207b19434ba5d3b7c28049e77aa78d32",
"size": "1233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "afterflight/progressbarupload/templatetags/progress_bar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "122554"
},
{
"name": "HTML",
"bytes": "3366611"
},
{
"name": "JavaScript",
"bytes": "1748314"
},
{
"name": "PHP",
"bytes": "37169"
},
{
"name": "Python",
"bytes": "58593"
}
],
"symlink_target": ""
}
|
if ENABLE_FINANCIAL_AID:
db.define_table( 'fa',
        # Identification:
# - Legal name => fa.person.first_name + fa.person.last_name
# - Address => fa.person.{address1,address2,city,state,country,zip_code}
# - email address => fa.person.email
# Registration:
        # - registration type => fa.person.attendee_type
db.Field( 'person', db.auth_user, default=auth.user_id, readable=True, writable=False),
db.Field('created_on','datetime',default=now, readable=False, writable=False),
db.Field('modified_on','datetime',default=now, readable=False, writable=False),
db.Field( 'registration_amount', 'boolean', default=False),
# Hotel Cost:
        # - number of nights of assistance requested;
db.Field( 'hotel_nights', 'integer', default=0 ),
# - total amount requested; label: "Max 50% of room rate at Crowne Plaza x # nights;" labeled; validated if easy to update room rates.
db.Field( 'total_lodging_amount', 'double', default='0.00'),
db.Field( 'roommates', 'string', length=128, default=''),
# Transportation:
# - method of transportation / details;
# db.Field( 'method_of_transportation', 'string', default=''),
db.Field( 'transportation_details', 'text', default=''),
# - total amount requested; label: "If you want assistance with your transportation costs, please provide a rough estimate (to nearest US$100)
# of how much a round-trip will cost. Please update your request once final cost is known."
db.Field( 'transportation_amount', 'double', default='0.00', ),
# Total: - read-only field calculated from above 3 sections
# - registration dollar amount requested; (let applicant specify, as they can ask for just a portion)
db.Field( 'total_amount_requested', 'double', default='0.0'), # default = ATTENDEE_TYPE_COST[person.attendee_type]),
#
        # Additional fields:
        # - minimum amount requested; label "In addition to the desired amount, state the minimum amount of aid you require, below
        #   which you will not be able to attend PyCon. If we are unable to allocate this minimum amount, we will decline your application
# and allocate the funds to others."
db.Field( 'minimum_amount_requested', 'double', default='0.00', ),
        # - Rationale: "State why you should come to PyCon, and what you will be doing.
# We don't need an essay, but please provide a few sentences of explanation.
# Priority will be given to people who make significant contributions to PyCon
# and the Python community (e.g. students working on a task, conference speakers,
# sprint leaders, developers critical to a sprint, super-enthusiastic sprint newbies
# who will give 110% for their project, or people doing public service work with Python)."
db.Field( 'rationale', 'text', default='' ),
db.Field('status',default='pending',label=T("Status"), writable=False,readable=False),
db.Field( 'grant_amount', 'double', default='0.00', writable=False,readable=False),
migrate=migrate, fake_migrate=fake_migrate)
db.fa.person.requires=IS_IN_DB(db,'auth_user.id','%(last_name)s, %(first_name)s [%(id)s]')
db.fa.registration_amount.comment= T('(cost TBD)')
db.fa.total_lodging_amount.comment= T('(in ARS pesos)')
##db.fa.roommates.comment= XML(str(T('(%s)',A('instructions',_href='#roommates'))))
db.fa.transportation_details.comment = T('(dates, airports codes, bus stations, etc.)')
db.fa.transportation_amount.comment= T('(in ARS pesos)')
db.fa.total_amount_requested.comment= T('(in ARS pesos)')
db.fa.minimum_amount_requested.comment= T('(in ARS pesos)')
db.fa.rationale.comment= T('describe why you want to come to PyCon')
db.fa.status.writable=db.fa.status.readable=auth.has_membership('manager')
db.fa.status.requires=IS_IN_SET(['pending', 'approved', 'denied'])
db.fa.grant_amount.writable=db.fa.grant_amount.readable=auth.has_membership('manager')
#### ---< END: F/A forms >---
#### end fixup
body_template="""
Your Financial Aid Application has been %s.
Thank you; your updates will be reviewed.
Time: %s
Name: %s %s [#%s]
Address: %s
%s, %s %s (%s)
Email: %s
APPLICATION:
Registration Type: %s
Registration Amount: $ %4.2f
Number Nights: %d
Total Lodging Amount: $ %4.2f
Roommates: %s
TRANSPORTATION:
%s
Transportation Amount: $ %4.2f
TOTAL REQUEST: $ %5.2f
Minimum Request: $ %5.2f
RATIONALE:
%s
(This email was automatically generated by PyConAr2012 Registration System)
"""
def email_fa(mode):
fa=db(db.fa.person==auth.user.id).select()[0]
person=db(db.auth_user.id==auth.user.id).select()[0]
body=body_template % (mode, fa.modified_on,
person.first_name,person.last_name,person.id,
person.address,person.city,person.state,person.zip_code,person.country,
person.email,
person.attendee_type,
fa.registration_amount, fa.hotel_nights, fa.total_lodging_amount, fa.roommates,
fa.transportation_details, fa.transportation_amount,
fa.total_amount_requested, fa.minimum_amount_requested,
fa.rationale
)
if False: # careful! I've only just printed this to console, so only use for local testing
# you could add this:
# if request.env.http_host == '127.0.0.1:8000'
# or... output to the screen....
# response.headers['Content-Type']='text/plain'
# return str("t2.email(\n"+EMAIL_SENDER+"["+FA_EMAIL_TO+person.email+"]"+"\nsubject="+'PyCon FA Application Updated [#%s]' % fa.id+body+"\n)")
print "t2.email(\n",EMAIL_SENDER,[FA_EMAIL_TO,person.email],"\nsubject=",'PyCon FA Application Updated [#%s]' % fa.id,body,")"
else:
mail.send(to=person.email, cc=FA_EMAIL_TO,
subject='Financial Aid Application Updated [#%s]' % fa.id,
message=body)
def email_fa_select(query=db.fa.id>0):
for fa in db(query).select():
person=db(db.auth_user.id==fa.person).select()[0]
body=body_template % ("retrieved by a manager", fa.modified_on,
person.first_name,person.last_name,person.id,
person.address1,person.address2,person.city,person.state,person.zip_code,person.country,
person.email,
person.attendee_type,
fa.registration_amount, fa.hotel_nights, fa.total_lodging_amount, fa.roommates,
fa.transportation_details, fa.transportation_amount,
fa.total_amount_requested, fa.minimum_amount_requested,
fa.rationale
)
if False: # careful! I've only just printed this to console, so only use for local testing
# you could add this:
# if request.env.http_host == '127.0.0.1:8000'
# or... output to the screen....
# response.headers['Content-Type']='text/plain'
# return str("t2.email(\n"+EMAIL_SENDER+"["+FA_EMAIL_TO+"]"+"\nsubject="+'PyCon FA Application Updated [#%s]' % fa.id+ body+ "\n)")
print "t2.email(\n",EMAIL_SENDER,[FA_EMAIL_TO],"\nsubject=",'PyCon FA Application Updated [#%s]' % fa.id, body, ")"
else:
raise "Saraza"
mail.send(to=FA_EMAIL_TO,subject='PyCon FA Application Updated [#%s]' % fa.id, message=body)
|
{
"content_hash": "7f29489b3b0dec61dcb677720994d06b",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 154,
"avg_line_length": 55.63265306122449,
"alnum_prop": 0.5833944729762778,
"repo_name": "reingart/web2conf",
"id": "10ffdb053f4dc92d26c11cf9b267642bd1e45700",
"size": "8242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/db_fa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "305263"
},
{
"name": "JavaScript",
"bytes": "714541"
},
{
"name": "Python",
"bytes": "1243006"
},
{
"name": "Shell",
"bytes": "267"
}
],
"symlink_target": ""
}
|
import json
from google.appengine.ext import ndb
from controllers.account.account_base import AccountBaseHandler
from models.Caucus import Caucus
class CaucusInfoHandler(AccountBaseHandler):
@ndb.toplevel
def get(self, caucus_id):
caucus = Caucus.get_by_id(int(caucus_id))
self.template_values['caucus'] = caucus
self.render_template('account/caucus/caucus_details.html')
class CaucusCreateHandler(AccountBaseHandler):
@ndb.toplevel
def get(self):
self.render_template('account/caucus/caucus_new_edit.html')
@ndb.toplevel
def post(self):
name = self.request.get('name')
if Caucus.query(Caucus.name == name).count() > 0:
response = {'success': False, 'error_message': "A caucus already exists with the name {0}".format(name)}
self.response.write(json.dumps(response))
return
new_caucus = Caucus.create_caucus(name=name, user_account=self.user_account)
response = {'success': True, 'goto_url': '/account/caucus/{0}'.format(new_caucus.key.id())}
self.response.write(json.dumps(response))
class CaucusEditHandler(AccountBaseHandler):
@ndb.toplevel
def get(self, caucus_id):
caucus = Caucus.get_by_id(int(caucus_id))
self.template_values['caucus'] = caucus
self.render_template('account/caucus/caucus_new_edit.html')
@ndb.toplevel
def put(self, caucus_id):
caucus = Caucus.get_by_id(int(caucus_id))
name = self.request.get('name')
if Caucus.query(Caucus.name == name).count() > 0:
response = {'success': False, 'error_message': "A caucus already exists with the name {0}".format(name)}
self.response.write(json.dumps(response))
return
caucus.edit_caucus(name=name)
response = {'success': True, 'goto_url': '/account/caucus/{0}'.format(caucus.key.id())}
self.response.write(json.dumps(response))
class CaucusJoinHandler(AccountBaseHandler):
@ndb.toplevel
def post(self, caucus_id):
caucus = Caucus.get_by_id(int(caucus_id))
caucus.add_participant(self.user_account)
response = {'success': True}
self.response.write(json.dumps(response))
|
{
"content_hash": "099b35727f2296475c495a28ba1e3380",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 116,
"avg_line_length": 37.31666666666667,
"alnum_prop": 0.6556498436802144,
"repo_name": "PatrickLaban/OnlineCaucus",
"id": "6137bcd6d3eb63444c86efd73f230a693075267b",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/account/caucus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103"
},
{
"name": "HTML",
"bytes": "10207"
},
{
"name": "Python",
"bytes": "14133"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Savedactivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='Cardial Burning', editable=False, max_length=7)),
('detail', models.TextField(default='run for 100 miles')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activities', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Savedworkouts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('overview', models.CharField(default='Standard Workout', editable=False, max_length=7)),
('pic', models.ImageField(blank=True, null=True, upload_to='')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='workouts', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Userwithworkouts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
{
"content_hash": "7da539984566ea454ba4a066e64aed32",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 146,
"avg_line_length": 42.714285714285715,
"alnum_prop": 0.6092530657748049,
"repo_name": "TrainerProjectF13X/trainer_web_app",
"id": "293ffa3e2616057592916d5e75d59a7e3cfa8a26",
"size": "1866",
"binary": false,
"copies": "1",
"ref": "refs/heads/Dev",
"path": "workouts/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "198541"
},
{
"name": "HTML",
"bytes": "12643"
},
{
"name": "JavaScript",
"bytes": "522554"
},
{
"name": "Python",
"bytes": "46562"
}
],
"symlink_target": ""
}
|
__author__ = "Simone Campagna"
import re
from .py23 import BASE_STRING
class Values(object):
__re_split__ = re.compile("[,x]")
__default_separator__ = 'x'
def __init__(self, init):
if isinstance(init, Values):
values = init._values
elif isinstance(init, (list, tuple)):
values = init
elif isinstance(init, BASE_STRING):
values = self._from_string(init)
elif hasattr(init, '__iter__'):
values = init
else:
raise ValueError("cannot make a {c} from {t} object {o!r}".format(
c=self.__class__.__name__,
t=type(init).__name__,
o=init))
self._values = tuple(values)
def values(self):
return self._values
def rank(self):
return len(self._values)
def split_first(self):
return self._values[0], self.__class__(self._values[1:])
def __iter__(self):
return iter(self._values)
@classmethod
def _item_from_string(cls, value):
return int(value)
@classmethod
def _from_string(cls, value):
values = []
for item in cls.__re_split__.split(value):
if item:
values.append(cls._item_from_string(item))
return values
def __len__(self):
return len(self._values)
def __str__(self):
return str(self.__default_separator__.join(str(i) for i in self._values))
def __repr__(self):
return "{c}({s!r})".format(c=self.__class__.__name__, s=self._values)
def __getitem__(self, index):
return self._values[index]
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._values == other._values
else:
return False
def __ne__(self, other):
if isinstance(other, self.__class__):
return self._values != other._values
else:
return False
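# Illustrative usage (added note, not from the original source):
#   Values("8x8x4").values()  -> (8, 8, 4)
#   str(Values((8, 8, 4)))    -> "8x8x4"
# Commas are accepted as an alternative separator, e.g. Values("8,8,4").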
|
{
"content_hash": "64e5f9e36a96ad4001f095f731e19db5",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 27.123287671232877,
"alnum_prop": 0.5232323232323233,
"repo_name": "simone-campagna/rubik",
"id": "868db4d116a7bcebb26cbada11b552e5285a461f",
"size": "2587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rubik/values.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "546787"
}
],
"symlink_target": ""
}
|
import logging
logger = logging.getLogger(__name__)
def parse_config(description_str):
output_dict = {}
# ##!!##logger.debug('parsing abstraction parameters')
# split the description into lines by breaking at "\n"
description_arr = description_str.splitlines()
for l in description_arr:
        # skip blank lines and treat lines starting with '#' as comments
        if not l or l[0] == '#':
            continue
# split each line on "="
parameter_def = l.split('=')
# strip leading and trailing whitespaces
parameter_name = parameter_def[0].strip()
parameter_val = parameter_def[1].strip()
output_dict[parameter_name] = parameter_val
# store the RHS type of the output dict, i.e., all RHS are strings
output_dict['type'] = 'string'
return output_dict
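# Illustrative example (added note, not from the original source): a
# description string such as
#   "# abstraction parameters\neps = 0.1\nnum_samples = 10"
# would be parsed into
#   {'eps': '0.1', 'num_samples': '10', 'type': 'string'}
# All right-hand sides are kept as strings; the parameter names above are
# hypothetical.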
|
{
"content_hash": "0917fbc341ab4cb37c44a3f806aad62e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 70,
"avg_line_length": 23.17142857142857,
"alnum_prop": 0.6177558569667078,
"repo_name": "zutshi/S3CAMX",
"id": "5f39d39366a51bf9f8984998f999f82b4e06b5ce",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/configparser.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6988"
},
{
"name": "Matlab",
"bytes": "41830"
},
{
"name": "Python",
"bytes": "621726"
},
{
"name": "Shell",
"bytes": "11798"
}
],
"symlink_target": ""
}
|
"""
Flask routing
"""
from flask import Flask, request, session, send_from_directory, render_template
from werkzeug.contrib.fixers import ProxyFix
app = Flask(__name__, static_path="/")
app.wsgi_app = ProxyFix(app.wsgi_app)
import api
import json
import mimetypes
import os.path
from datetime import datetime
from api.common import WebSuccess, WebError
from api.annotations import api_wrapper, require_login, require_teacher, require_admin, check_csrf
from api.annotations import block_before_competition, block_after_competition
from api.annotations import log_action
import api.routes.autogen
import api.routes.user
import api.routes.team
import api.routes.stats
import api.routes.admin
import api.routes.group
import api.routes.problem
import api.routes.achievements
log = api.logger.use(__name__)
session_cookie_domain = None
session_cookie_path = "/"
session_cookie_name = "flask"
secret_key = ""
def config_app(*args, **kwargs):
"""
Return the app object configured correctly.
This needed to be done for gunicorn.
"""
app.secret_key = secret_key
app.config["SESSION_COOKIE_DOMAIN"] = session_cookie_domain
app.config["SESSION_COOKIE_PATH"] = session_cookie_path
app.config["SESSION_COOKIE_NAME"] = session_cookie_name
app.register_blueprint(api.routes.autogen.blueprint, url_prefix="/api/autogen")
app.register_blueprint(api.routes.user.blueprint, url_prefix="/api/user")
app.register_blueprint(api.routes.team.blueprint, url_prefix="/api/team")
app.register_blueprint(api.routes.stats.blueprint, url_prefix="/api/stats")
app.register_blueprint(api.routes.admin.blueprint, url_prefix="/api/admin")
app.register_blueprint(api.routes.group.blueprint, url_prefix="/api/group")
app.register_blueprint(api.routes.problem.blueprint, url_prefix="/api/problems")
app.register_blueprint(api.routes.achievements.blueprint, url_prefix="/api/achievements")
api.logger.setup_logs({"verbose": 2})
return app
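# Illustrative note (added, not part of the original file): config_app() acts
# as an app factory; a WSGI entry module would typically expose something like
#   application = config_app()
# and point gunicorn at that module. The exact entry point this project uses
# is not shown here.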
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Methods', 'GET, POST')
response.headers.add('Access-Control-Allow-Credentials', 'true')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type, *')
response.headers.add('Cache-Control', 'no-cache')
response.headers.add('Cache-Control', 'no-store')
if api.auth.is_logged_in():
if 'token' in session:
response.set_cookie('token', session['token'])
else:
csrf_token = api.common.token()
session['token'] = csrf_token
response.set_cookie('token', csrf_token)
# JB: This is a hack. We need a better solution
if request.path[0:19] != "/api/autogen/serve/":
        response.mimetype = 'application/json'
return response
@app.route('/api/time', methods=['GET'])
@api_wrapper
def get_time():
return WebSuccess(data=int(datetime.utcnow().timestamp()))
|
{
"content_hash": "aa498a52e1ced24972e1dd179cba16ef",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 98,
"avg_line_length": 34.54117647058823,
"alnum_prop": 0.7173024523160763,
"repo_name": "alpire/picoCTF-web",
"id": "b216fee2186752fdef5fa6148692ffa141bdbc47",
"size": "2936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/app.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7236"
},
{
"name": "CoffeeScript",
"bytes": "79997"
},
{
"name": "HTML",
"bytes": "46100"
},
{
"name": "JavaScript",
"bytes": "21380"
},
{
"name": "Python",
"bytes": "224789"
},
{
"name": "Shell",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
def get_json(suffix):
import urllib, json
url_object = urllib.urlopen('http://api.pugetsound.onebusaway.org/api/where/' + suffix)
return json.loads(url_object.read())
def test_sql(cnx):
from datetime import datetime, date
cursor = cnx.cursor()
test_insert = ("INSERT INTO test (word, number, date) "
"VALUES (%(word_entry)s, %(number_entry)s, %(date_entry)s)")
test_data = {
'word_entry': 'TestData',
'number_entry': 17,
'date_entry': datetime.now().date()
}
cursor.execute(test_insert, test_data)
cnx.commit()
def apiRequest():
print('dummy')
def inputData():
"""
currently puts data in a localhost db
"""
    import mysql.connector
    from mysql.connector import errorcode
config = {
'user': 'testUser',
'password': 'anything',
'host': '127.0.0.1',
'database': 'TestOBA'
}
try:
cnx = mysql.connector.connect(**config)
test_sql(cnx)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
if __name__ == "__main__":
inputData()
|
{
"content_hash": "cc354628d4362dc8840b29ccb9cfd08c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 91,
"avg_line_length": 25.30188679245283,
"alnum_prop": 0.5712155108128263,
"repo_name": "rohanaras/bus_bunching",
"id": "1ca68092101829c51ad61502a84476733374a3e6",
"size": "1341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_collection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5216"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import json
import jsonpath_rw
import six
import orjson
from st2common.util import db as db_util
from st2common.constants.meta import yaml_safe_load
from st2common.constants.meta import yaml_safe_dump
__all__ = [
"from_json_string",
"from_yaml_string",
"json_escape",
"jsonpath_query",
"to_complex",
"to_json_string",
"to_yaml_string",
]
def from_json_string(value):
return orjson.loads(six.text_type(value))
def from_yaml_string(value):
return yaml_safe_load(six.text_type(value))
def to_json_string(value, indent=None, sort_keys=False, separators=(",", ": ")):
value = db_util.mongodb_to_python_types(value)
options = {}
if indent is not None:
options["indent"] = indent
if sort_keys is not None:
options["sort_keys"] = sort_keys
if separators is not None:
options["separators"] = separators
return json.dumps(value, **options)
def to_yaml_string(value, indent=None, allow_unicode=True):
value = db_util.mongodb_to_python_types(value)
options = {"default_flow_style": False}
if indent is not None:
options["indent"] = indent
if allow_unicode is not None:
options["allow_unicode"] = allow_unicode
return yaml_safe_dump(value, **options)
def json_escape(value):
"""Adds escape sequences to problematic characters in the string
This filter simply passes the value to json.dumps
as a convenient way of escaping characters in it
However, before returning, we want to strip the double
quotes at the ends of the string, since we're not looking
for a valid JSON value at the end, just conveniently using
this function to do the escaping. The value could be any
arbitrary value
"""
return json.dumps(value).strip('"')
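# Illustrative example (added note, not from the original source):
#   json_escape('say "hi"\n')  ->  'say \\"hi\\"\\n'
# i.e. the value is escaped via json.dumps and the surrounding double quotes
# are stripped off.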
def jsonpath_query(value, query):
"""Extracts data from an object `value` using a JSONPath `query`.
:link: https://github.com/kennknowles/python-jsonpath-rw
:param value: a object (dict, array, etc) to query
:param query: a JSONPath query expression (string)
:returns: the result of the query executed on the value
:rtype: dict, array, int, string, bool
"""
expr = jsonpath_rw.parse(query)
matches = [match.value for match in expr.find(value)]
if not matches:
return None
return matches
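# Illustrative usage (added note; the data below is hypothetical):
#   jsonpath_query({"users": [{"name": "a"}, {"name": "b"}]}, "users[*].name")
# returns ["a", "b"], and None when the query matches nothing.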
def to_complex(value):
return json.dumps(value)
# Magic string to which None type is serialized when using use_none filter
NONE_MAGIC_VALUE = "%*****__%NONE%__*****%"
def use_none(value):
return NONE_MAGIC_VALUE if value is None else value
|
{
"content_hash": "0e9ff965c90e161a31dda4a2f10aec39",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 80,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.6778115501519757,
"repo_name": "StackStorm/st2",
"id": "e314040c80362eee34398bb3d2c1d37f2d38888c",
"size": "3260",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2common/st2common/expressions/functions/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
"""
Test calling an expression without a target.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCalculatorMode(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def test__calculator_mode(self):
"""Test calling expressions in the dummy target."""
self.expect("expression 11 + 22", "11 + 22 didn't get the expected result", substrs=["33"])
# Now try it with a specific language:
self.expect("expression -l c -- 11 + 22", "11 + 22 didn't get the expected result", substrs=["33"])
|
{
"content_hash": "24378c52f18209f70e4f0d6f483dc226",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 107,
"avg_line_length": 28.434782608695652,
"alnum_prop": 0.6773700305810397,
"repo_name": "endlessm/chromium-browser",
"id": "9cd3c8170470479515d30e52fd8dbf9403a10301",
"size": "654",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/commands/expression/calculator_mode/TestCalculatorMode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Controller class that auto detects the platform
returning the appropriate class as controller
"""
from .controller import Controller
try:
# Attempt to load RPi Module
import RPi.GPIO as GPIO
from .rpi import rpi as Controller
except:
try:
# Attempt to load Beagle Bone Module
import Adafruit_BBIO.GPIO as GPIO
from .bb import bb as Controller
except:
# Fall back to Mock Controller
from .mock import mock as Controller
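# Illustrative note (added, not part of the original file): callers are
# expected to import Controller from this package, e.g.
#   from opensprinklerlib.controller import Controller
# and receive the RPi, BeagleBone, or mock implementation depending on which
# GPIO library imported successfully; the package path is assumed from this
# file's location.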
|
{
"content_hash": "7ebea32a3c3d4defb35b3f25d1c68aaf",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 50,
"avg_line_length": 28.41176470588235,
"alnum_prop": 0.6935817805383023,
"repo_name": "3dinfluence/opensprinklerlib",
"id": "430740a0f1a9a38bca9b7810c030288e875992ce",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opensprinklerlib/controller/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15864"
}
],
"symlink_target": ""
}
|
class CloudbaseInitException(Exception):
pass
|
{
"content_hash": "ca629dbd955b6a548bf3d1ca9c84cb20",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 25,
"alnum_prop": 0.8,
"repo_name": "cernops/cloudbase-init",
"id": "147611b979f24b24d25f7525cd98237d7f081eb7",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudbaseinit/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
def read_checksum(filehandle):
# We expect the comment to be at the beginning of the file.
data = filehandle.read(2048)
comment_key = 'tEXtchecksum\x00'
comment_pos = data.find(comment_key)
if comment_pos == -1:
return
checksum_pos = comment_pos + len(comment_key)
return data[checksum_pos:checksum_pos + 32]
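# Illustrative usage (added note; the file name below is hypothetical):
#   with open('expected-result.png', 'rb') as f:
#       checksum = read_checksum(f)
# returns the 32-character checksum embedded in the PNG's tEXt comment, or
# None when no such comment is found in the first 2048 bytes.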
|
{
"content_hash": "924ddbea8f7c3b59054c3420d9c00c9f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 63,
"avg_line_length": 34.6,
"alnum_prop": 0.6705202312138728,
"repo_name": "mogoweb/webkit_for_android5.1",
"id": "70a0502b73bf758eda51642fc16f9b9c1ed4d0af",
"size": "1899",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "webkit/Tools/Scripts/webkitpy/layout_tests/read_checksum_from_png.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "6772"
},
{
"name": "Assembly",
"bytes": "26025"
},
{
"name": "Awk",
"bytes": "2800"
},
{
"name": "Batchfile",
"bytes": "57337"
},
{
"name": "C",
"bytes": "7713030"
},
{
"name": "C++",
"bytes": "153178707"
},
{
"name": "CMake",
"bytes": "192330"
},
{
"name": "CSS",
"bytes": "483041"
},
{
"name": "Common Lisp",
"bytes": "9920"
},
{
"name": "DIGITAL Command Language",
"bytes": "5243"
},
{
"name": "DTrace",
"bytes": "1931"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "HTML",
"bytes": "14998422"
},
{
"name": "Java",
"bytes": "1522083"
},
{
"name": "JavaScript",
"bytes": "18008829"
},
{
"name": "Lex",
"bytes": "42554"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "M4",
"bytes": "49839"
},
{
"name": "Makefile",
"bytes": "476166"
},
{
"name": "Module Management System",
"bytes": "9756"
},
{
"name": "Objective-C",
"bytes": "2798053"
},
{
"name": "Objective-C++",
"bytes": "7846322"
},
{
"name": "PHP",
"bytes": "66595"
},
{
"name": "Perl",
"bytes": "1130475"
},
{
"name": "Perl 6",
"bytes": "445215"
},
{
"name": "Python",
"bytes": "5503045"
},
{
"name": "QML",
"bytes": "3331"
},
{
"name": "QMake",
"bytes": "294800"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Roff",
"bytes": "273562"
},
{
"name": "Ruby",
"bytes": "81928"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "488223"
},
{
"name": "Yacc",
"bytes": "153801"
},
{
"name": "xBase",
"bytes": "328"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, url_for, render_template, jsonify
import ConfigParser
import urllib, json
import serial
import subprocess
config = ConfigParser.RawConfigParser()
config.read('defaults.cfg')
WEATHER_API_KEY = config.get('openweathermap', 'api_key')
CITY_ID = config.get('openweathermap', 'city_id')
WEATHER_URL = config.get('openweathermap', 'url')
WEATHER_UNITS = config.get('openweathermap', 'units')
API_PARAMS = "?id=" + CITY_ID + "&units=" + WEATHER_UNITS + "&appid=" + WEATHER_API_KEY
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/weather')
def weather():
response = urllib.urlopen(WEATHER_URL + "/weather" + API_PARAMS)
data = json.loads(response.read())
return jsonify(data)
@app.route('/api/forecast')
def forecast():
response = urllib.urlopen(WEATHER_URL + "/forecast" + API_PARAMS)
data = json.loads(response.read())
return jsonify(data)
@app.route('/api/outside_temp')
def outside_temp():
response = urllib.urlopen('http://thermo/data.json')
data = json.loads(response.read())
return jsonify(data)
|
{
"content_hash": "d72c652e3b2d7273b54e38bcc6bfee2e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 87,
"avg_line_length": 26.488372093023255,
"alnum_prop": 0.6900790166812993,
"repo_name": "asommer70/mirror-pila",
"id": "2bc526b491e5fe681753cc945f91a6e4c8c4cdd0",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1234"
},
{
"name": "CSS",
"bytes": "3341"
},
{
"name": "HTML",
"bytes": "4891"
},
{
"name": "JavaScript",
"bytes": "174243"
},
{
"name": "Makefile",
"bytes": "70"
},
{
"name": "Python",
"bytes": "3025"
},
{
"name": "Shell",
"bytes": "5315"
},
{
"name": "Vim script",
"bytes": "1832"
}
],
"symlink_target": ""
}
|
import ctypes
import gi
gi.require_version('WebKit', '3.0')
from gi.repository import Gtk, WebKit, Soup
webView = WebKit.WebView()
webView.open('http://html5.grooveshark.com/')
window = Gtk.Window()
icon_theme=Gtk.IconTheme.get_default()
if icon_theme.lookup_icon("gnome-multimedia", 96, 0) : window.set_icon(icon_theme.load_icon("gnome-multimedia", 96, 0))
window.set_size_request(1000,600)
window.set_title('Groove-Play - Simple Grooveshark Player')
window.connect("delete-event", Gtk.main_quit)
s = Gtk.ScrolledWindow()
s.add(webView)
window.add(s)
cookiejar = Soup.CookieJarText.new("cookies/cookies.grp", False)
cookiejar.set_accept_policy(Soup.CookieJarAcceptPolicy.ALWAYS)
session = WebKit.get_default_session()
session.add_feature(cookiejar)
window.show_all()
Gtk.main();
|
{
"content_hash": "52cc9ad8c2ca5926199259fa16502340",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 119,
"avg_line_length": 24.71875,
"alnum_prop": 0.7534766118836915,
"repo_name": "dbtek/groove-play",
"id": "6d684eb0b37fbe516aeaf96dd7747703cb3b1695",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "play.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.forms.widgets import RadioFieldRenderer
from django.forms.utils import flatatt
from django.utils.html import escape, format_html, format_html_join, smart_urlquote
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
from django.utils import six
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = 'selectfilter'
if self.is_stacked:
attrs['class'] += 'stacked'
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append('<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n'
% (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), static('admin/')))
return mark_safe(''.join(output))
class AdminDateWidget(forms.DateInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vDateField', 'size': '10'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vTimeField', 'size': '8'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return format_html('<p class="datetime">{0} {1}<br />{2} {3}</p>',
_('Date:'), rendered_widgets[0],
_('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return format_html('<ul{0}>\n{1}\n</ul>',
flatatt(self.attrs),
format_html_join('\n', '<li>{0}</li>',
((force_text(w),) for w in self)))
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
template_with_initial = ('<p class="file-upload">%s</p>'
% forms.ClearableFileInput.template_with_initial)
template_with_clear = ('<span class="clearable-file-input">%s</span>'
% forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
"""
Converts the type of lookups specified in a ForeignKey limit_choices_to
attribute to a dictionary of query parameters
"""
params = {}
if lookups and hasattr(lookups, 'items'):
items = []
for k, v in lookups.items():
if callable(v):
v = v()
if isinstance(v, (tuple, list)):
v = ','.join(str(x) for x in v)
elif isinstance(v, bool):
# See django.db.fields.BooleanField.get_prep_lookup
v = ('0', '1')[v]
else:
v = six.text_type(v)
items.append((k, v))
params.update(dict(items))
return params
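# Illustrative example (added note; the lookups below are hypothetical):
#   url_params_from_lookup_dict({'is_active': True, 'group__in': [1, 2]})
# returns {'is_active': '1', 'group__in': '1,2'}, ready to be encoded into the
# raw_id widget's popup querystring.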
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, admin_site, attrs=None, using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
rel_to = self.rel.to
if attrs is None:
attrs = {}
extra = []
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse('admin:%s_%s_changelist' %
(rel_to._meta.app_label,
rel_to._meta.model_name),
current_app=self.admin_site.name)
params = self.url_parameters()
if params:
url = '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
else:
url = ''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript code looks for this hook.
# TODO: "lookup_id_" is hard-coded here. This should instead use
# the correct API to determine the ID dynamically.
extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> '
% (related_url, url, name))
extra.append('<img src="%s" width="16" height="16" alt="%s" /></a>'
% (static('admin/img/selector-search.gif'), _('Lookup')))
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
if value:
output.append(self.label_for_value(value))
return mark_safe(''.join(output))
def base_url_parameters(self):
return url_params_from_lookup_dict(self.rel.limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
except (ValueError, self.rel.to.DoesNotExist):
return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
if self.rel.to in self.admin_site._registry:
# The related object is registered with the same AdminSite
attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join(force_text(v) for v in value)
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
def __init__(self, widget, rel, admin_site, can_add_related=None):
self.is_hidden = widget.is_hidden
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.to in admin_site._registry
self.can_add_related = can_add_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def media(self):
return self.widget.media
def render(self, name, value, *args, **kwargs):
from django.contrib.admin.views.main import TO_FIELD_VAR
rel_to = self.rel.to
info = (rel_to._meta.app_label, rel_to._meta.model_name)
self.widget.choices = self.choices
output = [self.widget.render(name, value, *args, **kwargs)]
if self.can_add_related:
related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
url_params = '?%s=%s' % (TO_FIELD_VAR, self.rel.get_related_field().name)
# TODO: "add_id_" is hard-coded here. This should instead use the
# correct API to determine the ID dynamically.
output.append('<a href="%s%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> '
% (related_url, url_params, name))
output.append('<img src="%s" width="10" height="10" alt="%s"/></a>'
% (static('admin/img/icon_addlink.gif'), _('Add Another')))
return mark_safe(''.join(output))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminEmailInputWidget(forms.EmailInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminEmailInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.URLInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
def render(self, name, value, attrs=None):
html = super(AdminURLFieldWidget, self).render(name, value, attrs)
if value:
value = force_text(self._format_value(value))
final_attrs = {'href': smart_urlquote(value)}
html = format_html(
'<p class="url">{0} <a{1}>{2}</a><br />{3} {4}</p>',
_('Currently:'), flatatt(final_attrs), value,
_('Change:'), html
)
return html
class AdminIntegerFieldWidget(forms.TextInput):
class_name = 'vIntegerField'
def __init__(self, attrs=None):
final_attrs = {'class': self.class_name}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
|
{
"content_hash": "64bd68a3d35c7812977098f726070eb8",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 137,
"avg_line_length": 40.26488095238095,
"alnum_prop": 0.60300096089881,
"repo_name": "tastynoodle/django",
"id": "2d50f47396e1432b04d9bee6166ac3e006b0625e",
"size": "13529",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/contrib/admin/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import random
def gen_random():
index = random.randint(0, len(quotes) - 1)
return quotes[index]
quotes = ('Happiness can be found, even in the darkest of times, if one only remembers to turn on the light. - Albus Dumbledore',
'Indeed, your failure to understand that there are things much worse than death has always been your greatest weakness. - Albus Dumbledore to Voldemort',
'If you want to know what a man’s like, take a good look at how he treats his inferiors, not his equals. - Sirius Black',
'Curiosity is not a sin… But we should exercise caution with our curiosity… yes, indeed. - Albus Dumbledore',
'In dreams, we enter a world that’s entirely our own. - Albus Dumbledore',
'We did it, we bashed them, wee Potter’s the one, and Voldy’s gone moldy, so now let’s have fun! - Peeves’ Victory Song',
'To the well-organized mind, death is but the next great adventure. - Albus Dumbledore',
'We must try not to sink beneath our anguish, Harry, but battle on. - Albus Dumbledore',
'Fear of a name increases fear of the thing itself. - Albus Dumbledore',
'You sort of start thinking anything’s possible if you’ve got enough nerve. - Ginny Weasley',
'Hearing voices no one else can hear isn’t a good sign, even in the wizarding world. - Ron to Harry',
'It is our choices… that show what we truly are, far more than our abilities. - Albus Dumbledore',
'Never trust anything that can think for itself if you can’t see where it keeps its brain. - Arthur Weasley',
'There is no good or evil: only power and those too weak to seek it. - Quirinus Quirrell',
'Indifference and neglect often do much more damage than outright dislike. - Albus Dumbledore',
'The truth is a beautiful and terrible thing, and should therefore be treated with caution. - Albus Dumbledore',
'Youth cannot know how age thinks and feels. But old men are guilty if they forget what it was to be young. - Albus Dumbledore',
'Harry, suffering like this proves you are still a man! This pain is part of being human … the fact that you can feel pain \
like this is your greatest strength. - Order of the Pheonix',
'Numbing the pain for a while will make it worse when you finally feel it. - Albus Dumbledore',
'An invisible barrier separated him from the rest of the world. He was – he had always been a marked man. - Order of the Pheonix',
'He chose the boy he thought most likely to be a danger to him … and notice this, Harry. He chose, not the pureblood \
(which according to his creed, is the only kind of wizard worth being or knowing), but the half-blood, like himself. \
He saw himself in you before he had ever seen you, and in marking you with that scar, he did not kill you, as he intended, \
but gave you powers, and a future, which have fitted you to escape him not once, but four times so far. - Dumbledore to Harry',
'The mind is not a book, to be opened at will and examined at leisure. Thoughts are not etched on the inside of skulls, to be \
perused by an invader. The mind is a complex and many-layered thing. - Severus Snape',
'As much money and life as you could want! The two things most human beings would choose above all – the trouble is, \
humans do have a knack of choosing precisely those things that are worst for them. - Albus Dumbledore',
'It takes a great deal of bravery to stand up to our enemies, but just as much to stand up to our friends. - Albus Dumbledore',
'When you have seen as much of life as I have, you will not underestimate the power of obsessive love. - Albus Dumbledore',
'Age is foolish and forgetful when it underestimates youth. - Dumbledore to Harry',
'People find it far easier to forgive others for being wrong than being right. - Albus Dumbledore',
'Ah, music. A magic beyond all we do here! - Albus Dumbledore',
'It is the unknown we fear when we look upon death and darkness, nothing more. - Albus Dumbledore',
'We must try not to sink beneath our anguish, Harry, but battle on. - Albus Dumbledore',
'At these times… I use the Pensieve. One simply siphons the excess thoughts from one’s mind, pours them into the basin, \
and examines them at one’s leisure. - Albus Dumbledore',
'Where your treasure is, there will your heart be also. - Albus Dumbledore',
'The flesh reflects the madness within. - Remus',
'The best of us must sometimes eat our words. - Albus Dumbledore',
'Wit beyond measure is man\'s greatest treasure. - Luna',
'I can teach you how to bewitch the mind and ensnare the senses. - Severus Snape',
'I can make bad things happen to people who annoy me. I can make them hurt if I want to. - Tom Riddle',
'It does not do to dwell on dreams and forget to live... - Albus Dumbledore',
'To have been loved so deeply...will give us some protection forever. - Albus Dumbledore',
'You fail to recognize that it matters not what someone is born, but what they grow to be. - Albus Dumbledore',
'Books! And cleverness! There are more important things--friendship and bravery... - Hermione',
'I open at the close. - Albus Dumbledore',
'The ones we love never truly leave us. - Sirius',
'The Dark Lord shall rise again. - Wormtail',
'I solemnly swear that I\'m up to no good. - Fred and George',
'I don\'t go looking for trouble. Trouble usually finds me. - Harry',
'The usual rules do not seem to apply to you... - Snape',
'I have played my part well. - Snape',
'Always... - Severus Snape'
)
|
{
"content_hash": "787fb8382d0c5f1f6cbc53e4dadcc69f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 163,
"avg_line_length": 93.5079365079365,
"alnum_prop": 0.6761161093193007,
"repo_name": "OrkoHunter/PotterScript",
"id": "0a9296121aa145a3b702ebe9bfdb5d6154082b87",
"size": "5931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "potterscript/quotes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14082"
}
],
"symlink_target": ""
}
|
"""SoundCloud directive for reStructuredText."""
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
"""Plugin for soundclound directive."""
name = "rest_soundcloud"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
directives.register_directive('soundcloud', SoundCloud)
directives.register_directive('soundcloud_playlist', SoundCloudPlaylist)
return super(Plugin, self).set_site(site)
CODE = ("""<iframe width="{width}" height="{height}"
scrolling="no" frameborder="no"
src="https://w.soundcloud.com/player/?url=http://api.soundcloud.com/{preslug}/"""
"""{sid}">
</iframe>""")
class SoundCloud(Directive):
"""reST extension for inserting SoundCloud embedded music.
Usage:
.. soundcloud:: <sound id>
:height: 400
:width: 600
"""
has_content = True
required_arguments = 1
option_spec = {
'width': directives.positive_int,
'height': directives.positive_int,
}
preslug = "tracks"
def run(self):
"""Run the soundcloud directive."""
self.check_content()
options = {
'sid': self.arguments[0],
'width': 600,
'height': 160,
'preslug': self.preslug,
}
options.update(self.options)
return [nodes.raw('', CODE.format(**options), format='html')]
def check_content(self):
"""Emit a deprecation warning if there is content."""
if self.content: # pragma: no cover
raise self.warning("This directive does not accept content. The "
"'key=value' format for options is deprecated, "
"use ':key: value' instead")
class SoundCloudPlaylist(SoundCloud):
"""reST directive for SoundCloud playlists."""
preslug = "playlists"
|
{
"content_hash": "6c878cadb81c4738b70a6bee0781256d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 81,
"avg_line_length": 27.985915492957748,
"alnum_prop": 0.6039255158530448,
"repo_name": "atiro/nikola",
"id": "2577ff1d500e578872d8974b9ef420b00cad1bb0",
"size": "2012",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nikola/plugins/compile/rest/soundcloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18264"
},
{
"name": "JavaScript",
"bytes": "24667"
},
{
"name": "Python",
"bytes": "1064663"
},
{
"name": "Shell",
"bytes": "3076"
},
{
"name": "XSLT",
"bytes": "3527"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.core.checks import Error
from django.db import connections, models
from django.test import SimpleTestCase, mock
from django.test.utils import isolate_apps
def dummy_allow_migrate(db, app_label, **hints):
# Prevent checks from being run on the 'other' database, which doesn't have
# its check_field() method mocked in the test.
return db == 'default'
@isolate_apps('invalid_models_tests')
class BackendSpecificChecksTests(SimpleTestCase):
@mock.patch('django.db.models.fields.router.allow_migrate', new=dummy_allow_migrate)
def test_check_field(self):
""" Test if backend specific checks are performed. """
error = Error('an error')
class Model(models.Model):
field = models.IntegerField()
field = Model._meta.get_field('field')
with mock.patch.object(connections['default'].validation, 'check_field', return_value=[error]):
errors = field.check()
self.assertEqual(errors, [error])
|
{
"content_hash": "d517c260d9ed884e94e4508537696cd2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 103,
"avg_line_length": 35.46666666666667,
"alnum_prop": 0.6738721804511278,
"repo_name": "yephper/django",
"id": "d56f53df0ded24bcf09fdd14ec51a549189b2b64",
"size": "1091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/invalid_models_tests/test_backend_specific.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_token_review import V1beta1TokenReview
class TestV1beta1TokenReview(unittest.TestCase):
""" V1beta1TokenReview unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1TokenReview(self):
"""
Test V1beta1TokenReview
"""
model = kubernetes.client.models.v1beta1_token_review.V1beta1TokenReview()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "bd3d1542ee74f7f28937baa8a95c540b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 21.65,
"alnum_prop": 0.6997690531177829,
"repo_name": "skuda/client-python",
"id": "d4f820e7998343b1c53d62ac1360dd725c2862ae",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1beta1_token_review.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
}
|
import bayessb
from pysb.examples.robertson import model
from pysb.integrate import odesolve
import numpy
import matplotlib.pyplot as plt
import sys
scenario = 2
if len(sys.argv) > 1:
scenario = int(sys.argv[1])
seed = 2
random = numpy.random.RandomState(seed)
sigma = 0.1;
ntimes = 20;
tspan = numpy.linspace(0, 40, ntimes);
ysim = odesolve(model, tspan)
ysim_array = ysim.view(float).reshape(len(ysim), -1)
yspecies = ysim_array[:, :len(model.species)]
ydata = yspecies * (random.randn(*yspecies.shape) * sigma + 1);
ysim_max = yspecies.max(0)
ydata_norm = ydata / ysim_max
def likelihood(mcmc, position):
yout = mcmc.simulate(position)
yout_norm = yout / ysim_max
if scenario == 3:
# fit to "perfect" data
ret = numpy.sum((yspecies / ysim_max - yout_norm) ** 2 / (2 * sigma ** 2))
else:
# fit to noisy data
if scenario == 5:
# fit to first two species
ret = numpy.sum((ydata_norm[:,0:2] - yout_norm[:,0:2]) ** 2 / (2 * sigma ** 2))
else:
# fit to all species
ret = numpy.sum((ydata_norm - yout_norm) ** 2 / (2 * sigma ** 2))
return ret
def prior(mcmc, position):
if scenario == 1:
est = [1e-2, 1e7, 1e4, 1, 1, 1]
elif scenario in (2, 3, 5):
est = [1e-2, 1e7, 1e4]
elif scenario == 4:
est = [1e-2, 1e7]
mean = numpy.log10(est)
var = 10
return numpy.sum((position - mean) ** 2 / ( 2 * var))
def step(mcmc):
if mcmc.iter % 20 == 0:
print 'iter=%-5d sigma=%-.3f T=%-.3f acc=%-.3f, lkl=%g prior=%g post=%g' % \
(mcmc.iter, mcmc.sig_value, mcmc.T, float(mcmc.acceptance)/(mcmc.iter+1), mcmc.accept_likelihood,
mcmc.accept_prior, mcmc.accept_posterior)
def print_fit(position):
new_values = 10 ** position
print
print '%-10s %-12s %-12s %s' % ('parameter', 'actual', 'fitted', 'log10(fit/actual)')
for param, new_value in zip(opts.estimate_params, new_values):
change = numpy.log10(new_value / param.value)
values = (param.name, param.value, new_value, change)
print '%-10s %-12.2g %-12.2g %-+6.2f' % values
def plot_fit(position):
plt.figure()
colors = ('r', 'g', 'b')
labels = ('A', 'B', 'C')
real_lines = plt.plot(tspan, yspecies / ysim_max)
data_lines = plt.plot(tspan, ydata_norm)
sim_lines = plt.plot(tspan, mcmc.simulate(position) / ysim_max)
for rl, dl, sl, c, l in zip(real_lines, data_lines, sim_lines, colors,
labels):
rl.set_color(c)
dl.set_color(c)
sl.set_color(c)
rl.set_linestyle('--')
dl.set_linestyle(':')
dl.set_marker('x')
sl.set_label(l)
plt.legend(loc='lower right')
plt.show()
print "Running scenario", scenario
print "=================="
opts = bayessb.MCMCOpts()
opts.model = model
opts.tspan = tspan
# Note: actual parameter values are [4e-2, 3e7, 1e4, 1, 0, 0]
# A few estimation scenarios:
if scenario == 1:
# estimate all parameters from wild guesses (orders of magnitude off)
opts.estimate_params = model.parameters
opts.initial_values = [1e-4, 1e3, 1e6, 1e-1, 1e-1, 1e-1]
elif scenario in (2, 3, 5):
# estimate rates only (not initial conditions) from wild guesses
opts.estimate_params = [p for p in model.parameters if p.name.startswith('k') ]
opts.initial_values = [1e-4, 1e3, 1e6]
elif scenario == 4:
# estimate k1 and k2 only
opts.estimate_params = [model.parameters['k1'], model.parameters['k2']]
opts.initial_values = [1e-4, 1e3]
else:
raise RuntimeError("unknown scenario number")
opts.nsteps = 10000
opts.likelihood_fn = likelihood
opts.prior_fn = prior
opts.step_fn = step
opts.use_hessian = True
opts.hessian_period = opts.nsteps / 10
opts.seed = seed
mcmc = bayessb.MCMC(opts)
mcmc.run()
estimate = numpy.median(mcmc.positions[mcmc.accepts], 0)
print_fit(estimate)
plot_fit(estimate)
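# For reference (not part of the original script): with y_sim the normalized
# model trajectory and y_obs the normalized data, likelihood() returns
#   sum((y_obs - y_sim)**2 / (2 * sigma**2))
# i.e. a Gaussian negative log-likelihood up to an additive constant, while
# prior() adds sum((log10(p) - log10(est))**2 / (2 * var)) with var = 10, a
# broad log10-normal penalty around the order-of-magnitude guesses in `est`.
# The reported fit is the median of the accepted positions.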
|
{
"content_hash": "adc515e31a95dd17f7f271a4af67c6ef",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 109,
"avg_line_length": 31.544,
"alnum_prop": 0.6107025107785949,
"repo_name": "jmuhlich/bayessb",
"id": "2473628feed3967180e9320a7291b498242687f2",
"size": "3943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/robertson/run.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "187314"
}
],
"symlink_target": ""
}
|
import flask
import flask_bootstrap
from configobj import ConfigObj
from redmine import Redmine
config = ConfigObj('config.ini')
RM_USER = config['user']
RM_KEY = config['key']
RM_URL = config['url']
PROJ_ID = config['project']
TRACKER_MAP = {'epic': 11,
'bug': 1,
'task': 12,
'not-epic': '!11'}
redmine = Redmine(RM_URL, username=RM_USER, key=RM_KEY,
requests={'verify': False})
def get_issues(proj, tracker, **kwargs):
issues = redmine.issue.filter(project_id=proj,
status_id='open',
sort='priority:desc',
tracker_id=TRACKER_MAP[tracker],
**kwargs)
return issues
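# For illustration (not in the original script): get_issues(PROJ_ID, 'epic')
# asks Redmine for open issues in the project whose tracker id is
# TRACKER_MAP['epic'] (11), sorted by priority, while 'not-epic' uses the
# negated id '!11' to fetch everything that is not an epic.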
def index(user_id=None):
epics = get_issues(PROJ_ID, 'epic')
issues = {}
issues[0] = []
if user_id:
i_tmp = get_issues(PROJ_ID, 'not-epic', assigned_to_id=user_id)
else:
i_tmp = get_issues(PROJ_ID, 'not-epic')
for epic in epics:
issues[int(epic.id)] = []
for i in i_tmp:
try:
parent = int(i.parent)
except:
parent = 0
if parent in issues:
issues[parent].append(i)
return flask.render_template('index.html', epics=epics, issues=issues,
url=RM_URL)
def create_app(configfile=None):
app = flask.Flask('red-list')
flask_bootstrap.Bootstrap(app)
app.add_url_rule('/', None, index)
app.add_url_rule('/assigned/<int:user_id>', None, index)
return app
if __name__ == '__main__':
create_app().run(debug=True, host='0.0.0.0')
|
{
"content_hash": "af0dabc226808c6971bd5be475d9e6b8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 24.347826086956523,
"alnum_prop": 0.5297619047619048,
"repo_name": "kremlinkev/red_list",
"id": "3bb8fa8c9f64eca7e5f1613aa96b27902c37721a",
"size": "1705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "red_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1705"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from pathlib import Path
SOURCE_ROOT = Path(__file__).resolve().parents[1]
|
{
"content_hash": "d18fedb11fbde0278fbb890db9f8bbf2",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 22.4,
"alnum_prop": 0.7232142857142857,
"repo_name": "cfei18/incubator-airflow",
"id": "db79d1d862c27cefd690875330723c1e7f5ccead",
"size": "897",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docker_tests/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
}
|
import pytest
import tensorflow as tf
import numpy as np
import tfs.core.layer.ops as ops
from tfs.core.layer.dropout import Dropout
from tfs.network import Network
net = Network()
@pytest.fixture
def l():
l = Dropout(
net,
keep_prob=1.0,
)
return l
class TestDropout:
def test_build_inverse(self,l):
_in = tf.zeros([1,10,10,4])
_out=l.build(_in)
assert _out.get_shape().as_list()==[1,10,10,4]
|
{
"content_hash": "d8c47a6ebdce7f62858b9e326c0961ad",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 18.52173913043478,
"alnum_prop": 0.6713615023474179,
"repo_name": "crackhopper/TFS-toolbox",
"id": "31a44f6ab4b8dfd0c8046dc1c3d101280df0810b",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/layer/dropout_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1935"
},
{
"name": "Jupyter Notebook",
"bytes": "1820326"
},
{
"name": "Python",
"bytes": "99327"
},
{
"name": "Shell",
"bytes": "178"
}
],
"symlink_target": ""
}
|
"""OpenStack-related utilities."""
import sys
try:
if sys.version_info > (3, 2, 0): # pragma: no cover
import urllib.parse as urlparse
else: # pragma: no cover
import urlparse
except ImportError: # pragma: no cover
raise SystemExit('No urlparse module was found.')
try:
from openstack import connection as os_conn # pragma: no cover
except ImportError as e: # pragma: no cover
raise SystemExit('OpenStack plugins require access to the OpenStackSDK.'
' Please install "python-openstacksdk".'
' ERROR: %s' % str(e))
from monitorstack import utils
class OpenStack(object):
"""Class for reusable OpenStack utility methods."""
def __init__(self, os_auth_args):
"""Initialization method for class.
:param os_auth_args: dict containing auth creds.
:type os_auth_args: dict
"""
self.os_auth_args = os_auth_args
@property
def conn(self):
"""Return an OpenStackSDK connection.
:returns: object
"""
return os_conn.Connection(**self.os_auth_args)
def _session_req(self, path, service_type, interface='internal'):
"""Return compute resource limits for a project.
:param path: URL path to make a request against.
:type path: str
:param interface: Interface name, normally [internal, public, admin].
:type interface: str
:returns: dict
"""
endpoint_url = self.conn.session.get_endpoint(
interface=interface,
service_type=service_type
)
sess_url = urlparse.urljoin(endpoint_url, path)
return self.conn.session.get(sess_url).json()
def get_consumer_usage(self, servers=None, marker=None, limit=512):
"""Retrieve current usage by an OpenStack cloud consumer.
:param servers: ID of a given project to lookup.
:type servers: str || uuid
:param marker: ID of last server seen.
:type marker: str || uuid
:param limit: Number of items a single API call can return.
:type limit: int
:returns: list
"""
tenant_kwargs = {'details': True, 'all_tenants': True, 'limit': limit}
if not servers:
servers = list()
if marker:
tenant_kwargs['marker'] = marker
count = 0
for server in self.conn.compute.servers(**tenant_kwargs):
servers.append(server.to_dict())
count += 1
if count == limit:
return self.get_consumer_usage(
servers=servers,
marker=servers[-1]['id']
)
return servers
def get_compute_limits(self, project_id, interface='internal'):
"""Return compute resource limits for a project.
:param project_id: ID of a given project to lookup.
:type project_id: str || uuid
:param interface: Interface name, normally [internal, public, admin].
:type interface: str
:returns: dict
"""
path = '/os-quota-sets/' + project_id
return self._session_req(
path=path,
service_type='compute',
interface=interface
)
def get_projects(self):
"""Retrieve a list of projects.
:returns: list
"""
_consumers = list()
with utils.LocalCache() as c:
for project in self.conn.identity.projects():
_consumers.append(project)
cache_key = 'projects_' + str(project.id)
c.set(
cache_key,
project.to_dict(),
expire=43200,
tag='projects'
)
return _consumers
def get_project(self, project_id):
"""Retrieve project data.
:param project_id: ID of a given project to lookup.
:type project_id: str || uuid
:returns: dict
"""
project = None
cache_key = 'projects_{}'.format(project_id)
with utils.LocalCache() as c:
try:
project = c.get(cache_key)
if not project:
raise LookupError
except LookupError:
project_info = self.conn.identity.get_project(project_id)
project = project_info.to_dict()
c.set(cache_key, project, expire=43200, tag='projects')
finally:
return project
def get_project_name(self, project_id):
"""Retrieve the name of a project."""
return self.get_project(project_id=project_id)['name']
def get_flavors(self):
"""Retrieve all of flavors.
:returns: dict
"""
flavors = dict()
with utils.LocalCache() as c:
for flavor in self.conn.compute.flavors():
_flavor = flavor.to_dict()
cache_key = 'flavor_' + str(flavor.id)
c.set(
cache_key,
_flavor,
expire=43200,
tag='flavors'
)
entry = flavors[flavor.id] = dict()
entry.update(_flavor)
return flavors
def get_flavor(self, flavor_id):
"""Retrieve a flavor.
:param flavor_id: ID of a given flavor to lookup.
:type flavor_id: int || str
:returns: dict
"""
flavor = None
cache_key = 'flavor_{}'.format(flavor_id)
with utils.LocalCache() as c:
try:
flavor = c.get(cache_key)
if not flavor:
raise LookupError
except LookupError:
flavor_info = self.conn.compute.get_flavor(flavor_id)
flavor = flavor_info.to_dict()
c.set(cache_key, flavor, expire=43200, tag='flavors')
finally:
return flavor
def get_flavor_name(self, flavor_id):
"""Retrieve the name of a flavor.
:param flavor_id: ID of a given flavor to lookup.
:type flavor_id: int || str
:returns: str
"""
return self.get_flavor(flavor_id=flavor_id)['name']
def get_volume_pool_stats(self, interface='internal'):
"""Return volume pool usages.
:param interface: Interface name, normally [internal, public, admin].
:type interface: str
:returns: dict
"""
path = '/scheduler-stats/get_pools?detail=True'
return self._session_req(
path=path,
service_type='volume',
interface=interface
)
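# A minimal usage sketch (not part of the module). The auth values are
# placeholders and the exact keys accepted depend on the installed
# python-openstacksdk version:
#
#   ostack = OpenStack({'auth_url': 'http://keystone.example:5000/v3',
#                       'username': 'monitor',
#                       'password': 'secret',
#                       'project_name': 'admin'})
#   projects = ostack.get_projects()
#   servers = ostack.get_consumer_usage(limit=256)
#   limits = ostack.get_compute_limits(projects[0].id)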
|
{
"content_hash": "2ead87eb3413e357d630112cd38e4346",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 78,
"avg_line_length": 32.00956937799043,
"alnum_prop": 0.5412556053811659,
"repo_name": "major/monitorstack",
"id": "2572d78503068a0c869566a20efac91d761ab4a8",
"size": "7289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitorstack/utils/os_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98887"
}
],
"symlink_target": ""
}
|
"""Abstract base classes related to import."""
from . import _bootstrap
from . import machinery
import abc
import types
class Loader(metaclass=abc.ABCMeta):
"""Abstract base class for import loaders."""
@abc.abstractmethod
def load_module(self, fullname:str) -> types.ModuleType:
"""Abstract method which when implemented should load a module."""
raise NotImplementedError
class Finder(metaclass=abc.ABCMeta):
"""Abstract base class for import finders."""
@abc.abstractmethod
def find_module(self, fullname:str, path:[str]=None) -> Loader:
"""Abstract method which when implemented should find a module."""
raise NotImplementedError
Finder.register(machinery.BuiltinImporter)
Finder.register(machinery.FrozenImporter)
Finder.register(machinery.PathFinder)
class ResourceLoader(Loader):
"""Abstract base class for loaders which can return data from their
back-end storage.
This ABC represents one of the optional protocols specified by PEP 302.
"""
@abc.abstractmethod
def get_data(self, path:str) -> bytes:
"""Abstract method which when implemented should return the bytes for
the specified path."""
raise NotImplementedError
class InspectLoader(Loader):
"""Abstract base class for loaders which support inspection about the
modules they can load.
This ABC represents one of the optional protocols specified by PEP 302.
"""
@abc.abstractmethod
def is_package(self, fullname:str) -> bool:
"""Abstract method which when implemented should return whether the
module is a package."""
        raise NotImplementedError
@abc.abstractmethod
def get_code(self, fullname:str) -> types.CodeType:
"""Abstract method which when implemented should return the code object
for the module"""
        raise NotImplementedError
@abc.abstractmethod
def get_source(self, fullname:str) -> str:
"""Abstract method which should return the source code for the
module."""
        raise NotImplementedError
InspectLoader.register(machinery.BuiltinImporter)
InspectLoader.register(machinery.FrozenImporter)
class PyLoader(_bootstrap.PyLoader, InspectLoader):
"""Abstract base class to assist in loading source code by requiring only
back-end storage methods to be implemented.
The methods get_code, get_source, and load_module are implemented for the
user.
"""
@abc.abstractmethod
def source_path(self, fullname:str) -> object:
"""Abstract method which when implemented should return the path to the
sourced code for the module."""
raise NotImplementedError
class PyPycLoader(_bootstrap.PyPycLoader, PyLoader):
"""Abstract base class to assist in loading source and bytecode by
requiring only back-end storage methods to be implemented.
The methods get_code, get_source, and load_module are implemented for the
user.
"""
@abc.abstractmethod
def source_mtime(self, fullname:str) -> int:
"""Abstract method which when implemented should return the
modification time for the source of the module."""
raise NotImplementedError
@abc.abstractmethod
def bytecode_path(self, fullname:str) -> object:
"""Abstract method which when implemented should return the path to the
bytecode for the module."""
raise NotImplementedError
@abc.abstractmethod
def write_bytecode(self, fullname:str, bytecode:bytes):
"""Abstract method which when implemented should attempt to write the
bytecode for the module."""
raise NotImplementedError
|
{
"content_hash": "c29312aae0201b4a4e722bd1d60cf428",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 30.262295081967213,
"alnum_prop": 0.7055796316359697,
"repo_name": "MalloyPower/parsing-python",
"id": "b2bdb02c92645f79d08bb388b1aa3ef3524b0b4a",
"size": "3692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/importlib/abc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
import ctypes
from itertools import chain
import re
import struct
import traceback
import time
import datetime
from urllib import unquote
import uuid
from hashlib import md5
from random import randrange, choice
import greenlet
from eventlet import GreenPile
from eventlet import GreenPool
from eventlet import Queue
from eventlet import spawn_n
from eventlet.green import socket
from eventlet.timeout import Timeout
import zlib
from swift.common.storage_policy import POLICIES
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.wsgi import make_subrequest
from swiftclient.client import quote
from swift.common.http import HTTP_CONTINUE
from swift.common.http import is_success
from swift.common.http import HTTP_INSUFFICIENT_STORAGE
from swift.common.http import is_client_error
from swift.common.http import HTTP_NOT_FOUND
from swift.common.http import HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
from swift.proxy.controllers.base import update_headers
from swift.proxy.controllers.base import delay_denial
from swift.proxy.controllers.base import cors_validation
from swift.proxy.controllers.base import get_info
from swift.proxy.controllers.base import close_swift_conn
from swift.common.utils import split_path
from swift.common.utils import get_logger
from swift.common.utils import TRUE_VALUES
from swift.common.utils import get_remote_client
from swift.common.utils import ContextPool
from swift.common.utils import cache_from_env
from swift.common.utils import normalize_timestamp
from swift.common.utils import GreenthreadSafeIterator
from swift.proxy.server import ObjectController
from swift.proxy.server import ContainerController
from swift.proxy.server import AccountController
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.exceptions import ChunkReadTimeout
from swift.common.constraints import check_utf8
from swift.common.constraints import MAX_FILE_SIZE
from swift.common.constraints import MAX_HEADER_SIZE
from swift.common.constraints import MAX_META_NAME_LENGTH
from swift.common.constraints import MAX_META_VALUE_LENGTH
from swift.common.constraints import MAX_META_COUNT
from swift.common.constraints import MAX_META_OVERALL_SIZE
from swift.common.swob import Request
from swift.common.swob import Response
from swift.common.swob import HTTPNotFound
from swift.common.swob import HTTPPreconditionFailed
from swift.common.swob import HTTPRequestTimeout
from swift.common.swob import HTTPRequestEntityTooLarge
from swift.common.swob import HTTPBadRequest
from swift.common.swob import HTTPUnprocessableEntity
from swift.common.swob import HTTPServiceUnavailable
from swift.common.swob import HTTPClientDisconnect
from swift.common.swob import wsgify
from swift.common.swob import HTTPNotImplemented
from swift.common.swob import HeaderKeyDict
from swift.common.swob import HTTPException
from zerocloud import load_server_conf
from zerocloud.common import CLUSTER_CONFIG_FILENAME
from zerocloud.common import NODE_CONFIG_FILENAME
from zerocloud import TAR_MIMES
from zerocloud import POST_TEXT_OBJECT_SYSTEM_MAP
from zerocloud import POST_TEXT_ACCOUNT_SYSTEM_MAP
from zerocloud import merge_headers
from zerocloud import DEFAULT_EXE_SYSTEM_MAP
from zerocloud import STREAM_CACHE_SIZE
from zerocloud.common import parse_location
from zerocloud import can_run_as_daemon
from zerocloud.common import SwiftPath
from zerocloud.common import ImagePath
from zerocloud import TIMEOUT_GRACE
from zerocloud.configparser import ClusterConfigParser
from zerocloud.configparser import ClusterConfigParsingError
from zerocloud.tarstream import StringBuffer
from zerocloud.tarstream import UntarStream
from zerocloud.tarstream import TarStream
from zerocloud.tarstream import REGTYPE
from zerocloud.tarstream import BLOCKSIZE
from zerocloud.tarstream import NUL
from zerocloud.tarstream import ExtractedFile
from zerocloud.tarstream import Path
from zerocloud.tarstream import ReadError
from zerocloud.thread_pool import Zuid
ZEROVM_COMMANDS = ['open', 'api']
ZEROVM_EXECUTE = 'x-zerovm-execute'
try:
import simplejson as json
except ImportError:
import json
STRIP_PAX_HEADERS = ['mtime']
# Monkey patching Request to support content_type property properly
def _req_content_type_property():
"""
Set and retrieve Request.content_type
Strips off any charset when retrieved
"""
def getter(self):
if 'content-type' in self.headers:
return self.headers.get('content-type').split(';')[0]
def setter(self, value):
self.headers['content-type'] = value
return property(getter, setter,
doc="Retrieve and set the request Content-Type header")
Request.content_type = _req_content_type_property()
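# For example (illustration only, not part of the original module): with the
# property above, a request carrying 'Content-Type: text/plain; charset=utf-8'
# reports req.content_type as 'text/plain', and assigning
# req.content_type = 'application/json' rewrites the underlying header.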
def check_headers_metadata(new_req, headers, target_type, req, add_all=False):
prefix = 'x-%s-meta-' % target_type.lower()
meta_count = 0
meta_size = 0
for key, value in headers.iteritems():
if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
raise HTTPBadRequest(body='Header value too long: %s' %
key[:MAX_META_NAME_LENGTH],
request=req, content_type='text/plain')
if not key.lower().startswith(prefix):
if add_all and key.lower() not in STRIP_PAX_HEADERS and not \
key.lower().startswith('x-nexe-'):
new_req.headers[key] = value
continue
new_req.headers[key] = value
key = key[len(prefix):]
if not key:
raise HTTPBadRequest(body='Metadata name cannot be empty',
request=req, content_type='text/plain')
meta_count += 1
meta_size += len(key) + len(value)
if len(key) > MAX_META_NAME_LENGTH:
raise HTTPBadRequest(
body='Metadata name too long: %s%s' % (prefix, key),
request=req, content_type='text/plain')
elif len(value) > MAX_META_VALUE_LENGTH:
raise HTTPBadRequest(
body='Metadata value longer than %d: %s%s' % (
MAX_META_VALUE_LENGTH, prefix, key),
request=req, content_type='text/plain')
elif meta_count > MAX_META_COUNT:
raise HTTPBadRequest(
body='Too many metadata items; max %d' % MAX_META_COUNT,
request=req, content_type='text/plain')
elif meta_size > MAX_META_OVERALL_SIZE:
raise HTTPBadRequest(
body='Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE,
request=req, content_type='text/plain')
def is_zerocloud_request(version, account, headers):
return account and (ZEROVM_EXECUTE in headers or version in
ZEROVM_COMMANDS)
class GreenPileEx(GreenPile):
"""Pool with iterator semantics. Good for I/O-related tasks."""
def __init__(self, size_or_pool=1000):
super(GreenPileEx, self).__init__(size_or_pool)
self.current = None
def next(self):
"""Wait for the next result, suspending the current greenthread until it
is available. Raises StopIteration when there are no more results."""
if self.counter == 0 and self.used:
raise StopIteration()
try:
if not self.current:
self.current = self.waiters.get()
res = self.current.wait()
self.current = None
return res
finally:
if not self.current:
self.counter -= 1
class CachedBody(object):
"""Implements caching and iterative consumption of large bodies.
Typical (and currently, the only) uses are for managing large tarball or
script submissions from the user. The reason why we do this is because user
submitted content is allowed to be any size--so we don't want to hold, for
example, an entire 5GiB tarball in memory.
CachedBody is iterable. The ``cache`` parameter contains at all times the
"head", while the ``read_iter`` contains the "tail".
"""
def __init__(self, read_iter, cache=None, cache_size=STREAM_CACHE_SIZE,
total_size=None):
"""
:param read_iter:
A stream iterable.
:param list cache:
Defaults to None. If ``cache`` is None, constructing a `CachedBody`
object will initialize the ``cache`` and read _at least_
``cache_size`` bytes from ``read_iter`` and store them in
``cache``. In other words, the beginning of a stream.
If a ``cache`` is specified, this can represent the intermediate
state of a cached body, where something is already in the cache. In
other words, "mid-stream".
:param int cache_size:
Minimum amount of bytes to cache from ``read_iter``. Note: If the
size of each chunk from ``read_iter`` is greater than
``cache_size``, the actual amount of bytes cached in ``cache`` will
be the chunk size.
:param int total_size:
(In bytes.) If ``total_size`` is set, iterate over the
``read_iter`` stream until ``total_size`` counts down to 0.
Else, just read chunks until ``read_iter`` raises a
`StopIteration`.
"""
self.read_iter = read_iter
self.total_size = total_size
if cache:
self.cache = cache
else:
self.cache = []
size = 0
for chunk in read_iter:
self.cache.append(chunk)
size += len(chunk)
if size >= cache_size:
break
def __iter__(self):
if self.total_size:
for chunk in self.cache:
self.total_size -= len(chunk)
if self.total_size < 0:
yield chunk[:self.total_size]
break
else:
yield chunk
if self.total_size > 0:
for chunk in self.read_iter:
self.total_size -= len(chunk)
if self.total_size < 0:
yield chunk[:self.total_size]
break
else:
yield chunk
for _junk in self.read_iter:
pass
else:
for chunk in self.cache:
yield chunk
for chunk in self.read_iter:
yield chunk
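# A minimal sketch (not part of the original module) of how CachedBody keeps
# the head of a stream in memory while the tail stays lazy; the chunk values
# and cache_size below are made up for illustration:
#
#   body = CachedBody(iter(['aaa', 'bbb', 'ccc']), cache_size=4)
#   # body.cache now holds ['aaa', 'bbb']; 'ccc' has not been read yet
#   assert ''.join(body) == 'aaabbbccc'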
class FinalBody(object):
def __init__(self, app_iter):
self.app_iters = [app_iter]
def __iter__(self):
for app_iter in self.app_iters:
for chunk in app_iter:
yield chunk
def append(self, app_iter):
self.app_iters.append(app_iter)
class NameService(object):
"""DNS-like server using a binary protocol.
This is usable only with ZeroMQ-based networking for ZeroVM, and not
zbroker.
DNS resolves names to IPs; this name service resolves IDs to IP+port.
"""
# INTEGER (4 bytes)
INT_FMT = '!I'
# INTEGER (4 bytes) + HOST (2 bytes)
INPUT_RECORD_FMT = '!IH'
# 4 bytes of string + HOST (2 bytes)
OUTPUT_RECORD_FMT = '!4sH'
INT_SIZE = struct.calcsize(INT_FMT)
INPUT_RECORD_SIZE = struct.calcsize(INPUT_RECORD_FMT)
OUTPUT_RECORD_SIZE = struct.calcsize(OUTPUT_RECORD_FMT)
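    # For reference (illustration, not part of the original code): with the
    # formats above struct.calcsize gives INT_SIZE == 4, INPUT_RECORD_SIZE == 6
    # ('!IH') and OUTPUT_RECORD_SIZE == 6 ('!4sH'), so each request starts with
    # three 4-byte integers (peer id, bind count, connect count) followed by
    # bind records, and each reply record packs a 4-byte IPv4 address plus a
    # 2-byte port.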
def __init__(self, peers):
"""
:param int peers:
Number of ZeroVM instances that will contact this name server.
"""
self.port = None
self.hostaddr = None
self.peers = peers
self.sock = None
self.thread = None
self.bind_map = {}
self.conn_map = {}
self.peer_map = {}
self.int_pool = GreenPool()
def start(self, pool):
"""
:param pool:
`GreenPool` instance
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind to any port, any address
self.sock.bind(('', 0))
self.thread = pool.spawn(self._run)
(self.hostaddr, self.port) = self.sock.getsockname()
def _run(self):
while 1:
try:
message, peer_address = self.sock.recvfrom(65535)
offset = 0
peer_id = struct.unpack_from(NameService.INT_FMT, message,
offset)[0]
offset += NameService.INT_SIZE
bind_count = struct.unpack_from(NameService.INT_FMT, message,
offset)[0]
offset += NameService.INT_SIZE
connect_count = struct.unpack_from(NameService.INT_FMT,
message, offset)[0]
offset += NameService.INT_SIZE
for i in range(bind_count):
connecting_host, port = struct.unpack_from(
NameService.INPUT_RECORD_FMT, message, offset)[0:2]
offset += NameService.INPUT_RECORD_SIZE
self.bind_map.setdefault(peer_id, {})[connecting_host] = \
port
self.conn_map[peer_id] = (connect_count,
offset,
ctypes.create_string_buffer(
message[:]))
# peer_address[0] == ip
self.peer_map.setdefault(peer_id, {})[0] = peer_address[0]
# peer_address[1] == port
self.peer_map.setdefault(peer_id, {})[1] = peer_address[1]
if len(self.peer_map) == self.peers:
for peer_id in self.peer_map.iterkeys():
(connect_count, offset, reply) = self.conn_map[peer_id]
for i in range(connect_count):
connecting_host = struct.unpack_from(
NameService.INT_FMT, reply, offset)[0]
port = self.bind_map[connecting_host][peer_id]
connect_to = self.peer_map[connecting_host][0]
if connect_to == self.peer_map[peer_id][0]:
# both on the same host
connect_to = '127.0.0.1'
struct.pack_into(NameService.OUTPUT_RECORD_FMT,
reply, offset,
socket.inet_pton(socket.AF_INET,
connect_to),
port)
offset += NameService.OUTPUT_RECORD_SIZE
self.sock.sendto(reply, (self.peer_map[peer_id][0],
self.peer_map[peer_id][1]))
except greenlet.GreenletExit:
return
except Exception:
print traceback.format_exc()
pass
def stop(self):
self.thread.kill()
self.sock.close()
class ProxyQueryMiddleware(object):
def list_account(self, account, mask=None, marker=None, request=None):
new_req = request.copy_get()
new_req.path_info = '/' + quote(account)
new_req.query_string = 'format=json'
if marker:
new_req.query_string += '&marker=' + marker
resp = AccountController(self.app, account).GET(new_req)
if resp.status_int == 204:
data = resp.body
return []
if resp.status_int < 200 or resp.status_int >= 300:
raise Exception('Error querying object server')
data = json.loads(resp.body)
if marker:
return data
ret = []
while data:
for item in data:
if not mask or mask.match(item['name']):
ret.append(item['name'])
marker = data[-1]['name']
data = self.list_account(account, mask=None, marker=marker,
request=request)
return ret
def list_container(self, account, container, mask=None, marker=None,
request=None):
new_req = request.copy_get()
new_req.path_info = '/' + quote(account) + '/' + quote(container)
new_req.query_string = 'format=json'
if marker:
new_req.query_string += '&marker=' + marker
# We need to remove the authorize function here on this request in
# order to allow an "other" or "anonymous" user to be to allowed to
# run this container listing in some job. It is only removed from
# `new_req`.
# This should not allow the "other/anonymous" user to list the
# container directly; this will only be allowed within the context of
# execution under setuid permission. Any direct container listing from
# this user will result in a "401 Unauthorized" (from Swift) before we
# ever get here.
if 'swift.authorize' in new_req.environ:
del new_req.environ['swift.authorize']
resp = ContainerController(self.app, account, container).GET(new_req)
if resp.status_int == 204:
data = resp.body
return []
if resp.status_int < 200 or resp.status_int >= 300:
raise Exception('Error querying object server')
data = json.loads(resp.body)
if marker:
return data
ret = []
while data:
for item in data:
if item['name'][-1] == '/':
continue
if not mask or mask.match(item['name']):
ret.append(item['name'])
marker = data[-1]['name']
data = self.list_container(account, container,
mask=None, marker=marker,
request=request)
return ret
def parse_daemon_config(self, daemon_list):
result = []
request = Request.blank('/daemon', environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/json'})
socks = {}
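        # zip(*[iter(daemon_list)] * 2) pairs consecutive entries, e.g.
        # ['sock1', 'conf1.json', 'sock2', 'conf2.json'] ->
        # [('sock1', 'conf1.json'), ('sock2', 'conf2.json')]
        # (illustrative values only).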
for sock, conf_file in zip(*[iter(daemon_list)] * 2):
if socks.get(sock, None):
self.logger.warning('Duplicate daemon config for uuid %s'
% sock)
continue
socks[sock] = 1
try:
json_config = json.load(open(conf_file))
except IOError:
self.logger.warning('Cannot load daemon config file: %s'
% conf_file)
continue
parser = ClusterConfigParser(self.zerovm_sysimage_devices,
self.zerovm_content_type,
self.parser_config,
self.list_account,
self.list_container,
network_type=self.network_type)
try:
parser.parse(json_config, False, request=request)
except ClusterConfigParsingError, e:
self.logger.warning('Daemon config %s error: %s'
% (conf_file, str(e)))
continue
if len(parser.nodes) != 1:
self.logger.warning('Bad daemon config %s: too many nodes'
% conf_file)
for node in parser.nodes.itervalues():
if node.bind or node.connect:
self.logger.warning('Bad daemon config %s: '
'network channels are present'
% conf_file)
continue
if not isinstance(node.exe, ImagePath):
self.logger.warning('Bad daemon config %s: '
'exe path must be in image file'
% conf_file)
continue
image = None
for sysimage in parser.sysimage_devices.keys():
if node.exe.image == sysimage:
image = sysimage
break
if not image:
self.logger.warning('Bad daemon config %s: '
'exe is not in sysimage device'
% conf_file)
continue
node.channels = sorted(node.channels, key=lambda ch: ch.device)
result.append((sock, node))
self.logger.info('Loaded daemon config %s with UUID %s'
% (conf_file, sock))
return result
def __init__(self, app, conf, logger=None,
object_ring=None, container_ring=None):
self.app = app
if logger:
self.logger = logger
else:
self.logger = get_logger(conf, log_route='proxy-query')
# let's load appropriate server config sections here
load_server_conf(conf, ['app:proxy-server'])
timeout = int(conf.get('zerovm_timeout',
conf.get('node_timeout', 10)))
self.zerovm_timeout = timeout
self.node_timeout = timeout + (TIMEOUT_GRACE * 2)
self.immediate_response_timeout = float(conf.get(
'interactive_timeout', timeout)) + (TIMEOUT_GRACE * 2)
self.ignore_replication = conf.get(
'zerovm_ignore_replication', 'f').lower() in TRUE_VALUES
# network chunk size for all network ops
self.network_chunk_size = int(conf.get('network_chunk_size',
65536))
# max time to wait for upload to finish, used in POST requests
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.client_timeout = float(conf.get('client_timeout', 60))
self.put_queue_depth = int(conf.get('put_queue_depth', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
# execution engine version
self.zerovm_execute_ver = '1.0'
# maximum size of a system map file
self.zerovm_maxconfig = int(conf.get('zerovm_maxconfig', 65536))
# name server hostname or ip, will be autodetected if not set
self.zerovm_ns_hostname = conf.get('zerovm_ns_hostname')
# name server thread pool size
self.zerovm_ns_maxpool = int(conf.get('zerovm_ns_maxpool', 1000))
self.zerovm_ns_thrdpool = GreenPool(self.zerovm_ns_maxpool)
# use newest files when running zerovm executables, default - False
self.zerovm_uses_newest = conf.get(
'zerovm_uses_newest', 'f').lower() in TRUE_VALUES
# use executable validation info, stored on PUT or POST,
# to shave some time on zerovm startup
self.zerovm_prevalidate = conf.get(
'zerovm_prevalidate', 'f').lower() in TRUE_VALUES
# use CORS workaround to POST execute commands, default - False
self.zerovm_use_cors = conf.get(
'zerovm_use_cors', 'f').lower() in TRUE_VALUES
        # Accounting: enable or disable execution accounting data,
# default - disabled
self.zerovm_accounting_enabled = conf.get(
'zerovm_accounting_enabled', 'f').lower() in TRUE_VALUES
# Accounting: system account for storing accounting data
self.cdr_account = conf.get('user_stats_account', 'userstats')
# Accounting: storage API version
self.version = 'v1'
# default content-type for unknown files
self.zerovm_content_type = conf.get(
'zerovm_default_content_type', 'application/octet-stream')
# names of sysimage devices, no sysimage devices exist by default
devs = [(i.strip(), None)
for i in conf.get('zerovm_sysimage_devices', '').split()
if i.strip()]
self.zerovm_sysimage_devices = dict(devs)
# GET support: container for content-type association storage
self.zerovm_registry_path = '.zvm'
# GET support: cache config files for this amount of seconds
self.zerovm_cache_config_timeout = 60
self.parser_config = {
'limits': {
# total maximum iops for channel read or write operations
# per zerovm session
'reads': int(conf.get('zerovm_maxiops', 1024 * 1048576)),
'writes': int(conf.get('zerovm_maxiops', 1024 * 1048576)),
                # total maximum bytes for channel write operations
# per zerovm session
'rbytes': int(conf.get('zerovm_maxoutput', 1024 * 1048576)),
                # total maximum bytes for channel read operations
# per zerovm session
'wbytes': int(conf.get('zerovm_maxinput', 1024 * 1048576))
}
}
# storage policies that will be used for random node picking
policies = [i.strip()
for i in conf.get('standalone_policies', '').split()
if i.strip()]
self.standalone_policies = []
for pol in policies:
try:
pol_idx = int(pol)
policy = POLICIES.get_by_index(pol_idx)
except ValueError:
policy = POLICIES.get_by_name(pol)
if policy:
self.standalone_policies.append(policy.idx)
else:
self.logger.warning('Could not load storage policy: %s'
% pol)
if not self.standalone_policies:
self.standalone_policies = [0]
# use direct tcp connections (tcp) or intermediate broker (opaque)
self.network_type = conf.get('zerovm_network_type', 'tcp')
# 'opaque' == 'zbroker'
# NOTE(larsbutler): for more info about zbroker, see
# https://github.com/zeromq/zbroker
if self.network_type == 'opaque':
# opaque network does not support replication right now
self.ignore_replication = True
# list of daemons we need to lazy load
# (first request will start the daemon)
daemon_list = [i.strip() for i in
conf.get('zerovm_daemons', '').split() if i.strip()]
self.zerovm_daemons = self.parse_daemon_config(daemon_list)
self.uid_generator = Zuid()
@wsgify
def __call__(self, req):
try:
version, account, container, obj = split_path(req.path, 1, 4, True)
except ValueError:
return HTTPNotFound(request=req)
if is_zerocloud_request(version, account, req.headers):
exec_ver = '%s/%s' % (version, self.zerovm_execute_ver)
exec_header_ver = req.headers.get(ZEROVM_EXECUTE, exec_ver)
req.headers[ZEROVM_EXECUTE] = exec_header_ver
if req.content_length and req.content_length < 0:
return HTTPBadRequest(request=req,
body='Invalid Content-Length')
if not check_utf8(req.path_info):
return HTTPPreconditionFailed(request=req, body='Invalid UTF8')
controller = self.get_controller(exec_header_ver, account,
container, obj)
if not controller:
return HTTPPreconditionFailed(request=req, body='Bad URL')
if 'swift.trans_id' not in req.environ:
# if this wasn't set by an earlier middleware, set it now
trans_id = 'tx' + uuid.uuid4().hex
req.environ['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
req.headers['x-trans-id'] = req.environ['swift.trans_id']
controller.trans_id = req.environ['swift.trans_id']
self.logger.client_ip = get_remote_client(req)
if version:
req.path_info_pop()
try:
handler = getattr(controller, req.method)
except AttributeError:
return HTTPPreconditionFailed(request=req,
body='Bad HTTP method')
start_time = time.time()
# each request is assigned a unique k-sorted id
# it will be used by QoS code to assign slots/priority
req.headers['x-zerocloud-id'] = self.uid_generator.get()
req.headers['x-zerovm-timeout'] = self.zerovm_timeout
try:
res = handler(req)
except HTTPException as error_response:
return error_response
perf = time.time() - start_time
if 'x-nexe-cdr-line' in res.headers:
res.headers['x-nexe-cdr-line'] = \
'%.3f, %s' % (perf, res.headers['x-nexe-cdr-line'])
return res
return self.app
def get_controller(self, version, account, container, obj):
if version == 'open/1.0':
if container and obj:
return RestController(self.app, account, container, obj, self,
version)
return None
elif version == 'api/1.0':
if container:
return ApiController(self.app, account, container, obj, self,
version)
return None
return ClusterController(self.app, account, container, obj, self,
version)
def select_random_partition(ring):
partition_count = ring.partition_count
part = randrange(0, partition_count)
return part
class ClusterController(ObjectController):
header_exclusions = [get_sys_meta_prefix('account'),
get_sys_meta_prefix('container'),
get_sys_meta_prefix('object'),
'x-backend', 'x-auth', 'content-type',
'content-length', 'x-storage-token', 'cookie']
def __init__(self, app, account_name, container_name, obj_name, middleware,
command, **kwargs):
ObjectController.__init__(self, app,
account_name,
container_name or '',
obj_name or '')
self.middleware = middleware
self.command = command
self.parser = ClusterConfigParser(
{},
self.middleware.zerovm_content_type,
self.middleware.parser_config,
self.middleware.list_account,
self.middleware.list_container,
network_type=self.middleware.network_type)
self.exclusion_test = self.make_exclusion_test()
self.image_resp = None
self.cgi_env = None
self.exe_resp = None
self.cluster_config = ''
def create_cgi_env(self, req):
headers = dict(req.headers)
keys = filter(self.exclusion_test, headers)
for key in keys:
headers.pop(key)
env = {}
env.update(('HTTP_' + k.upper().replace('-', '_'), v)
for k, v in headers.items())
env['REQUEST_METHOD'] = req.method
env['REMOTE_USER'] = req.remote_user
env['QUERY_STRING'] = req.query_string
env['PATH_INFO'] = req.path_info
env['REQUEST_URI'] = req.path_qs
return env
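    # Illustration (not part of the original code) of what create_cgi_env
    # produces for a hypothetical request: a non-excluded header
    # 'X-Foo-Bar: baz' becomes env['HTTP_X_FOO_BAR'] = 'baz', alongside
    # REQUEST_METHOD, REMOTE_USER, QUERY_STRING, PATH_INFO and REQUEST_URI
    # copied from the request itself.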
def make_exclusion_test(self):
expr = '|'.join(self.header_exclusions)
test = re.compile(expr, re.IGNORECASE)
return test.match
def get_daemon_socket(self, config):
for daemon_sock, daemon_conf in self.middleware.zerovm_daemons:
if can_run_as_daemon(config, daemon_conf):
return daemon_sock
return None
def get_standalone_policy(self):
policy = choice(self.middleware.standalone_policies)
ring = self.app.get_object_ring(policy)
return ring, policy
def _get_own_address(self):
if self.middleware.zerovm_ns_hostname:
addr = self.middleware.zerovm_ns_hostname
else:
addr = None
object_ring = self.app.get_object_ring(0)
partition_count = object_ring.partition_count
part = randrange(0, partition_count)
nodes = object_ring.get_part_nodes(part)
for n in nodes:
addr = _get_local_address(n)
if addr:
break
return addr
def _make_exec_requests(self, pile, exec_requests):
"""Make execution request connections and start the execution.
This method calls :meth:`_connect_exec_node` to start the execution.
:param pile:
:class:`GreenPileEx` instance.
:param exec_requests:
`list` of `swift.common.swob.Request` objects.
:returns:
`list` of `swift.common.bufferedhttp.BufferedHTTPConnection`
objects.
"""
exec_list = []
known_locations = {}
known_salts = {}
result = []
logger = self.app.logger.thread_locals
for exec_request in exec_requests:
node = exec_request.node
account, container, obj = (
# NOTE(larsbutler): `node.path_info` is a path like one of the
# following:
# - /account
# - /account/container
# - /account/container/object
split_path(node.path_info, 1, 3, True))
container_info = self.container_info(account, container,
exec_request)
container_partition = container_info['partition']
# nodes in the cluster which contain a replica of this container:
container_nodes = container_info['nodes']
if not container_nodes:
# We couldn't find the container.
# Probably the job referenced a container that either doesn't
# exist or has no replicas at the moment.
raise HTTPNotFound(request=exec_request,
body='Error while fetching %s'
% node.path_info)
if obj:
                # The request is targeting an object.
                # (A request can be sent to /account, /account/container, or
                # /account/container/object.)
                # In this case, we have an object.
                # Try to co-locate with the object:
policy_index = exec_request.headers.get(
'X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
ring = self.app.get_object_ring(policy_index)
partition = ring.get_part(account, container, obj)
                # ``node_iter`` is all of the candidate object servers
# for running the job.
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(ring,
partition))
# If the storage-policy-index was not set, we set it.
# Why does swift need this to be set?
# Because the object servers don't know about policies.
# Object servers use different volumes and names depending on
# the policy index.
# You need to send this so the object server knows where to
# look for files (on a particular drive, for example).
exec_request.headers['X-Backend-Storage-Policy-Index'] = \
str(policy_index)
elif container:
                # This request is targeting an /account/container.
# We want to co-locate with the container.
ring = self.app.container_ring
partition = ring.get_part(account, container)
                # Same as above: ``node_iter`` is all of the candidate
# container servers for running the job.
node_iter = GreenthreadSafeIterator(
self.app.iter_nodes(ring, partition))
# NOTE: Containers have no storage policies. See the `obj`
# block above.
else:
                # The request is just targeting an account; run it anywhere.
object_ring, policy_index = self.get_standalone_policy()
# Similar to the `obj` case above, but just select a random
# server to execute the job.
partition = select_random_partition(object_ring)
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(
object_ring,
partition))
exec_request.headers['X-Backend-Storage-Policy-Index'] = \
str(policy_index)
# Create N sets of headers
            # Usually 1, but can be more for replicas
            # FIXME(larsbutler): `_backend_requests` is a private method of
# Swift's ObjectController class. This is HIGHLY internal and we
# probably shouldn't rely on it.
exec_headers = self._backend_requests(exec_request,
node.replicate,
container_partition,
container_nodes)
if node.skip_validation:
for hdr in exec_headers:
hdr['x-zerovm-valid'] = 'true'
node_list = [node]
node_list.extend(node.replicas)
# main nodes and replicas must all run on different servers
for i, repl_node in enumerate(node_list):
# co-location:
location = ('%s-%d' % (node.location, i)
if node.location else None)
# If we are NOT co-locating, just kick off all of the execution
# jobs in parallel (using a GreenPileEx).
# If we ARE co-locating, run the first of the execution jobs by
# itself to determine a location to run, and then run all of
# the remaining executing jobs on THAT location.
if location and location not in known_locations:
# we are trying to co-locate
salt = uuid.uuid4().hex
conn = self._connect_exec_node(node_iter,
partition,
exec_request,
logger,
repl_node,
exec_headers[i],
[], salt)
# If we get here, we have started to execute
                    # and either received a success, or a continue.
# It can also fail.
# add the swift node (conn.node) to a list of known
# locations,
known_locations[location] = [conn.node]
known_salts[location] = salt
result.append(conn)
else:
# If we reach this, we are either
# a) not co-locating
# OR
# b) we already chose a node for execution, and we try to
# locate everything else there
# If known_nodes is [], we are not co-locating.
# Elif location in known_locations, we have found a
# location and will locate everything there.
known_nodes = known_locations.get(location, [])
exec_list.append((node_iter,
partition,
exec_request,
logger,
repl_node,
exec_headers[i],
known_nodes,
known_salts.get(location, '0')))
for args in exec_list:
# spawn executions in parallel
pile.spawn(self._connect_exec_node, *args)
result.extend([connection for connection in pile if connection])
return result
def _spawn_file_senders(self, conns, pool, req):
for conn in conns:
conn.failed = False
conn.queue = Queue(self.middleware.put_queue_depth)
conn.tar_stream = TarStream()
pool.spawn(self._send_file, conn, req.path)
def _create_request_for_remote_object(self, data_sources, channel,
req, nexe_headers, node):
"""Create a request which fetches remote objects (that is, objects to
which a job is NOT co-located) from the object server. The request is
executed later; we only CREATE the request here and pre-authorize it.
"""
source_resp = None
# channel.path = zerocloud.common.ObjPath instance
# channel.path.path = actual string/url of the object
load_from = channel.path.path
        # It's a swift path, but with no object, thus it is only a path to a
# container (/account/container).
# NOTE(larsbutler): Fetching containers as remote objects right now is
# restricted.
# TODO(larsbutler): Document why that is the case.
if isinstance(channel.path, SwiftPath) and not channel.path.obj:
return HTTPBadRequest(request=req,
body='Cannot use container %s as a remote '
'object reference' % load_from)
# NOTE(larsbutler): The following is super important for understanding
# how ZeroCloud jobs are coordinated and remain somewhat efficient.
#
# We reuse requests for remote objects so that we don't fetch things a
# redundant amount of times.
#
# Here's about the most concise but detailed way I can state this:
#
# If multiple object nodes in a given job require the same _remote_
# object (that is, an object which does not have a replica on the
# object node), the proxy node coordinating the job--where this code is
# running right now--will fetch each object _only once_, iteratively
# stream the object in chunks so as to not load too much stuff into
# memory at once, and _push_ copies of the object to each node that
# needs it. I need to emphasize that; when an object node needs a
# remote object to run some job, the proxy node PUSHES the object--the
# object node never pulls. This is an optimization to avoid redundant
# transfer of object data throughout the cluster.
#
# We implement this logic in the next few lines.
#
# Linear search for an already-existing response:
for resp in data_sources:
# We reuse requests, to avoid doing a bunch of redundant fetches
            # from the object server. That is, there is no need to fetch one
# object multiple times.
if resp.request and load_from == resp.request.path_info:
source_resp = resp
break
# response doesn't already exist
if not source_resp:
# copy as GET request
source_req = req.copy_get()
source_req.path_info = load_from
# we don't want to pass query string
source_req.query_string = None
if self.middleware.zerovm_uses_newest:
# object server will try to use the object with the most recent
# timestamp
# this is good because we get the latest,
# this is bad because we have more latency because we ask
# multiple object servers
source_req.headers['X-Newest'] = 'true'
if self.middleware.zerovm_prevalidate \
and 'boot' in channel.device:
# FIXME: request to validate
# proxy doesn't know it is valid or not, it can only request to
# validate
source_req.headers['X-Zerovm-Valid'] = 'true'
acct, src_container_name, src_obj_name = \
split_path(load_from, 1, 3, True)
# We do GET only here, so we check read_acl.
# We don't do PUTs (writes) until after the job, and this is
# authorized in `process_server_response`
self.authorize_job(source_req, acl='read_acl')
source_resp = (
# passes a request to different middleware
ObjectController(self.app,
acct,
src_container_name,
src_obj_name).GET(source_req))
if source_resp.status_int >= 300:
update_headers(source_resp, nexe_headers)
source_resp.body = 'Error %s while fetching %s' \
% (source_resp.status,
source_req.path_info)
return source_resp
# everything went well
source_resp.nodes = []
# collect the data source into the "master" list
# so it can be reused
data_sources.append(source_resp)
# Data sources are all Response objects: some real, some fake
node.last_data = source_resp
# The links between data sources and the nodes which use a given data
# source are bi-directional.
# - Each data source has a reference to all nodes which use it
# - Each node has a reference to all data sources it uses
source_resp.nodes.append({'node': node, 'dev': channel.device})
# check if the validation passed
if (source_resp.headers.get('x-zerovm-valid', None)
and 'boot' in channel.device):
# If the data source is valid and the device is the executable, we
# can skip validation
node.skip_validation = True
for repl_node in node.replicas:
# do the same thing as above for replicated nodes
repl_node.last_data = source_resp
source_resp.nodes.append({'node': repl_node,
'dev': channel.device})
def create_final_response(self, conns, req):
final_body = None
final_response = Response(request=req)
req.cdr_log = []
for conn in conns:
resp = conn.resp
if conn.error:
conn.nexe_headers['x-nexe-error'] = \
conn.error.replace('\n', '')
# Set the response status to the highest one
if conn.resp.status_int > final_response.status_int:
# This collects the most severe error (500 over 400, etc.).
# NOTE(larsbutler): This might be a little weird, though. For
# example, that means that if we get a 503 and then a 507, we
# will return the 507 even though (as far as I can tell) there
# is no real difference in severity between responses at the
# same level (400, 500, etc.).
final_response.status = conn.resp.status
merge_headers(final_response.headers, conn.nexe_headers,
resp.headers)
self._store_accounting_data(req, conn)
if (is_success(resp.status_int)
and 'x-nexe-status' not in resp.headers):
# If there's no x-nexe-status, we have hit an object server
                # that doesn't know anything about zerovm (which probably means
# it's not running the ZeroCloud object_query middleware).
return HTTPServiceUnavailable(
request=req,
headers=resp.headers,
body='objectquery middleware is not installed '
'or not functioning')
if resp and resp.headers.get('x-zerovm-daemon', None):
# We don't want to expose the daemon ID to the client, since
# this is internal only, but we do want to notify them that
# daemon execution succeeded (so that they can understand
# significant differences in execution times between some
# jobs).
# The client doesn't/can't choose to run with a daemon; this is
                # an internal optimization.
# The 'x-nexe-cached' indicates that a daemon was used. If this
# is not the case, this header is omitted from the final
# response.
final_response.headers['x-nexe-cached'] = 'true'
if resp and resp.content_length > 0:
# We have some "body" to send back to the client.
if not resp.app_iter:
# we might have received either a string or an iter
                    # TODO(larsbutler): it might be good to wrap the right hand
# side in iter()
resp.app_iter = [resp.body]
# this is an old feature, whereby all channels with a null path
# just have their output concatenated and returned
# (any writeable channel with null path, such as stderr,
# stdout, output, etc.)
# Typically, this applies to `stdout`, and is most often how
# this is used.
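                # Illustrative example: if two nodes both write to a path-less
                # "stdout" channel, the client receives node 1's output
                # immediately followed by node 2's, and Content-Length is the
                # sum of both body lengths.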
if final_body:
# this is not the first iteration of the loop
final_body.append(resp.app_iter)
final_response.content_length += resp.content_length
else:
                    # this is the first iteration of the loop
final_body = FinalBody(resp.app_iter)
# FIXME: `resp` needs to be closed at some point;
# it might not get garbage collected, so we need to nuke
# from orbit.
# NOTE: this can cause jobs to send back 0 length, no
# content, and no error
final_response.app_iter = final_body
final_response.content_length = resp.content_length
# NOTE: each output channel may have a different content
# type, so the content-type set here may surprise you!
# we assign the content type of the first channel of the
# first connection that we encounter
final_response.content_type = resp.headers['content-type']
if self.middleware.zerovm_accounting_enabled:
# NOTE(larsbutler): This doesn't work, and should be disabled since
# it relies on object append support (which Swift does not have).
self.middleware.zerovm_ns_thrdpool.spawn_n(
self._store_accounting_data,
req)
if self.middleware.zerovm_use_cors and self.container_name:
container_info = self.container_info(self.account_name,
self.container_name, req)
if container_info.get('cors', None):
# NOTE(larsbutler): This is a workaround for a Swift bug that
# should be solved now.
# Swift defines CORS per container.
# Probably it should define CORS per account.
# FIXME(larsbutler): We should probably test this to see if
# it's still an issue.
if container_info['cors'].get('allow_origin', None):
final_response.headers['access-control-allow-origin'] = \
container_info['cors']['allow_origin']
if container_info['cors'].get('expose_headers', None):
final_response.headers['access-control-expose-headers'] = \
container_info['cors']['expose_headers']
# Why is the etag based on the time?
# Some browsers are very cache-hungry (like Chrome) and so if you
# submit a job multiple times, your browser might give you a cached
# result. And that's bad.
#
# Same thing for an http proxy between the client and the swift proxy.
#
# We cannot base the etag on the results, because we would have to
# potentially cache GBs of data here on the proxy in order to compute
# it, and that's crazy.
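        # Illustrative example: two identical jobs submitted a second apart get
        # different Etags (the md5 of the current timestamp), so neither the
        # browser nor an intermediate proxy can ever serve a stale result.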
etag = md5(str(time.time()))
final_response.headers['Etag'] = etag.hexdigest()
return final_response
def read_system_map(self, read_iter, chunk_size, content_type, req):
upload_expiration = time.time() + self.middleware.max_upload_time
try:
if content_type in ['application/x-gzip']:
read_iter = gunzip_iter(read_iter, chunk_size)
path_list = [StringBuffer(CLUSTER_CONFIG_FILENAME),
StringBuffer(NODE_CONFIG_FILENAME)]
untar_stream = UntarStream(read_iter, path_list)
for chunk in untar_stream:
req.bytes_transferred += len(chunk)
if time.time() > upload_expiration:
raise HTTPRequestTimeout(request=req)
except (ReadError, zlib.error):
raise HTTPUnprocessableEntity(
request=req,
body='Error reading %s stream'
% content_type)
for buf in path_list:
if buf.is_closed:
self.cluster_config = buf.body
break
def _load_input_from_chain(self, req, chunk_size):
data_resp = None
if 'chain.input' in req.environ:
chain_input = req.environ['chain.input']
bytes_left = int(req.environ['chain.input_size']) - \
chain_input.bytes_received
if bytes_left > 0:
data_resp = Response(
app_iter=iter(lambda: chain_input.read(
chunk_size), ''),
headers={
'Content-Length': bytes_left,
'Content-Type': req.environ['chain.input_type']})
data_resp.nodes = []
return data_resp
def read_json_job(self, req, req_iter):
etag = md5()
upload_expiration = time.time() + self.middleware.max_upload_time
for chunk in req_iter:
req.bytes_transferred += len(chunk)
if time.time() > upload_expiration:
raise HTTPRequestTimeout(request=req)
if req.bytes_transferred > \
self.middleware.zerovm_maxconfig:
raise HTTPRequestEntityTooLarge(request=req)
etag.update(chunk)
self.cluster_config += chunk
if 'content-length' in req.headers and \
int(req.headers['content-length']) != req.bytes_transferred:
raise HTTPClientDisconnect(request=req,
body='application/json '
'POST unfinished')
etag = etag.hexdigest()
if 'etag' in req.headers and req.headers['etag'].lower() != etag:
raise HTTPUnprocessableEntity(request=req)
def _tarball_cluster_config(self, req, req_iter):
# Tarball (presumably with system.map) has been POSTed
# we must have Content-Length set for tar-based requests
# as it will be impossible to stream them otherwise
if 'content-length' not in req.headers:
raise HTTPBadRequest(request=req,
body='Must specify Content-Length')
headers = {'Content-Type': req.content_type,
'Content-Length': req.content_length}
if not self.cluster_config:
# buffer first blocks of tar file
# and search for the system map
cached_body = CachedBody(req_iter)
self.read_system_map(
cached_body.cache,
self.middleware.network_chunk_size,
req.content_type,
req
)
if not self.cluster_config:
raise HTTPBadRequest(request=req,
body='System boot map was not '
'found in request')
req_iter = iter(cached_body)
if not self.image_resp:
self.image_resp = Response(app_iter=req_iter,
headers=headers)
self.image_resp.nodes = []
try:
cluster_config = json.loads(self.cluster_config)
return cluster_config
except Exception:
raise HTTPUnprocessableEntity(body='Could not parse '
'system map')
def _system_map_cluster_config(self, req, req_iter):
# System map was sent as a POST body
if not self.cluster_config:
self.read_json_job(req, req_iter)
try:
cluster_config = json.loads(self.cluster_config)
return cluster_config
except Exception:
raise HTTPUnprocessableEntity(
body='Could not parse system map')
def _script_cluster_config(self, req, req_iter):
if 'content-length' not in req.headers:
raise HTTPBadRequest(request=req,
body='Must specify Content-Length')
cached_body = CachedBody(req_iter)
# all scripts must start with shebang
if not cached_body.cache[0].startswith('#!'):
raise HTTPBadRequest(request=req,
body='Unsupported Content-Type')
buf = ''
shebang = None
for chunk in cached_body.cache:
i = chunk.find('\n')
if i > 0:
shebang = buf + chunk[0:i]
break
buf += chunk
if not shebang:
raise HTTPBadRequest(request=req,
body='Cannot find '
'shebang (#!) in script')
command_line = re.split('\s+',
re.sub('^#!\s*(.*)', '\\1', shebang), 1)
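        # Illustrative example (hypothetical path): a script starting with
        #   #!swift://account/container/python2.7 -u
        # yields command_line == ['swift://account/container/python2.7', '-u'],
        # i.e. the interpreter location plus optional interpreter arguments.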
sysimage = None
args = None
exe_path = command_line[0]
location = parse_location(exe_path)
if not location:
raise HTTPBadRequest(request=req,
body='Bad interpreter %s' % exe_path)
if isinstance(location, ImagePath):
if 'image' == location.image:
raise HTTPBadRequest(request=req,
body='Must supply image name '
'in shebang url %s'
% location.url)
sysimage = location.image
if len(command_line) > 1:
args = command_line[1]
params = {'exe_path': exe_path}
if args:
params['args'] = args.strip() + " "
if self.container_name and self.object_name:
template = POST_TEXT_OBJECT_SYSTEM_MAP
location = SwiftPath.init(self.account_name,
self.container_name,
self.object_name)
config = _config_from_template(params, template, location.url)
else:
template = POST_TEXT_ACCOUNT_SYSTEM_MAP
config = _config_from_template(params, template, '')
try:
cluster_config = json.loads(config)
except Exception:
raise HTTPUnprocessableEntity(body='Could not parse '
'system map')
if sysimage:
cluster_config[0]['file_list'].append({'device': sysimage})
string_path = Path(REGTYPE,
'script',
int(req.headers['content-length']),
cached_body)
stream = TarStream(path_list=[string_path])
stream_length = stream.get_total_stream_length()
self.image_resp = Response(app_iter=iter(stream),
headers={
'Content-Length': stream_length})
self.image_resp.nodes = []
return cluster_config
def _get_cluster_config_data_resp(self, req):
chunk_size = self.middleware.network_chunk_size
# request body from user:
req_body = req.environ['wsgi.input']
req_iter = iter(lambda: req_body.read(chunk_size), '')
data_resp = None
# If x-zerovm-source header is specified in the client request,
# we need to read the system.map from somewhere else other than the
# request body. (In the case of sending a zapp in the request body, we
        # just read the system.map from the zapp tarball.)
source_header = req.headers.get('X-Zerovm-Source')
if source_header:
req, req_iter, data_resp = self._process_source_header(
req, source_header
)
# Who will be billed for this job? Who is allowed to execute it?
# We don't check for read_acl or write_acl; we only check for execution
# rights.
# By default, only the owner of the account is allowed to execute
# things in the account.
# Other users (including anonymous) can be given permission to others,
# but the owner will be billed. Permission is given via
# `X-Container-Meta-Zerovm-Suid`.
#
# We don't remove the auth here because we need to check for read/write
# permissions on various objects/containers later on (in this request
# or subsequent chained requests).
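        # Illustrative example (hypothetical value): setting
        #   X-Container-Meta-Zerovm-Suid: otheraccount:otheruser
        # on a container lets that user submit jobs here while this account's
        # owner is billed; the value is interpreted like a Swift ACL string.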
self.authorize_job(req, remove_auth=False)
req.path_info = '/' + self.account_name
req.bytes_transferred = 0
if req.content_type in TAR_MIMES:
# Tarball (presumably with system.map) has been POSTed
cluster_conf_dict = self._tarball_cluster_config(req, req_iter)
elif req.content_type in 'application/json':
cluster_conf_dict = self._system_map_cluster_config(req, req_iter)
else:
# assume the posted data is a script and try to execute
cluster_conf_dict = self._script_cluster_config(req, req_iter)
try:
def replica_resolver(account, container):
if self.middleware.ignore_replication:
return 1
container_info = self.container_info(account, container, req)
ring = self.app.get_object_ring(
container_info['storage_policy'])
return ring.replica_count
cluster_config = self.parser.parse(
cluster_conf_dict,
self.image_resp is not None,
self.account_name,
replica_resolver=replica_resolver,
request=req
)
except ClusterConfigParsingError, e:
self.app.logger.warn(
'ERROR Error parsing config: %s', cluster_conf_dict)
raise HTTPBadRequest(request=req, body=str(e))
# NOTE(larsbutler): `data_resp` is None if there is no x-zerovm-source
# header; see above.
return cluster_config, data_resp
def _process_source_header(self, req, source_header):
# The client can execute code on ZeroCloud in 7 different ways:
# 1) POSTing a script to /version/account; the script contents
# must have a `#!file://...` header.
# 2) POSTing a System Map to /version/account.
        #    3) POSTing a zapp (a tarball with a System Map inside it)
        #    4) GET using the "open" method. That is, an object can be fetched
# from Swift/ZeroCloud and processed by a zapp on the fly.
# 5) REST API using open/1.0 method: Requests are submitted to
# /version/account/container/zapp_object, and can include a
        #       query string.
# 6) REST API using api/1.0 method: Requests are submitted to a
# /version/account/container/plus/any/arbitrary/path and query
# string. The `container` must exist, but can be empty. Requests
# will be handled by the zapp specified in the
# `X-Container-Meta-Rest-Endpoint` header set on the
# `container`. The value of this header is a swift path:
# `swift://account/container/zapp_object` for example.
# 7) If the user uses methods 1, 2, or 3, and sets the
# `X-Zerovm-Source` header, the POST contents will instead be
# treated simply as input data to the request, and the request
# will be serviced by the zapp specified in the
# `X-Zerovm-Source` header.
#
# No matter what, all requests which reach `post_job` will be 1 of
# 3 types:
# - A script
# - A tarball
# - A System Map
#
# RestController and ApiController (which process open/1.0 and
# api/1.0 requests, respectively) will convert REST API requests to
# the System Map case.
#
# The user can explicitly specify `X-Zerovm-Source` in cases 1-3,
# which will change the request to case 7. Cases 5 and 6 will
# implicitly set the `X-Zerovm-Source`; the user has no control
# over this.
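        # Illustrative example (hypothetical names): a client POSTing input
        # data with the header
        #   X-Zerovm-Source: swift://myaccount/myapps/wordcount.zapp
        # gets its POST body treated as job input, while the named zapp is
        # fetched and used as the job definition (case 7 above).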
req_body = req.environ['wsgi.input']
source_loc = parse_location(unquote(source_header))
if not isinstance(source_loc, SwiftPath):
return HTTPPreconditionFailed(
request=req,
body='X-Zerovm-Source format is '
'swift://account/container/object')
data_resp = None
if req.content_length:
data_resp = Response(
app_iter=iter(
lambda: req_body.read(self.middleware.network_chunk_size),
''
),
headers={
'Content-Length': req.content_length,
'Content-Type': req.content_type})
data_resp.nodes = []
source_loc.expand_account(self.account_name)
source_req = make_subrequest(req.environ, method='GET',
swift_source='zerocloud')
source_req.path_info = source_loc.path
source_req.query_string = None
source_req.headers['x-zerovm-source'] = req.headers['x-zerovm-source']
# If the `X-Zerovm-Source` is set--and it is in this case--we need
# to check that submitter of the request EITHER has Read ACL
# permissions OR Setuid permissions to the zapp specified in
# `X-Zerovm-Source`.
#
# Again, we are only checking authorization on
# the object specified in `X-Zerovm-Source`.
#
# If Read ACL check succeeds, continue executing this request.
# Else, check if Setuid is allowed.
# If Setuid is allowed, continue executing this request.
# Else, raise an HTTP 403 error.
self.authorize_job(source_req, acl='read_acl', save_env=req.environ)
sink_req = Request.blank(req.path_info,
environ=req.environ, headers=req.headers)
source_resp = source_req.get_response(self.app)
if not is_success(source_resp.status_int):
return source_resp
sink_req.content_length = source_resp.content_length
sink_req.content_type = source_resp.headers['Content-Type']
sink_req.etag = source_resp.etag
req_iter = iter(source_resp.app_iter)
req = sink_req
return req, req_iter, data_resp
def _create_sysmap_resp(self, node):
sysmap = node.dumps()
return Response(app_iter=iter([sysmap]),
headers={'Content-Length': str(len(sysmap))})
def post_job(self, req):
chunk_size = self.middleware.network_chunk_size
if 'content-type' not in req.headers:
return HTTPBadRequest(request=req,
body='Must specify Content-Type')
cluster_config, data_resp = self._get_cluster_config_data_resp(req)
if not self.cgi_env:
self.cgi_env = self.create_cgi_env(req)
        # List of `swift.common.swob.Response` objects (the data sources)
data_sources = []
if self.exe_resp:
self.exe_resp.nodes = []
data_sources.append(self.exe_resp)
# Address of this machine for remote machines to connect to:
addr = self._get_own_address()
if not addr:
return HTTPServiceUnavailable(
body='Cannot find own address, check zerovm_ns_hostname')
ns_server = None
# Start the `NameService`, if necessary.
# If the network type is 'tcp' (ZeroVM+ZeroMQ networking) and there is
# more than one node in the cluster config, we need a name service.
# NOTE(larsbutler): If there's only one node, we don't need networking,
# and thus, don't need to start the name service.
if (self.middleware.network_type == 'tcp'
and cluster_config.total_count > 1):
ns_server = NameService(cluster_config.total_count)
if self.middleware.zerovm_ns_thrdpool.free() <= 0:
return HTTPServiceUnavailable(
body='Cluster slot not available',
request=req)
ns_server.start(self.middleware.zerovm_ns_thrdpool)
if not ns_server.port:
# no free ports
return HTTPServiceUnavailable(body='Cannot bind name service')
# exec_requests: Send these to the appropriate object servers
exec_requests = []
# NOTE(larsbutler): if not data_resp and load_data_resp: chain = True
load_data_resp = True
# self.parser.node_list defines all of the zerovm instances --
        # including replicas -- that will be launched for this job (or for
# this part of the chain)
# NOTE(larsbutler): 'node' == 'object server'
# FIXME(larsbutler): This loop is too long; we should abstract out some
# pieces of this.
for node in cluster_config.nodes.itervalues():
nexe_headers = HeaderKeyDict({
'x-nexe-system': node.name,
'x-nexe-status': 'ZeroVM did not run',
'x-nexe-retcode': 0,
'x-nexe-etag': '',
'x-nexe-validation': 0,
'x-nexe-cdr-line': '0.0 0.0 0 0 0 0 0 0 0 0',
'x-nexe-policy': '',
'x-nexe-colocated': '0'
})
path_info = req.path_info
# Copy the request path, environ, and headers from the client
# request into the new request.
# NOTE(larsbutler): The `path_info` is overwritten below. The
# `Request.blank` method just requires a valid path for the first
# arg.
exec_request = Request.blank(path_info,
environ=req.environ,
headers=req.headers)
# Each node has its own request `path_info`.
# `node.path_info` can be:
# - /account
# - /account/container
# - /account/container/object
            # If the path is just /account, there is no object to co-locate with
# so it doesn't matter _where_ we execute this request.
# If the path contains container or container/object, we need to
# co-locate it with the container or object (whichever the case may
# be).
exec_request.path_info = node.path_info
if 'zerovm.source' in req.environ:
# `zerovm.source` is needed for authorization and job chaining.
# It's a "hidden" variable; the client never sees it, neither
# in the response nor the untrusted code execution environment.
exec_request.environ['zerovm.source'] = (
req.environ['zerovm.source']
)
# x-zerovm-access is set to "GET" (read) or "PUT" (write)
exec_request.headers['x-zerovm-access'] = node.access
# NOTE(larsbutler): We don't set the etag here because we're
# iteratively streaming our request contents, and thus, we don't
# know the entire contents of the stream. (In order to calculate a
# proper hash/etag, we would have to buffer the entire contents,
            # which could be several GiBs.)
exec_request.etag = None
# We are sending a tar stream to object server, that's why we set
# the content-type here.
exec_request.content_type = TAR_MIMES[0]
# We need to check for access to the local object.
# The job is co-located to the file it processes.
# Object is never fetched; it is just read from disk.
# We do the authorization check BEFORE the job is sent to the
# target object server.
# We can only co-locate with one object; if a node has access to
# more objects, the "remote" objects must be fetched.
#
# Job description can specify the "attach" attribute to explicitly
            # attach to a specific object/node as the local object.
# Otherwise, we apply some heuristics to decide where to put the
# job. `ConfigParser` has this logic. Here the gist of the
# heuristics:
# - Read trumps write. If you have read and write, you will be
# attached to read.
# - If you have a "script" channel; although it is "read", you will
# never be attached to that because more than likely, the data
            #   you process with the script will be much bigger than the
# script itself.
# - If you have multiple read channels and no script, behavior is
# undefined.
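            # Illustrative example: a node with a read-only "input" object, a
            # writable "output" object and a "script" channel is co-located
            # with the "input" object: read beats write, and the script channel
            # is never picked as the attachment point.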
acl = None
if node.access == 'GET':
acl = 'read_acl'
elif node.access == 'PUT':
acl = 'write_acl'
if acl:
# NOTE(larsbutler): We could probably remove the auth here,
# since this request is a new one, and won't be used later.
# The `exec_request` is only used to send a job to the object
# server.
self.authorize_job(exec_request, acl=acl, remove_auth=False)
# chunked encoding handling looks broken in Swift
# but let's leave it here, maybe somebody will fix it:
# exec_request.content_length = None
# exec_request.headers['transfer-encoding'] = 'chunked'
# FIXME(larsbutler): x-account-name is a deprecated header,
# probably we can remove this. Also, account-name could be
# different from request url, since we have the
# acl/setuid execution feature available to other users.
exec_request.headers['x-account-name'] = self.account_name
# Proxy sends timestamp to each object server in advance
            # for each object that the object server will create.
# So if this job creates multiple objects, it will use this
# timestamp.
# FIXME(larsbutler): Just create ONE timestamp per job (outside of
# this loop at the beginning of `post_job`).
exec_request.headers['x-timestamp'] = \
normalize_timestamp(time.time())
# NOTE(larsbutler): We explicitly set `x-zerovm-valid` to 'false';
# the client CANNOT be allowed to dictate this. Validity will be
# resolved later.
exec_request.headers['x-zerovm-valid'] = 'false'
# If there are multiple nodes but they are not connected (no
# networking), we can use the default pool/scheduling:
exec_request.headers['x-zerovm-pool'] = 'default'
# If we are using networking, use the 'cluster' pool.
# FIXME(larsbutler): If we are using zvm:// URLs to communicate
# between execution groups (zvm:// means something like "pipes"),
# we should probably also use the cluster pool. With the current
# implementation, a job with zvm:// URLs would not use the cluster
# pool. Probably this needs to be revisited.
if len(node.connect) > 0 or len(node.bind) > 0:
# Execution node operation depends on connections to other
# nodes.
# We use a special pool for cluster jobs because we want to
# start all nodes in a cluster job at the same time; we don't
# want some nodes in the job to be queued and leave others
# waiting.
# NOTE(larsbutler): In other words, we run all at once (or not
# at all, apparently?).
exec_request.headers['x-zerovm-pool'] = 'cluster'
if ns_server:
node.name_service = 'udp:%s:%d' % (addr, ns_server.port)
if cluster_config.total_count > 1:
# FIXME(larsbutler): Make `build_connect_string` return a
# value, and assign instead of mutating `node` inside the
# method.
self.parser.build_connect_string(
node, req.headers.get('x-trans-id'))
# Replication directive comes from system.map ("replicate")
                # which is usually 0, 1 or 3 (since we have by default 3
# replicas in a typical cluster; depends on the swift config,
# though).
if node.replicate > 1:
# We need an ADDITIONAL N-1 copies to bring the total
# number of copies to N.
for i in range(0, node.replicate - 1):
node.replicas.append(deepcopy(node))
# Generate an ID for the replica.
# IDs are generated in the following way:
# Say there are 4 nodes in the job, with IDs 1, 2, 3,
# and 4 respectively.
# We calculate replica IDs such that the first set of
# replicas will have sequential IDs start after the
# highest ID in the original node set.
# The second set of replicas does that same thing,
# except the IDs start after the highest ID in the
# first set of replicas. For example:
#
# orig: 1 2 3 4 <-- 4 execution nodes in the job
# rep1: 5 6 7 8 <-- first set of replicas
# rep2: 9 10 11 12 <-- second set of replicas
# rep3: 13 14 15 16 <-- third set
# and so on..
node.replicas[i].id = \
node.id + (i + 1) * len(cluster_config.nodes)
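                    # Illustrative example: with 4 nodes in the job, the first
                    # replica of node 2 gets id 2 + 1 * 4 = 6 and its second
                    # replica gets 2 + 2 * 4 = 10, matching the table above.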
            # each exec request needs a copy of the cgi env stuff (vars)
node.copy_cgi_env(request=exec_request, cgi_env=self.cgi_env)
# we create a fake data source
# a fake response containing the system.map just now created for
# this object server:
resp = self._create_sysmap_resp(node)
# adds the response to two places:
            # 1) add it to the "master" list of data sources; why? so we don't
# redundantly fetch data; we cache and reuse
# 2) add it to node-specific list of data sources
node.add_data_source(data_sources, resp, 'sysmap')
for repl_node in node.replicas:
# repeat the above for each replica:
repl_node.copy_cgi_env(request=exec_request,
cgi_env=self.cgi_env)
resp = self._create_sysmap_resp(repl_node)
repl_node.add_data_source(data_sources, resp, 'sysmap')
# for each node, we want to know the remote objects it needs to
# reference
channels = node.get_list_of_remote_objects()
for ch in channels:
# Translate channels into data sources.
# A data source is just a response from the object server.
# It's a byte stream with headers (and content-type).
# Some responses could have no content-type, because we know
# their content-type beforehand (system.map is json, for
# example).
# TODO(larsbutler): raise errors instead of returning them
error = self._create_request_for_remote_object(data_sources,
ch,
req,
nexe_headers,
node)
if error:
return error
# the user sent us a zapp or other tar image
if self.image_resp:
# image is user data and must go last
node.last_data = self.image_resp
self.image_resp.nodes.append({'node': node,
'dev': 'image'})
for repl_node in node.replicas:
repl_node.last_data = self.image_resp
self.image_resp.nodes.append({'node': repl_node,
'dev': 'image'})
# NOTE(larsbutler): The following block was added for job chaining.
if node.data_in:
# We have "data" in the request body.
                # That is, we used x-zerovm-source and the request body
# contains "data input" instead of an application tarball or
# system.map.
                # `data_resp` is None if there was no x-zerovm-source in the
# client request.
if not data_resp and load_data_resp:
# This can occur at any point in a chained job (either the
# first part of the chain or a subsequent part).
data_resp = self._load_input_from_chain(req, chunk_size)
load_data_resp = False
if data_resp:
node.last_data = data_resp
data_resp.nodes.append({'node': node,
'dev': 'stdin'})
for repl_node in node.replicas:
repl_node.last_data = data_resp
data_resp.nodes.append({'node': repl_node,
'dev': 'stdin'})
exec_request.node = node
exec_request.resp_headers = nexe_headers
# If possible, try to submit the job to a daemon.
# This is an internal optimization.
sock = self.get_daemon_socket(node)
if sock:
exec_request.headers['x-zerovm-daemon'] = str(sock)
exec_requests.append(exec_request)
# End `exec_requests` loop
        # We have sent a request for each "data source" (object to be used by the
# execution), and we have a response which includes the headers (but we
# haven't read the body yet). See `_create_request_for_remote_object`.
        # This includes fake data sources that we made up on the fly (like
# for system.maps).
if self.image_resp and self.image_resp.nodes:
# if the user sent a tar/zapp, then we will have an image_resp (not
# None) or image was specified by x-zerovm-source
data_sources.append(self.image_resp)
if data_resp and data_resp.nodes:
            # if and only if data_resp was set (the request body or chained
            # input is being used as job input data)
data_sources.append(data_resp)
tstream = TarStream()
for data_src in data_sources:
# this loop calculates the sizes of all of the streams for the
# nodes
# NOTE: 1 stream will have multiple files
# we have one stream channel between the proxy and each object node
for n in data_src.nodes:
# Get node size. "Node size" is the size of the stream that
# will be passed to that node.
if not getattr(n['node'], 'size', None):
# if it's not set, initialize to 0
n['node'].size = 0
n['node'].size += len(tstream.create_tarinfo(
ftype=REGTYPE,
name=n['dev'],
size=data_src.content_length))
n['node'].size += \
TarStream.get_archive_size(data_src.content_length)
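                # In effect (assuming the usual 512-byte tar blocking), each
                # data source contributes one tar header block plus its content
                # length rounded up to a whole block, so node.size is the byte
                # count of the tar stream this object server should expect.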
        # We have calculated the content_length of the requests
# Using greenlet pool/pile, start execution of each part of the job on
# the object nodes.
pile = GreenPileEx(cluster_config.total_count)
conns = self._make_exec_requests(pile, exec_requests)
if len(conns) < cluster_config.total_count:
self.app.logger.exception(
'ERROR Cannot find suitable node to execute code on')
for conn in conns:
close_swift_conn(getattr(conn, 'resp'))
return HTTPServiceUnavailable(
body='Cannot find suitable node to execute code on')
for conn in conns:
if hasattr(conn, 'error'):
if hasattr(conn, 'resp'):
close_swift_conn(conn.resp)
return Response(app_iter=[conn.error],
status="%d %s" % (conn.resp.status,
conn.resp.reason),
headers=conn.nexe_headers)
_attach_connections_to_data_sources(conns, data_sources)
# chunked encoding handling looks broken in Swift
# but let's leave it here, maybe somebody will fix it
# chunked = req.headers.get('transfer-encoding')
chunked = False
try:
with ContextPool(cluster_config.total_count) as pool:
self._spawn_file_senders(conns, pool, req)
for data_src in data_sources:
# FIXME: don't attach bytes_transferred to this object
# kinda ugly
data_src.bytes_transferred = 0
_send_tar_headers(chunked, data_src)
while True:
with ChunkReadTimeout(self.middleware.client_timeout):
try:
data = next(data_src.app_iter)
except StopIteration:
                                # TODO: return num of bytes transferred
error = _finalize_tar_streams(chunked,
data_src,
req)
if error:
return error
break
                        # TODO: return num of bytes transferred
error = _send_data_chunk(chunked, data_src, data, req)
if error:
return error
if data_src.bytes_transferred < data_src.content_length:
return HTTPClientDisconnect(
request=req,
body='data source %s dead' % data_src.__dict__)
for conn in conns:
if conn.queue.unfinished_tasks:
# wait for everything to finish
conn.queue.join()
conn.tar_stream = None
except ChunkReadTimeout, err:
self.app.logger.warn(
'ERROR Client read timeout (%ss)', err.seconds)
self.app.logger.increment('client_timeouts')
# FIXME: probably we need to expand on what caused the error
# it could be ANY data source that timed out
            # this can include the client request; this covers ALL data sources
# Only above do we begin to read from all sources
return HTTPRequestTimeout(request=req)
except (Exception, Timeout):
print traceback.format_exc()
self.app.logger.exception(
'ERROR Exception causing client disconnect')
return HTTPClientDisconnect(request=req, body='exception')
# we have successfully started execution and sent all data sources
for conn in conns:
# process all of the responses in parallel
pile.spawn(self._process_response, conn, req)
        # x-zerovm-deferred means: run the job asynchronously and close the
        # client connection as soon as possible; results are saved into Swift.
do_defer = req.headers.get('x-zerovm-deferred', 'never').lower()
if do_defer == 'always':
# 0 means timeout immediately
defer_timeout = 0
elif do_defer == 'auto':
defer_timeout = self.middleware.immediate_response_timeout
else:
# None means no timeout
defer_timeout = None
conns = []
try:
with Timeout(seconds=defer_timeout):
for conn in pile:
if conn:
conns.append(conn)
except Timeout:
# if timeout is 0, we immediately get an exception (the case where
# x-zerovm-deferred is specified)
# if timeout is None, we never get an exception
# if timeout is > 0, we might get a timeout exception
def store_deferred_response(deferred_url):
"""
:param str deferred_url:
Path where we will try to store the result object.
A swift:// url.
"""
# GreenPileEx iterator blocks until the next result is ready
for conn in pile:
if conn:
conns.append(conn)
resp = self.create_final_response(conns, req)
path = SwiftPath(deferred_url)
container_info = get_info(self.app, req.environ.copy(),
path.account, path.container,
ret_not_found=True)
if container_info['status'] == HTTP_NOT_FOUND:
# container doesn't exist (yet)
# try to create the container
cont_req = Request(req.environ.copy())
cont_req.path_info = '/%s/%s' % (path.account,
path.container)
cont_req.method = 'PUT'
cont_resp = \
ContainerController(self.app,
path.account,
path.container).PUT(cont_req)
if cont_resp.status_int >= 300:
self.app.logger.warn(
'Failed to create deferred container: %s'
% cont_req.url)
return
# this would normally get returned to the client, but since
# we're deferred, the client has already disconnected
resp.input_iter = iter(resp.app_iter)
# subsequent consumption of this response expects an object
# with a read() method
def iter_read(chunk_size=None):
# if a chunk size is specified, it shouldn't be a big deal
# because this will only be read by other parts of
# ZeroCloud middleware.
if chunk_size is None:
return ''.join(resp.input_iter)
chunk = next(resp.input_iter)
return chunk
resp.read = iter_read
# Create the new object to store the results (from `resp`):
deferred_put = Request(req.environ.copy())
deferred_put.path_info = path.path
deferred_put.method = 'PUT'
deferred_put.environ['wsgi.input'] = resp
deferred_put.content_length = resp.content_length
deferred_resp = ObjectController(self.app,
path.account,
path.container,
path.obj).PUT(deferred_put)
if deferred_resp.status_int >= 300:
# TODO(larsbutler): should this be a critical error?
self.app.logger.warn(
'Failed to create deferred object: %s : %s'
% (deferred_put.url, deferred_resp.status))
# we don't return here, at least we should try to store the
# headers (before we think about returning an error)
report = self._create_deferred_report(resp.headers)
resp.input_iter = iter([report])
deferred_put = Request(req.environ.copy())
# we not only store the result (in an object),
# but we also store the headers (in a separate object)
deferred_put.path_info = path.path + '.headers'
deferred_put.method = 'PUT'
deferred_put.environ['wsgi.input'] = resp
deferred_put.content_length = len(report)
deferred_resp = \
ObjectController(self.app,
path.account,
path.container,
path.obj + '.headers').PUT(deferred_put)
if deferred_resp.status_int >= 300:
self.app.logger.warn(
'Failed to create deferred object: %s : %s'
% (deferred_put.url, deferred_resp.status))
# End `store_deferred_response`.
# request url can be:
# /account
# /account/container
# /account/container/object
# we will have a container if the request url includes a container
if self.container_name:
# either it's the container where the job was running
container = self.container_name
else:
# or it's the configured directory, like `.zvm`
container = self.middleware.zerovm_registry_path
# We will have an object name if the request url includes an
# object.
if self.object_name:
obj = self.object_name
else:
# Otherwise we just generate an object ID.
obj = 'job-%s' % uuid.uuid4()
# TODO(larsbutler): Use `SwiftPath.create_url()` here.
deferred_path = SwiftPath.init(self.account_name, container, obj)
resp = Response(request=req,
body=deferred_path.url)
# spawn it with any thread that can handle it
spawn_n(store_deferred_response, deferred_path.url)
# FIXME(larsbutler): We might want to stop the name server at the
# end of store_deferred_response, instead of here.
if ns_server:
ns_server.stop()
# return immediately, our job is likely still running
return resp
# end of deferred/timeout case
if ns_server:
# If we are running with networking and have a NameService server,
# stop the server.
ns_server.stop()
return self.create_final_response(conns, req)
def process_server_response(self, conn, request, resp):
# Process object server response (responses from requests which are
# co-located with an object).
conn.resp = resp
if not is_success(resp.status_int):
conn.error = resp.body
return conn
if resp.content_length == 0:
return conn
if 'x-nexe-error' in resp.headers:
resp.status = 500
node = conn.cnode
untar_stream = UntarStream(resp.app_iter)
bytes_transferred = 0
while True:
try:
data = next(untar_stream.tar_iter)
except StopIteration:
break
untar_stream.update_buffer(data)
info = untar_stream.get_next_tarinfo()
while info:
# We can store arbitrary key-value metadata in the tar; let's
# grab those.
headers = info.get_headers()
chan = node.get_channel(device=info.name)
if not chan:
conn.error = 'Channel name %s not found' % info.name
return conn
# If there is a path, something needs to be saved back into the
# Swift data store.
if not chan.path:
# If there is no path, send the data back to the client.
app_iter = iter(CachedBody(
untar_stream.tar_iter,
cache=[untar_stream.block[info.offset_data:]],
total_size=info.size))
resp.app_iter = app_iter
resp.content_length = headers['Content-Length']
resp.content_type = headers['Content-Type']
check_headers_metadata(resp, headers, 'object', request,
add_all=True)
if resp.headers.get('status'):
resp.status = resp.headers['status']
del resp.headers['status']
return conn
dest_req = Request.blank(chan.path.path,
environ=request.environ,
headers=request.headers)
# NOTE(larsbutler): We have to override the `path_info`, since
# the `Request.blank` chops any path down to /<account>/auth.
dest_req.path_info = chan.path.path
dest_req.query_string = None
dest_req.method = 'PUT'
dest_req.headers['content-length'] = headers['Content-Length']
untar_stream.to_write = info.size
untar_stream.offset_data = info.offset_data
dest_req.environ['wsgi.input'] = ExtractedFile(untar_stream)
dest_req.content_type = headers['Content-Type']
check_headers_metadata(dest_req, headers, 'object', request)
try:
# Check if the user is authorized to write to the specified
# channel/object.
# If user has write permissions, continue.
                # Else, check if user has Setuid.
# If user has Setuid, continue.
# Else, raise a 403.
self.authorize_job(dest_req, acl='write_acl')
dest_resp = \
ObjectController(self.app,
chan.path.account,
chan.path.container,
chan.path.obj).PUT(dest_req)
except HTTPException as error_resp:
dest_resp = error_resp
if dest_resp.status_int >= 300:
conn.error = 'Status %s when putting %s' \
% (dest_resp.status, chan.path.path)
if resp.status_int < dest_resp.status_int:
resp.status = dest_resp.status
return conn
info = untar_stream.get_next_tarinfo()
bytes_transferred += len(data)
untar_stream = None
# we should be done reading, but just for sanity, we set the
# content-length to 0 so we don't try to read anyway
# TODO(larsbutler): If it's not already 0, we should probably raise an
        # error. We need to double-check this.
resp.content_length = 0
return conn
def _process_response(self, conn, request):
conn.error = None
chunk_size = self.middleware.network_chunk_size
if conn.resp:
# success
server_response = conn.resp
resp = Response(status='%d %s' %
(server_response.status,
server_response.reason),
app_iter=iter(lambda: server_response.read(
chunk_size), ''),
headers=dict(server_response.getheaders()))
else:
# got "continue"
# no response yet; we need to read it
try:
with Timeout(self.middleware.node_timeout):
server_response = conn.getresponse()
resp = Response(status='%d %s' %
(server_response.status,
server_response.reason),
app_iter=iter(lambda: server_response.read(
chunk_size), ''),
headers=dict(server_response.getheaders()))
except (Exception, Timeout):
self.app.exception_occurred(
conn.node, 'Object',
'Trying to get final status of POST to %s'
% request.path_info)
resp = HTTPRequestTimeout(
body='Timeout: trying to get final status of POST '
'to %s' % request.path_info)
return self.process_server_response(conn, request, resp)
def _connect_exec_node(self, obj_nodes, part, request,
logger_thread_locals, cnode, request_headers,
known_nodes, salt):
"""Do the actual execution.
:param obj_nodes:
Iterator of object node `dict` objects, each with the following
keys:
* id
* ip
* port
* zone
* region
* device
* replication_ip
* replication_port
:param int part:
Partition ID.
:param request:
`swift.common.swob.Request` instance.
:param logger_thread_locals:
2-tuple of ("transaction-id", logging.Logger).
:param cnode:
:class:`zerocloud.configparser.ZvmNode` instance.
:param request_headers:
Dict of request headers.
:param list known_nodes:
See `_make_exec_requests` for usage info.
:param str salt:
Generated unique ID for a given server.
"""
self.app.logger.thread_locals = logger_thread_locals
conn = None
for node in chain(known_nodes, obj_nodes):
# this loop is trying to connect to candidate object servers (for
# execution) and send the execution request headers
# if we get an exception, we can keep trying on other nodes
if ((known_nodes and node in known_nodes)
# this is the first node we are trying to use for
# co-location
or (not known_nodes and cnode.location)):
request_headers['x-nexe-colocated'] = \
'%s:%s:%s' % (salt, node['ip'], node['port'])
try:
with ConnectionTimeout(self.middleware.conn_timeout):
request_headers['Expect'] = '100-continue'
request_headers['Content-Length'] = str(cnode.size)
# NOTE(larsbutler): THIS line right here kicks off the
# actual execution.
conn = http_connect(node['ip'], node['port'],
node['device'], part, request.method,
request.path_info, request_headers)
                # If we get here, it means the object server started reading
                # our request, read all headers up to the body, processed the
# headers, and has now issued a read on the body
# but we haven't sent any data yet
with Timeout(self.middleware.node_timeout):
resp = conn.getexpect()
# node == the swift object server we are connected to
conn.node = node
# cnode == the zerovm node
conn.cnode = cnode
conn.nexe_headers = request.resp_headers
if resp.status == HTTP_CONTINUE:
conn.resp = None
return conn
elif is_success(resp.status):
conn.resp = resp
return conn
elif resp.status == HTTP_INSUFFICIENT_STORAGE:
# increase the error count for this node
# to optimize, the proxy server can use this count to limit
                        # the number of requests sent to this particular object
# node.
self.app.error_limit(node,
'ERROR Insufficient Storage')
conn.error = 'Insufficient Storage'
                        # the final response is `resp`, which is an error
                        # (could be a failed disk, etc.)
conn.resp = resp
resp.nuke_from_orbit()
elif is_client_error(resp.status):
conn.error = resp.read()
conn.resp = resp
if resp.status == HTTP_NOT_FOUND:
# it could be "not found" because either a) the object
# doesn't exist or b) just this server doesn't have a
# copy
# container or object was either not found, or due to
# eventual consistency, it can't be found here right
# now
# so, we try to continue and look for it elsewhere
# the 404 error here doesn't include the url, so we
                            # include it here (so the client knows which url
# has a problem)
conn.error = 'Error %d %s while fetching %s' \
% (resp.status, resp.reason,
request.path_info)
else:
# don't keep trying; this is user error
return conn
else:
# unknown error
# some 500 error that's not insufficient storage
self.app.logger.warn('Obj server failed with: %d %s'
% (resp.status, resp.reason))
conn.error = resp.read()
conn.resp = resp
resp.nuke_from_orbit()
# we still keep trying; maybe we'll have better luck on
                        # another replica (could be a problem with threadpool,
# etc.)
except Exception:
self.app.exception_occurred(node, 'Object',
'Expect: 100-continue on %s'
% request.path_info)
if getattr(conn, 'resp'):
conn.resp.nuke_from_orbit()
conn = None
if conn:
return conn
def _store_accounting_data(self, request, connection=None):
# FIXME(larsbutler): We're not even sure if this still works.
txn_id = request.environ['swift.trans_id']
acc_object = datetime.datetime.utcnow().strftime('%Y/%m/%d.log')
if connection:
# If connection is not None, only cache accounting data on the
# input ``request`` object; nothing actually gets saved.
body = '%s %s %s (%s) [%s]\n' % (
datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
txn_id,
connection.nexe_headers['x-nexe-system'],
connection.nexe_headers['x-nexe-cdr-line'],
connection.nexe_headers['x-nexe-status'])
request.cdr_log.append(body)
self.app.logger.info('zerovm-cdr %s %s %s (%s) [%s]'
% (self.account_name,
txn_id,
connection.nexe_headers['x-nexe-system'],
connection.nexe_headers['x-nexe-cdr-line'],
connection.nexe_headers['x-nexe-status']))
else:
# Here, something is actually saved
body = ''.join(request.cdr_log)
append_req = Request.blank('/%s/%s/%s/%s'
% (self.middleware.version,
self.middleware.cdr_account,
self.account_name,
acc_object),
headers={'X-Append-To': '-1',
'Content-Length': len(body),
'Content-Type': 'text/plain'},
body=body)
append_req.method = 'POST'
resp = append_req.get_response(self.app)
if resp.status_int >= 300:
self.app.logger.warn(
'ERROR Cannot write stats for account %s',
self.account_name)
def _create_deferred_report(self, headers):
# just dumps headers as a json object for now
return json.dumps(dict(headers))
@delay_denial
@cors_validation
def GET(self, req):
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def PUT(self, req):
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def DELETE(self, req):
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def HEAD(self, req):
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def POST(self, req):
return self.post_job(req)
def authorize_job(self, req, acl=None, remove_auth=True, save_env=None):
"""
Authorizes a request using the acl attribute and authorize() function
from environment
:param req: `swob.Request` instance that we are authorizing
:param acl: type of the acl we read from container info
:param remove_auth: if True will remove authorize() from environment
:param save_env: if not None will save container info in the
provided environment dictionary
:raises: various HTTPException instances
"""
container_info = {'meta': {}}
source_header = req.headers.get('X-Zerovm-Source')
try:
if 'swift.authorize' in req.environ:
version, account, container, obj = \
split_path(req.path, 2, 4, True)
if 'zerovm.source' in req.environ:
container_info = req.environ['zerovm.source']
source_header = container_info['meta'].get('rest-endpoint')
if container:
container_info = self.container_info(account, container)
if acl:
req.acl = container_info.get(acl)
aresp = req.environ['swift.authorize'](req)
if aresp and container_info:
setuid_acl = container_info['meta'].get('zerovm-suid')
endpoint = container_info['meta'].get('rest-endpoint')
if all((source_header, setuid_acl, endpoint)) \
and endpoint == source_header:
req.acl = setuid_acl
aresp = req.environ['swift.authorize'](req)
if aresp:
raise aresp
if remove_auth:
del req.environ['swift.authorize']
except ValueError:
raise HTTPNotFound(request=req)
finally:
if save_env:
save_env['zerovm.source'] = container_info
req.acl = None
class RestController(ClusterController):
config_path = None
def _get_content_config(self, req, content_type):
req.template = None
cont = self.middleware.zerovm_registry_path
obj = '%s/config' % content_type
config_path = '/%s/%s/%s' % (self.account_name, cont, obj)
memcache_client = cache_from_env(req.environ)
memcache_key = 'zvmconf' + config_path
if memcache_client:
req.template = memcache_client.get(memcache_key)
if req.template:
return
config_req = req.copy_get()
config_req.path_info = config_path
config_req.query_string = None
config_resp = ObjectController(
self.app, self.account_name, cont, obj).GET(config_req)
if config_resp.status_int == 200:
req.template = ''
for chunk in config_resp.app_iter:
req.template += chunk
if self.middleware.zerovm_maxconfig < len(req.template):
req.template = None
return HTTPRequestEntityTooLarge(
request=config_req,
body='Config file at %s is too large' % config_path)
if memcache_client and req.template:
memcache_client.set(
memcache_key,
req.template,
time=float(self.middleware.zerovm_cache_config_timeout))
@delay_denial
@cors_validation
def GET(self, req):
resp = self.handle_request(req)
if resp:
return resp
if self.object_name:
return self.handle_object_open(req)
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def POST(self, req):
resp = self.handle_request(req)
if resp:
return resp
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def PUT(self, req):
resp = self.handle_request(req)
if resp:
return resp
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def DELETE(self, req):
resp = self.handle_request(req)
if resp:
return resp
return HTTPNotImplemented(request=req)
@delay_denial
@cors_validation
def HEAD(self, req):
resp = self.handle_request(req)
if resp:
return resp
return HTTPNotImplemented(request=req)
def load_config(self, req, config_path):
self.config_path = config_path
memcache_client = cache_from_env(req.environ)
memcache_key = 'zvmapp' + config_path.path
if memcache_client:
config = memcache_client.get(memcache_key)
if config:
self.cluster_config = config
return None
config_req = req.copy_get()
config_req.path_info = config_path.path
config_req.query_string = None
buffer_length = self.middleware.zerovm_maxconfig * 2
config_req.range = 'bytes=0-%d' % (buffer_length - 1)
config_resp = ObjectController(
self.app, self.account_name,
self.container_name, self.object_name).GET(config_req)
if config_resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
return None
if not is_success(config_resp.status_int) or \
config_resp.content_length > buffer_length:
return config_resp
if config_resp.content_type in TAR_MIMES:
chunk_size = self.middleware.network_chunk_size
config_req.bytes_transferred = 0
self.read_system_map(config_resp.app_iter, chunk_size,
config_resp.content_type, config_req)
if memcache_client and self.cluster_config:
memcache_client.set(
memcache_key,
self.cluster_config,
time=float(self.middleware.zerovm_cache_config_timeout))
return None
def handle_request(self, req):
swift_path = SwiftPath.init(self.account_name, self.container_name,
self.object_name)
error = self.load_config(req, swift_path)
if error:
return error
# if we successfully got config, we know that we have a zapp in hand
if self.cluster_config:
self.cgi_env = self.create_cgi_env(req)
req.headers['x-zerovm-source'] = self.config_path.url
req.method = 'POST'
return self.post_job(req)
return None
def handle_object_open(self, req):
obj_req = req.copy_get()
obj_req.method = 'HEAD'
obj_req.query_string = None
run = False
if self.object_name[-len('.nexe'):] == '.nexe':
# let's get a small speedup as it's quite possibly an executable
obj_req.method = 'GET'
run = True
controller = ObjectController(
self.app,
self.account_name,
self.container_name,
self.object_name)
handler = getattr(controller, obj_req.method, None)
obj_resp = handler(obj_req)
if not is_success(obj_resp.status_int):
return obj_resp
content = obj_resp.content_type
if content == 'application/x-nexe':
run = True
elif run:
# speedup did not succeed...
# still need to read the whole response
for _junk in obj_resp.app_iter:
pass
obj_req.method = 'HEAD'
run = False
template = DEFAULT_EXE_SYSTEM_MAP
error = self._get_content_config(obj_req, content)
if error:
return error
if obj_req.template:
template = obj_req.template
elif not run:
return HTTPNotFound(request=req,
body='No application registered for %s'
% content)
location = SwiftPath.init(self.account_name,
self.container_name,
self.object_name)
self.cluster_config = _config_from_template(req.params, template,
location.url)
self.cgi_env = self.create_cgi_env(req)
post_req = Request.blank('/%s' % self.account_name,
environ=req.environ,
headers=req.headers)
post_req.method = 'POST'
post_req.content_type = 'application/json'
post_req.query_string = req.query_string
if obj_req.method in 'GET':
self.exe_resp = obj_resp
return self.post_job(post_req)
class ApiController(RestController):
def load_config(self, req, config_path):
memcache_client = cache_from_env(req.environ)
memcache_key = 'zvmapp' + config_path.path
if memcache_client:
config = memcache_client.get(memcache_key)
if config:
self.cluster_config, config_path_url = config
self.config_path = SwiftPath(config_path_url)
return None
container_info = self.container_info(config_path.account,
config_path.container)
# This is the zapp which services the endpoint.
source = container_info['meta'].get('rest-endpoint')
if not source:
raise HTTPNotFound(request=req,
body='No API endpoint configured for '
'container %s' % self.container_name)
# REST endpoint source must be a full URL, with no wildcards.
# Otherwise, the account would be ambiguous since the executing user is
# not necessarily the owner of the container.
self.config_path = parse_location(unquote(source))
config_req = req.copy_get()
config_req.path_info = self.config_path.path
config_req.query_string = None
buffer_length = self.middleware.zerovm_maxconfig * 2
config_req.range = 'bytes=0-%d' % (buffer_length - 1)
# Set x-zerovm-source to the zapp configured for the
# x-container-meta-rest-endpoint.
config_req.headers['X-Zerovm-Source'] = self.config_path.url
# We check for read permissions to the zapp which services this
# endpoint.
# If user has read permissions, continue.
# Else, check if user has Setuid permissions.
# If user has Setuid permissions, continue.
        # Else, raise an HTTP 403.
self.authorize_job(config_req, acl='read_acl',
save_env=req.environ)
config_resp = ObjectController(
self.app,
self.config_path.account,
self.config_path.container,
# `read_acl` is checked above, since we are doing a GET/read (not a
# PUT/write) to the object server
self.config_path.obj).GET(config_req)
if config_resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
return None
if not is_success(config_resp.status_int) or \
config_resp.content_length > buffer_length:
return config_resp
if config_resp.content_type in TAR_MIMES:
chunk_size = self.middleware.network_chunk_size
config_req.bytes_transferred = 0
self.read_system_map(config_resp.app_iter, chunk_size,
config_resp.content_type, config_req)
elif config_resp.content_type in ['application/json']:
config_req.bytes_transferred = 0
config_req.content_length = config_resp.content_length
self.read_json_job(config_req, config_resp.app_iter)
if memcache_client and self.cluster_config:
memcache_client.set(
memcache_key,
(self.cluster_config, self.config_path.url),
time=float(self.middleware.zerovm_cache_config_timeout))
return None
def _config_from_template(params, template, url):
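    # Illustrative example (hypothetical template): with params
    # {'args': 'foo'} and a template containing
    #   "exe": "{.object_path}", "args": "{.args} {.content_type=text/plain}"
    # the object url is substituted for {.object_path}, 'foo' replaces
    # {.args}, and the unmatched placeholder falls back to its default,
    # 'text/plain'.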
for k, v in params.iteritems():
if k == 'object_path':
continue
ptrn = r'\{\.%s(|=[^\}]+)\}'
ptrn = ptrn % k
template = re.sub(ptrn, v, template)
config = template.replace('{.object_path}', url)
config = re.sub(r'\{\.[^=\}]+=?([^\}]*)\}', '\\1', config)
return config
def _attach_connections_to_data_sources(conns, data_sources):
"""
:param conns:
`list` of `swift.common.bufferedhttp.BufferedHTTPConnection` objects.
:param data_sources:
        `list` of `swift.common.swob.Response` objects (the data sources).
"""
for data_src in data_sources:
data_src.conns = []
for node in data_src.nodes:
for conn in conns:
if conn.cnode is node['node']:
conn.last_data = node['node'].last_data
data_src.conns.append({'conn': conn, 'dev': node['dev']})
def _queue_put(conn, data, chunked):
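    # Frame the data as an HTTP chunk ('<hex length>\r\n<data>\r\n') when
    # chunked transfer encoding is in use; otherwise enqueue the raw bytes.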
conn['conn'].queue.put('%x\r\n%s\r\n'
% (len(data), data) if chunked else data)
def _send_tar_headers(chunked, data_src):
for conn in data_src.conns:
name = conn['dev']
if name == 'image' and data_src.content_type == 'application/x-gzip':
name = 'image.gz'
info = conn['conn'].tar_stream.create_tarinfo(
ftype=REGTYPE,
name=name,
size=data_src.content_length)
for chunk in conn['conn'].tar_stream.serve_chunk(info):
if not conn['conn'].failed:
_queue_put(conn, chunk, chunked)
def _send_data_chunk(chunked, data_src, data, req):
data_src.bytes_transferred += len(data)
if data_src.bytes_transferred > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
for conn in data_src.conns:
for chunk in conn['conn'].tar_stream.serve_chunk(data):
if not conn['conn'].failed:
_queue_put(conn, chunk, chunked)
else:
return HTTPServiceUnavailable(request=req)
def _finalize_tar_streams(chunked, data_src, req):
blocks, remainder = divmod(data_src.bytes_transferred, BLOCKSIZE)
if remainder > 0:
nulls = NUL * (BLOCKSIZE - remainder)
for conn in data_src.conns:
for chunk in conn['conn'].tar_stream.serve_chunk(nulls):
if not conn['conn'].failed:
_queue_put(conn, chunk, chunked)
else:
return HTTPServiceUnavailable(request=req)
for conn in data_src.conns:
if conn['conn'].last_data is data_src:
if conn['conn'].tar_stream.data:
data = conn['conn'].tar_stream.data
if not conn['conn'].failed:
_queue_put(conn, data, chunked)
else:
return HTTPServiceUnavailable(request=req)
if chunked:
conn['conn'].queue.put('0\r\n\r\n')
def _get_local_address(node):
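    # Discover which local IP address routes to the given node by "connecting"
    # a UDP socket to it (no packet is actually sent for a UDP connect) and
    # reading back the socket's own address.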
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((node['ip'], node['port']))
result = s.getsockname()[0]
s.shutdown(socket.SHUT_RDWR)
s.close()
return result
def gunzip_iter(data_iter, chunk_size):
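    # Re-chunk a gzip-compressed byte iterator into decompressed chunks of
    # exactly chunk_size bytes (except possibly the last one). The
    # 16 + MAX_WBITS window argument tells zlib to expect a gzip header.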
dec = zlib.decompressobj(16 + zlib.MAX_WBITS)
unc_data = ''
for chunk in data_iter:
while dec.unconsumed_tail:
while len(unc_data) < chunk_size and dec.unconsumed_tail:
unc_data += dec.decompress(dec.unconsumed_tail,
chunk_size - len(unc_data))
if len(unc_data) == chunk_size:
yield unc_data
unc_data = ''
if unc_data and dec.unconsumed_tail:
chunk += dec.unconsumed_tail
break
unc_data += dec.decompress(chunk, chunk_size - len(unc_data))
if len(unc_data) == chunk_size:
yield unc_data
unc_data = ''
if unc_data:
yield unc_data
def filter_factory(global_conf, **local_conf):
"""
paste.deploy app factory for creating WSGI proxy apps.
"""
conf = global_conf.copy()
conf.update(local_conf)
def query_filter(app):
return ProxyQueryMiddleware(app, conf)
return query_filter
|
{
"content_hash": "73db6a2acb0775d65582c746759d658e",
"timestamp": "",
"source": "github",
"line_count": 2799,
"max_line_length": 80,
"avg_line_length": 46.123972847445515,
"alnum_prop": 0.5374086955174631,
"repo_name": "zerovm/zerocloud",
"id": "d7999cc88ee11a5e555bc598690ab64966eeb949",
"size": "129101",
"binary": false,
"copies": "1",
"ref": "refs/heads/swift-2.0",
"path": "zerocloud/proxyquery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "695023"
},
{
"name": "Ruby",
"bytes": "1298"
},
{
"name": "Shell",
"bytes": "3540"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="line", parent_name="violin.box", **kwargs):
super(LineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Line"),
data_docs=kwargs.pop(
"data_docs",
"""
color
Sets the inner box plot bounding line color.
width
Sets the inner box plot bounding line width.
""",
),
**kwargs
)
|
{
"content_hash": "2c9afb2fcf22f0f71e77698d80fd3044",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 33.15,
"alnum_prop": 0.5384615384615384,
"repo_name": "plotly/python-api",
"id": "615b3abab97eff574b656edb638d433f179891ff",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/violin/box/_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import unittest
import torch
from parameterized import parameterized
from monai.engines import PrepareBatchExtraInput, SupervisedEvaluator
from tests.utils import assert_allclose
TEST_CASE_0 = [
{"extra_keys": "extra1"},
{"x": torch.tensor([1, 2]), "t1": torch.tensor([5, 6]), "t2": None, "t3": None},
]
TEST_CASE_1 = [
{"extra_keys": ["extra1", "extra3"]},
{"x": torch.tensor([1, 2]), "t1": torch.tensor([5, 6]), "t2": "test", "t3": None},
]
TEST_CASE_2 = [
{"extra_keys": {"t1": "extra2", "t2": "extra3", "t3": "extra1"}},
{"x": torch.tensor([1, 2]), "t1": 16, "t2": "test", "t3": torch.tensor([5, 6])},
]
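# Editor's note (illustrative summary, not from the MONAI sources): the three
# cases above appear to exercise the forms accepted for extra_keys -- a single
# key name, a list of key names (forwarded positionally to the network), and a
# dict mapping argument names to batch keys (forwarded as keyword arguments).
# The expected dicts show which batch values should reach TestNet.forward below.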
class TestNet(torch.nn.Module):
def forward(self, x: torch.Tensor, t1=None, t2=None, t3=None):
return {"x": x, "t1": t1, "t2": t2, "t3": t3}
class TestPrepareBatchExtraInput(unittest.TestCase):
@parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])
def test_content(self, input_args, expected_value):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataloader = [
{
"image": torch.tensor([1, 2]),
"label": torch.tensor([3, 4]),
"extra1": torch.tensor([5, 6]),
"extra2": 16,
"extra3": "test",
}
]
# set up engine
evaluator = SupervisedEvaluator(
device=device,
val_data_loader=dataloader,
epoch_length=1,
network=TestNet(),
non_blocking=True,
prepare_batch=PrepareBatchExtraInput(**input_args),
decollate=False,
)
evaluator.run()
output = evaluator.state.output
assert_allclose(output["image"], torch.tensor([1, 2], device=device))
assert_allclose(output["label"], torch.tensor([3, 4], device=device))
for k, v in output["pred"].items():
if isinstance(v, torch.Tensor):
assert_allclose(v, expected_value[k].to(device))
else:
self.assertEqual(v, expected_value[k])
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "cb750c9ffebcb266b95af4eb6c257dd8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 86,
"avg_line_length": 32.84615384615385,
"alnum_prop": 0.5522248243559719,
"repo_name": "Project-MONAI/MONAI",
"id": "79c9a13679f59c194ed0db4721ee41b1fe4c068c",
"size": "2709",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_prepare_batch_extra_input.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15956"
},
{
"name": "C++",
"bytes": "189648"
},
{
"name": "Cuda",
"bytes": "154905"
},
{
"name": "Dockerfile",
"bytes": "2454"
},
{
"name": "Python",
"bytes": "7209898"
},
{
"name": "Shell",
"bytes": "20587"
}
],
"symlink_target": ""
}
|
import logging
import os
import pytest
from copy import copy
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import *
from tests.common.test_vector import *
from tests.common.impala_cluster import ImpalaCluster
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.skip import SkipIfS3
from tests.util.shell_util import exec_process
class TestHdfsFdCaching(ImpalaTestSuite):
"""
This test suite tests the behavior of HDFS file descriptor caching by evaluating the
metrics exposed by the Impala daemon.
"""
NUM_ROWS = 10000
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def add_test_dimensions(cls):
super(TestHdfsFdCaching, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
@classmethod
def get_workload(cls):
return 'functional-query'
def setup_method(self, method):
self.cleanup_db("cachefd")
self.client.execute("create database cachefd")
self.client.execute("create table cachefd.simple(id int, col1 int, col2 int) "
"stored as parquet")
buf = "insert into cachefd.simple values"
self.client.execute(buf + ", ".join(["({0},{0},{0})".format(x) for x in range(self.NUM_ROWS)]))
  def teardown_method(self, method):
    self.cleanup_db("cachefd")
@pytest.mark.execute_serially
def test_simple_scan(self, vector):
"""Tests that in the default configuration, file handle caching is disabled and no
file handles are cached."""
num_handles_before = self.cached_handles()
assert 0 == num_handles_before
self.execute_query("select * from cachefd.simple limit 1", vector=vector)
num_handles_after = self.cached_handles()
assert 0 == num_handles_after
assert num_handles_after == num_handles_before
assert 0 == self.outstanding_handles()
# No change when reading the table again
for x in range(10):
self.execute_query("select * from cachefd.simple limit 1", vector=vector)
# TODO This assertion fails reliably in the Kudu feature branch build for reasons yet
# unknown, since it seems unrelated to other changes. Once the root cause for the
# failure is known this assertion should be uncommented.
# assert num_handles_after == self.cached_handles()
assert 0 == self.outstanding_handles()
def cached_handles(self):
return self.get_agg_metric("impala-server.io.mgr.num-cached-file-handles")
def outstanding_handles(self):
return self.get_agg_metric("impala-server.io.mgr.num-file-handles-outstanding")
def get_agg_metric(self, key, fun=sum):
cluster = ImpalaCluster()
return fun([s.service.get_metric_value(key) for s
in cluster.impalads])
|
{
"content_hash": "4a01d0ca58cd124e94f349397babdd81",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 99,
"avg_line_length": 35.03658536585366,
"alnum_prop": 0.7201531500174034,
"repo_name": "kapilrastogi/Impala",
"id": "53ea98c0bbdb87b72985fe010f871d4530c1888e",
"size": "3448",
"binary": false,
"copies": "2",
"ref": "refs/heads/cdh5-trunk",
"path": "tests/query_test/test_hdfs_fd_caching.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "421674"
},
{
"name": "C++",
"bytes": "8270225"
},
{
"name": "CMake",
"bytes": "114760"
},
{
"name": "CSS",
"bytes": "89516"
},
{
"name": "Groff",
"bytes": "1633"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3979799"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Lex",
"bytes": "22598"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Protocol Buffer",
"bytes": "630"
},
{
"name": "Python",
"bytes": "2099455"
},
{
"name": "Shell",
"bytes": "178431"
},
{
"name": "Thrift",
"bytes": "260303"
}
],
"symlink_target": ""
}
|
import os
import re
import shutil
import subunit
import sys
from functools import partial
from io import BytesIO
from testtools import CopyStreamResult
from testtools import StreamResult
from testtools import StreamSummary
from testtools import StreamToDict
from testrepository.repository.file import RepositoryFactory
from testrepository.repository.file import RepositoryNotFound
NAME_SCENARIO_PATTERN = re.compile(r'^(.+) \((.+)\)$')
NAME_TAGS_PATTERN = re.compile(r'^(.+)\[(.+)\]$')
class InvalidSubunitProvider(Exception):
pass
class SubunitProvider(object):
@property
def name(self):
"""Returns a unique name for this provider,
The unique name is such that a valid URL fragment pointing to a
particular stream from this provider is `name_index`, applicable
for paths to pages and data files making use of the stream.
:return: a path fragment referring to the stream at `index` from this
provider
"""
raise NotImplementedError()
@property
def description(self):
"""Returns a user-facing description for this provider.
This description may be used in UI contexts, but will not be used
within paths or other content-sensitive contexts.
:return: a description for this provider
"""
raise NotImplementedError()
@property
def count(self):
raise NotImplementedError()
def get_stream(self, index):
"""Returns a file-like object representing the subunit stream
:param index: the index of the stream; must be between `0` and
`count - 1` (inclusive)
"""
raise NotImplementedError()
@property
def indexes(self):
# for the benefit of django templates
return range(self.count)
@property
def streams(self):
"""Creates a generator to iterate over every stream in the provider
:return: each stream available from this generator
"""
for i in range(self.count):
yield self.get_stream(i)
class RepositoryProvider(SubunitProvider):
def __init__(self, repository_path):
self.repository_path = repository_path
self.repository = RepositoryFactory().open(repository_path)
@property
def name(self):
return "repo_%s" % os.path.basename(self.repository_path)
@property
def description(self):
return "Repository: %s" % os.path.basename(self.repository_path)
@property
def count(self):
return self.repository.count()
def get_stream(self, index):
return self.repository.get_latest_run().get_subunit_stream()
class FileProvider(SubunitProvider):
def __init__(self, path):
if not os.path.exists(path):
raise InvalidSubunitProvider("Stream doesn't exist: %s" % path)
self.path = path
@property
def name(self):
return "file_%s" % os.path.basename(self.path)
@property
def description(self):
return "Subunit File: %s" % os.path.basename(self.path)
@property
def count(self):
return 1
def get_stream(self, index):
if index != 0:
raise IndexError("Index out of bounds: %d" % index)
return open(self.path, "r")
class StandardInputProvider(SubunitProvider):
def __init__(self):
self.buffer = BytesIO()
shutil.copyfileobj(sys.stdin, self.buffer)
self.buffer.seek(0)
@property
def name(self):
return "stdin"
@property
def description(self):
return "Subunit Stream (stdin)"
@property
def count(self):
return 1
def get_stream(self, index):
if index != 0:
raise IndexError()
self.buffer.seek(0)
return self.buffer
def get_providers(repository_paths=None, stream_paths=None, stdin=False):
"""Loads all test providers from locations configured in settings.
:param repository_paths: a list of directory paths containing
'.testrepository' folders to read
:param stream_paths: a list of paths to direct subunit streams
:param stdin: if true, read a subunit stream from standard input
:return: a dict of loaded provider names and their associated
:class:`SubunitProvider` instances
:rtype: dict[str, SubunitProvider]
"""
if repository_paths is None:
repository_paths = []
if stream_paths is None:
stream_paths = []
ret = {}
for path in repository_paths:
try:
p = RepositoryProvider(path)
ret[p.name] = p
except (ValueError, RepositoryNotFound):
continue
for path in stream_paths:
try:
p = FileProvider(path)
ret[p.name] = p
except InvalidSubunitProvider:
continue
if stdin:
p = StandardInputProvider()
ret[p.name] = p
return ret
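# Illustrative usage (editor addition; the paths are hypothetical):
#   providers = get_providers(
#       repository_paths=['/opt/stack/tempest'],   # contains .testrepository
#       stream_paths=['/tmp/run.subunit'],
#       stdin=False)
#   for name, provider in providers.items():
#       print(name, provider.description, provider.count)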
def _clean_name(name):
# TODO(Tim Buckley) currently throwing away other info - any worth keeping?
m = NAME_TAGS_PATTERN.match(name)
if m:
# tags = m.group(2).split(',')
return m.group(1)
m = NAME_SCENARIO_PATTERN.match(name)
if m:
return '{0}.{1}'.format(m.group(2), m.group(1))
return name
def _strip(text):
return re.sub(r'\W', '', text)
def _clean_details(details):
return {_strip(k): v.as_text() for k, v in details.iteritems()
if v.as_text()}
def _read_test(test, out, strip_details):
# clean up the result test info a bit
start, end = test['timestamps']
out.append({
'name': _clean_name(test['id']),
'status': test['status'],
'tags': list(test['tags']),
'timestamps': test['timestamps'],
'duration': (end - start).total_seconds(),
'details': {} if strip_details else _clean_details(test['details'])
})
def convert_stream(stream_file, strip_details=False):
"""Converts a subunit stream into a raw list of test dicts.
:param stream_file: subunit stream to be converted
:param strip_details: if True, remove test details (e.g. stdout/stderr)
:return: a list of individual test results
"""
ret = []
result_stream = subunit.ByteStreamToStreamResult(stream_file)
starts = StreamResult()
summary = StreamSummary()
outcomes = StreamToDict(partial(_read_test,
out=ret,
strip_details=strip_details))
result = CopyStreamResult([starts, outcomes, summary])
result.startTestRun()
result_stream.run(result)
result.stopTestRun()
return ret
def convert_run(test_run, strip_details=False):
"""Converts the given test run into a raw list of test dicts.
    Uses the subunit stream as an intermediate format (see read_subunit.py
    from subunit2sql).
:param test_run: the test run to convert
:type test_run: AbstractTestRun
:param strip_details: if True, remove test details (e.g. stdout/stderr)
:return: a list of individual test results
"""
return convert_stream(test_run.get_subunit_stream(), strip_details)
def _descend_recurse(parent, parts_remaining):
if not parts_remaining:
return parent
target = parts_remaining.pop()
# create elements on-the-fly
if 'children' not in parent:
parent['children'] = []
# attempt to find an existing matching child
child = None
for c in parent['children']:
if c['name'] == target:
child = c
break
# create manually if the target child doesn't already exist
if not child:
child = {'name': target}
parent['children'].append(child)
return _descend_recurse(child, parts_remaining)
def _descend(root, path):
"""Retrieves the node within the 'root' dict
Retrieves the node within the `root` dict denoted by the series of
'.'-separated children as specified in `path`. Children for each node must
be contained in a list `children`, and name comparison will be
performed on the field `name`.
If parts of the path (up to and including the last child itself) do not
exist, they will be created automatically under the root dict.
:param root: the root node
:param path: a '.'-separated path
:type path: str
:return: the dict node representing the last child
"""
path_parts = path.split('.')
path_parts.reverse()
root['name'] = path_parts.pop()
return _descend_recurse(root, path_parts)
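# Illustrative example (editor addition; the path is hypothetical): descending
# a dotted path builds the intermediate nodes on the fly, e.g.
#   root = {}
#   leaf = _descend(root, 'tempest.api.compute.test_servers')
#   # root == {'name': 'tempest', 'children': [{'name': 'api', ...}]}
#   # leaf == {'name': 'test_servers'}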
def reorganize(converted_test_run):
"""Reorganizes test run, forming trees based on module paths
    Reorganizes and categorizes the given test run, forming a tree of tests
categorized by their module paths.
:param converted_test_run:
:return: a dict tree of test nodes, organized by module path
"""
ret = {}
for entry in converted_test_run:
entry['name_full'] = entry['name']
dest_node = _descend(ret, entry['name'])
# update the dest node with info from the current entry, but hold on to
# the already-parsed name
name = dest_node['name']
dest_node.update(entry)
dest_node['name'] = name
return ret
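# Illustrative example (editor addition; the entry mimics convert_stream()
# output with details stripped):
#   run = [{'name': 'pkg.mod.TestCase.test_ok', 'status': 'success',
#           'tags': [], 'timestamps': (start, end), 'duration': 0.5,
#           'details': {}}]
#   tree = reorganize(run)
#   # tree['name'] == 'pkg'; the leaf node keeps the full id in 'name_full'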
|
{
"content_hash": "f7e1a76ddf6f9ea23fa74f393aa6ccc1",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 79,
"avg_line_length": 26.930232558139537,
"alnum_prop": 0.6340673575129534,
"repo_name": "timothyb89/stackviz-ng",
"id": "868c314a8e178b0792826193bff80881292f022f",
"size": "9870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stackviz/parser/tempest_subunit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "77431"
},
{
"name": "HTML",
"bytes": "4658"
},
{
"name": "JavaScript",
"bytes": "23950"
},
{
"name": "Python",
"bytes": "36954"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import unittest
from unittest import mock
from airflow.providers.amazon.aws.hooks.glue_crawler import GlueCrawlerHook
from airflow.providers.amazon.aws.sensors.glue_crawler import GlueCrawlerSensor
class TestGlueCrawlerSensor(unittest.TestCase):
def setUp(self):
self.sensor = GlueCrawlerSensor(
task_id='test_glue_crawler_sensor',
crawler_name='aws_test_glue_crawler',
poke_interval=1,
timeout=5,
aws_conn_id='aws_default',
)
@mock.patch.object(GlueCrawlerHook, 'get_crawler')
def test_poke_success(self, mock_get_crawler):
mock_get_crawler.return_value['LastCrawl']['Status'] = "SUCCEEDED"
self.assertFalse(self.sensor.poke({}))
mock_get_crawler.assert_called_once_with('aws_test_glue_crawler')
@mock.patch.object(GlueCrawlerHook, 'get_crawler')
def test_poke_failed(self, mock_get_crawler):
mock_get_crawler.return_value['LastCrawl']['Status'] = "FAILED"
self.assertFalse(self.sensor.poke({}))
mock_get_crawler.assert_called_once_with('aws_test_glue_crawler')
@mock.patch.object(GlueCrawlerHook, 'get_crawler')
def test_poke_cancelled(self, mock_get_crawler):
mock_get_crawler.return_value['LastCrawl']['Status'] = "CANCELLED"
self.assertFalse(self.sensor.poke({}))
mock_get_crawler.assert_called_once_with('aws_test_glue_crawler')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "931d78b1e27df47acc06e245365f528a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 37.55,
"alnum_prop": 0.6750998668442078,
"repo_name": "cfei18/incubator-airflow",
"id": "0821141bb9e2ece7a181dab96b373d4029224114",
"size": "2287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/amazon/aws/sensors/test_glue_crawler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
}
|
import collections
from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_versionedobjects import fixture
from nova import exception
from nova.notifications.objects import base as notification
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import test
from nova.tests.unit.objects import test_objects
class TestNotificationBase(test.NoDBTestCase):
@base.NovaObjectRegistry.register_if(False)
class TestObject(base.NovaObject):
VERSION = '1.0'
fields = {
'field_1': fields.StringField(),
'field_2': fields.IntegerField(),
'not_important_field': fields.IntegerField(),
'lazy_field': fields.IntegerField()
}
def obj_load_attr(self, attrname):
if attrname == 'lazy_field':
self.lazy_field = 42
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
def __init__(self, not_important_field):
super(TestNotificationBase.TestObject, self).__init__()
            # field_1 and field_2 simulate fields that are initialized
            # outside of the object's ctor
self.not_important_field = not_important_field
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
'lazy_field': ('source_field', 'lazy_field')
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
# filled by the schema
'field_1': fields.StringField(nullable=True),
'field_2': fields.IntegerField(), # filled by the schema
'lazy_field': fields.IntegerField() # filled by the schema
}
def __init__(self, extra_field, source_field):
super(TestNotificationBase.TestNotificationPayload,
self).__init__()
self.extra_field = extra_field
self.populate_schema(source_field=source_field)
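    # Illustrative note (editor addition, not part of the original tests): with
    # the SCHEMA above, populate_schema(source_field=obj) copies obj.field_1,
    # obj.field_2 and obj.lazy_field onto the payload, so for a TestObject with
    # field_1='test1' and field_2=15 the payload carries field_1='test1',
    # field_2=15 and lazy_field=42 (the latter lazy-loaded via obj_load_attr).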
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayloadEmptySchema(
notification.NotificationPayloadBase):
VERSION = '1.0'
fields = {
'extra_field': fields.StringField(), # filled by ctor
}
def __init__(self, extra_field):
super(TestNotificationBase.TestNotificationPayloadEmptySchema,
self).__init__()
self.extra_field = extra_field
@notification.notification_sample('test-update-1.json')
@notification.notification_sample('test-update-2.json')
@base.NovaObjectRegistry.register_if(False)
class TestNotification(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayload')
}
@base.NovaObjectRegistry.register_if(False)
class TestNotificationEmptySchema(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
}
fake_service = {
'created_at': timeutils.utcnow().replace(microsecond=0),
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuids.service,
'host': 'fake-host',
'binary': 'nova-compute',
'topic': 'fake-service-topic',
'report_count': 1,
'forced_down': False,
'disabled': False,
'disabled_reason': None,
'last_seen_up': None,
'version': 1}
expected_payload = {
'nova_object.name': 'TestNotificationPayload',
'nova_object.data': {
'extra_field': 'test string',
'field_1': 'test1',
'field_2': 15,
'lazy_field': 42},
'nova_object.version': '1.0',
'nova_object.namespace': 'nova'}
def setUp(self):
super(TestNotificationBase, self).setUp()
with mock.patch(
'nova.db.main.api.service_update') as mock_db_service_update:
self.service_obj = objects.Service(context=mock.sentinel.context,
id=self.fake_service['id'])
self.service_obj.obj_reset_changes(['version'])
mock_db_service_update.return_value = self.fake_service
self.service_obj.save()
self.my_obj = self.TestObject(not_important_field=13)
self.my_obj.field_1 = 'test1'
self.my_obj.field_2 = 15
self.payload = self.TestNotificationPayload(
extra_field='test string', source_field=self.my_obj)
self.notification = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE,
phase=fields.NotificationPhase.START),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
def _verify_notification(self, mock_notifier, mock_context,
expected_event_type,
expected_payload):
mock_notifier.prepare.assert_called_once_with(
publisher_id='nova-compute:fake-host')
mock_notify = mock_notifier.prepare.return_value.info
self.assertTrue(mock_notify.called)
self.assertEqual(mock_notify.call_args[0][0], mock_context)
self.assertEqual(mock_notify.call_args[1]['event_type'],
expected_event_type)
actual_payload = mock_notify.call_args[1]['payload']
self.assertJsonEqual(expected_payload, actual_payload)
@mock.patch('nova.rpc.LEGACY_NOTIFIER')
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_notification(self, mock_notifier, mock_legacy):
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
self.notification.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update.start',
expected_payload=self.expected_payload)
self.assertFalse(mock_legacy.called)
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host',
source='nova-compute'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
@mock.patch('nova.rpc.LEGACY_NOTIFIER')
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_event_type_without_phase(self, mock_notifier, mock_legacy):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
self.assertFalse(mock_legacy.called)
@mock.patch('nova.rpc.NOTIFIER')
def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
payload = self.TestNotificationPayload(
extra_field='test string', source_field=self.my_obj)
payload.populated = False
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=payload)
mock_context = mock.Mock()
self.assertRaises(AssertionError, noti.emit, mock_context)
self.assertFalse(mock_notifier.called)
def test_lazy_load_source_field(self):
my_obj = self.TestObject(not_important_field=13)
my_obj.field_1 = 'test1'
my_obj.field_2 = 15
payload = self.TestNotificationPayload(extra_field='test string',
source_field=my_obj)
self.assertEqual(42, payload.lazy_field)
def test_uninited_source_field_defaulted_to_none(self):
my_obj = self.TestObject(not_important_field=13)
        # intentionally not initializing field_1 to simulate an uninitialized
        # but nullable field
my_obj.field_2 = 15
payload = self.TestNotificationPayload(extra_field='test string',
source_field=my_obj)
self.assertIsNone(payload.field_1)
def test_uninited_source_field_not_nullable_payload_field_fails(self):
my_obj = self.TestObject(not_important_field=13)
        # intentionally not initializing field_2 to simulate an uninitialized
        # non-nullable field
my_obj.field_1 = 'test1'
self.assertRaises(ValueError, self.TestNotificationPayload,
extra_field='test string', source_field=my_obj)
@mock.patch('nova.rpc.NOTIFIER')
def test_empty_schema(self, mock_notifier):
non_populated_payload = self.TestNotificationPayloadEmptySchema(
extra_field='test string')
noti = self.TestNotificationEmptySchema(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=
{'nova_object.name': 'TestNotificationPayloadEmptySchema',
'nova_object.data': {'extra_field': u'test string'},
'nova_object.version': '1.0',
'nova_object.namespace': 'nova'})
def test_sample_decorator(self):
self.assertEqual(2, len(self.TestNotification.samples))
self.assertIn('test-update-1.json', self.TestNotification.samples)
self.assertIn('test-update-2.json', self.TestNotification.samples)
@mock.patch('nova.notifications.objects.base.NotificationBase._emit')
@mock.patch('nova.rpc.NOTIFIER')
def test_payload_is_not_generated_if_notifier_is_not_enabled(
self, mock_notifier, mock_emit):
mock_notifier.is_enabled.return_value = False
payload = self.TestNotificationPayload(
extra_field='test string',
source_field=self.my_obj)
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=payload)
mock_context = mock.Mock()
noti.emit(mock_context)
self.assertFalse(payload.populated)
self.assertFalse(mock_emit.called)
@mock.patch('nova.notifications.objects.base.NotificationBase._emit')
def test_payload_is_not_generated_if_notification_format_is_unversioned(
self, mock_emit):
self.flags(notification_format='unversioned', group='notifications')
payload = self.TestNotificationPayload(
extra_field='test string',
source_field=self.my_obj)
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=payload)
mock_context = mock.Mock()
noti.emit(mock_context)
self.assertFalse(payload.populated)
self.assertFalse(mock_emit.called)
notification_object_data = {
'AggregateCacheNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'AggregateCachePayload': '1.0-3f4dc002bed67d06eecb577242a43572',
'AggregateNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'AggregatePayload': '1.1-1eb9adcc4440d8627de6ec37c6398746',
'AuditPeriodPayload': '1.0-2b429dd307b8374636703b843fa3f9cb',
'BlockDevicePayload': '1.0-29751e1b6d41b1454e36768a1e764df8',
'CellMappingPayload': '2.0-8acd412eb4edff1cd2ecb9867feeb243',
'ComputeTaskNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ComputeTaskPayload': '1.0-e3d34762c14d131c98337b72e8c600e1',
'DestinationPayload': '1.0-4ccf26318dd18c4377dada2b1e74ec2e',
'EventType': '1.21-6a5f57fafe478f354f66b81b4cb537ea',
'ExceptionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ExceptionPayload': '1.1-6c43008bd81885a63bc7f7c629f0793b',
'FlavorNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'FlavorPayload': '1.4-2e7011b8b4e59167fe8b7a0a81f0d452',
'ImageMetaPayload': '1.0-0e65beeacb3393beed564a57bc2bc989',
# NOTE(efried): ImageMetaPropsPayload is built dynamically from
# ImageMetaProps, so when you see a fail here for that reason, you must
# *also* bump the version of ImageMetaPropsPayload. See its docstring for
# more information.
'ImageMetaPropsPayload': '1.12-b9c64832d7772c1973e913bacbe0e8f9',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.8-4fa3da9cbf0761f1f700ae578f36dc2f',
'InstanceActionRebuildNotification':
'1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionRebuildPayload': '1.9-10eebfbf6e944aaac43188173dff9e01',
'InstanceActionRescueNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionRescuePayload': '1.3-dbf4de42bc02ebc4cdbe42f90d343bfd',
'InstanceActionResizePrepNotification':
'1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionResizePrepPayload': '1.3-baca73cc450f72d4e1ce6b9aca2bbdf6',
'InstanceActionVolumeNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionVolumePayload': '1.6-0a30e870677e6166c50645623e287f78',
'InstanceActionVolumeSwapNotification':
'1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionVolumeSwapPayload': '1.8-d2255347cb2353cb12c174aad4dab93c',
'InstanceCreateNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceCreatePayload': '1.12-749f2da7c2435a0e55c076d6bf0ea81d',
'InstancePayload': '1.8-60d62df5a6b6aa7817ec5d09f4b8a3e5',
'InstanceActionSnapshotNotification':
'1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionSnapshotPayload': '1.9-c3e0bbaaefafdfa2f8e6e504c2c9b12c',
'InstanceExistsNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceExistsPayload': '2.0-802d380c61cba2edb905c45052c612b0',
'InstanceNUMACellPayload': '1.2-a367add3378c71c21c817ab2b23db3bf',
'InstanceNUMATopologyPayload': '1.0-247361b152047c18ae9ad1da2544a3c9',
'InstancePCIRequestPayload': '1.1-bda86a95ef04bdc27789342466b81bb5',
'InstancePCIRequestsPayload': '1.0-6751cffe0c0fabd212aad624f672429a',
'InstanceStateUpdatePayload': '1.0-07e111c0fa0f6db0f79b0726d593e3da',
'InstanceUpdateNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceUpdatePayload': '2.0-6ad65963b4ef57210544651e1077ac97',
'IpPayload': '1.0-8ecf567a99e516d4af094439a7632d34',
'KeypairNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'KeypairPayload': '1.0-6daebbbde0e1bf35c1556b1ecd9385c1',
'LibvirtErrorNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'LibvirtErrorPayload': '1.0-9e7a8f0b895dd15531d5a6f3aa95d58e',
'MetricPayload': '1.0-bcdbe85048f335132e4c82a1b8fa3da8',
'MetricsNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'MetricsPayload': '1.0-65c69b15b4de5a8c01971cb5bb9ab650',
'NotificationPublisher': '2.2-b6ad48126247e10b46b6b0240e52e614',
'RequestSpecPayload': '1.1-64d30723a2e381d0cd6a16a877002c64',
'SchedulerRetriesPayload': '1.0-03a07d09575ef52cced5b1b24301d0b4',
'SelectDestinationsNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ServerGroupNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ServerGroupPayload': '1.1-4ded2997ea1b07038f7af33ef5c45f7f',
'ServiceStatusNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ServiceStatusPayload': '1.1-7b6856bd879db7f3ecbcd0ca9f35f92f',
'VirtCPUTopologyPayload': '1.0-1b1600fe55465209682d96bbe3209f27',
'VolumeUsageNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'VolumeUsagePayload': '1.0-5f99d8b978a32040eecac0975e5a53e9',
}
class TestNotificationObjectVersions(test.NoDBTestCase):
def setUp(self):
super(TestNotificationObjectVersions, self).setUp()
base.NovaObjectRegistry.register_notification_objects()
def test_versions(self):
checker = fixture.ObjectVersionChecker(
test_objects.get_nova_objects())
notification_object_data.update(test_objects.object_data)
expected, actual = checker.test_hashes(notification_object_data,
extra_data_func=get_extra_data)
self.assertEqual(expected, actual,
'Some notification objects have changed; please make '
'sure the versions have been bumped, and then update '
'their hashes here.')
def test_notification_payload_version_depends_on_the_schema(self):
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
checker = fixture.ObjectVersionChecker(
{'TestNotificationPayload': (TestNotificationPayload,)})
old_hash = checker.get_hashes(extra_data_func=get_extra_data)
TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
'field_3')
new_hash = checker.get_hashes(extra_data_func=get_extra_data)
self.assertNotEqual(old_hash, new_hash)
def get_extra_data(obj_class):
extra_data = tuple()
# Get the SCHEMA items to add to the fingerprint
# if we are looking at a notification
if issubclass(obj_class, notification.NotificationPayloadBase):
schema_data = collections.OrderedDict(
sorted(obj_class.SCHEMA.items()))
extra_data += (schema_data,)
return extra_data
|
{
"content_hash": "80966fdb0a6453566acca2c2ac952a2c",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 79,
"avg_line_length": 42.33126293995859,
"alnum_prop": 0.6525970850044018,
"repo_name": "mahak/nova",
"id": "de9e6f276232fc7aeb6965bc12817e793d6586a7",
"size": "21043",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/notifications/objects/test_notification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
import base64
import email.policy
import os
import subprocess
from email import message_from_string
from email.message import EmailMessage, MIMEPart
from typing import Any, Callable, Dict, Mapping, Optional
from unittest import mock
import orjson
from django.conf import settings
from django.http import HttpResponse
from zerver.lib.actions import do_deactivate_realm, do_deactivate_user, ensure_stream
from zerver.lib.email_mirror import (
ZulipEmailForwardError,
create_missed_message_address,
filter_footer,
get_missed_message_token_from_address,
is_forwarded,
is_missed_message_address,
log_and_report,
process_message,
process_missed_message,
redact_email_address,
strip_from_subject,
)
from zerver.lib.email_mirror_helpers import (
decode_email_address,
encode_email_address,
get_email_gateway_message_string_from_address,
)
from zerver.lib.email_notifications import convert_html_to_markdown
from zerver.lib.send_email import FromAddress
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import mock_queue_publish, most_recent_message, most_recent_usermessage
from zerver.models import (
MissedMessageEmailAddress,
Recipient,
UserProfile,
get_display_recipient,
get_realm,
get_stream,
get_system_bot,
)
from zerver.worker.queue_processors import MirrorWorker
logger_name = "zerver.lib.email_mirror"
class TestEncodeDecode(ZulipTestCase):
def _assert_options(self, options: Dict[str, bool], show_sender: bool=False,
include_footer: bool=False, include_quotes: bool=False,
prefer_text: bool=True) -> None:
self.assertEqual(show_sender, ('show_sender' in options) and options['show_sender'])
self.assertEqual(include_footer, ('include_footer' in options) and options['include_footer'])
self.assertEqual(include_quotes, ('include_quotes' in options) and options['include_quotes'])
self.assertEqual(prefer_text, options.get('prefer_text', True))
def test_encode_decode(self) -> None:
realm = get_realm('zulip')
stream_name = 'dev. help'
stream = ensure_stream(realm, stream_name)
email_address = encode_email_address(stream)
self.assertEqual(email_address, f"dev-help.{stream.email_token}@testserver")
# The default form of the email address (with an option - "include-footer"):
token, options = decode_email_address(
f"dev-help.{stream.email_token}.include-footer@testserver",
)
self._assert_options(options, include_footer=True)
self.assertEqual(token, stream.email_token)
# Using + instead of . as the separator is also supported for backwards compatibility,
# since that was the original form of addresses that we used:
token, options = decode_email_address(
f"dev-help+{stream.email_token}+include-footer@testserver",
)
self._assert_options(options, include_footer=True)
self.assertEqual(token, stream.email_token)
token, options = decode_email_address(email_address)
self._assert_options(options)
self.assertEqual(token, stream.email_token)
# We also handle mixing + and . but it shouldn't be recommended to users.
email_address_all_options = "dev-help.{}+include-footer.show-sender+include-quotes@testserver"
email_address_all_options = email_address_all_options.format(stream.email_token)
token, options = decode_email_address(email_address_all_options)
self._assert_options(options, show_sender=True, include_footer=True, include_quotes=True)
self.assertEqual(token, stream.email_token)
email_address = email_address.replace('@testserver', '@zulip.org')
email_address_all_options = email_address_all_options.replace('@testserver', '@zulip.org')
with self.assertRaises(ZulipEmailForwardError):
decode_email_address(email_address)
with self.assertRaises(ZulipEmailForwardError):
decode_email_address(email_address_all_options)
with self.settings(EMAIL_GATEWAY_EXTRA_PATTERN_HACK='@zulip.org'):
token, options = decode_email_address(email_address)
self._assert_options(options)
self.assertEqual(token, stream.email_token)
token, options = decode_email_address(email_address_all_options)
self._assert_options(options, show_sender=True, include_footer=True, include_quotes=True)
self.assertEqual(token, stream.email_token)
with self.assertRaises(ZulipEmailForwardError):
decode_email_address('bogus')
# Test stream name encoding changes introduced due to
# https://github.com/zulip/zulip/issues/9840
def test_encode_decode_nonlatin_alphabet_stream_name(self) -> None:
realm = get_realm('zulip')
stream_name = 'Тестовы some ascii letters'
stream = ensure_stream(realm, stream_name)
email_address = encode_email_address(stream)
msg_string = get_email_gateway_message_string_from_address(email_address)
parts = msg_string.split('+')
# Stream name should be completely stripped to '', so msg_string
# should only have the email_token in it.
self.assertEqual(len(parts), 1)
# Correctly decode the resulting address that doesn't have the stream name:
token, show_sender = decode_email_address(email_address)
self.assertFalse(show_sender)
self.assertEqual(token, stream.email_token)
asciiable_stream_name = "ąężć"
stream = ensure_stream(realm, asciiable_stream_name)
email_address = encode_email_address(stream)
self.assertTrue(email_address.startswith("aezc."))
def test_decode_ignores_stream_name(self) -> None:
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
stream_to_address = stream_to_address.replace("denmark", "Some_name")
# get the email_token:
token = decode_email_address(stream_to_address)[0]
self.assertEqual(token, stream.email_token)
def test_encode_with_show_sender(self) -> None:
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream, show_sender=True)
token, options = decode_email_address(stream_to_address)
self._assert_options(options, show_sender=True)
self.assertEqual(token, stream.email_token)
def test_decode_prefer_text_options(self) -> None:
stream = get_stream("Denmark", get_realm("zulip"))
address_prefer_text = f"Denmark.{stream.email_token}.prefer-text@testserver"
address_prefer_html = f"Denmark.{stream.email_token}.prefer-html@testserver"
token, options = decode_email_address(address_prefer_text)
self._assert_options(options, prefer_text=True)
token, options = decode_email_address(address_prefer_html)
self._assert_options(options, prefer_text=False)
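# Editor's note (illustrative summary, not part of the original suite): the
# gateway addresses exercised above follow the pattern
#   <stream-name>.<email-token>[.<option>...]@<domain>
# where options such as include-footer, show-sender, include-quotes and
# prefer-text/prefer-html may be joined with either '.' or '+', e.g.
#   denmark.1a2b3c4d.show-sender+include-footer@testserver  (token hypothetical)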
class TestGetMissedMessageToken(ZulipTestCase):
def test_get_missed_message_token(self) -> None:
with self.settings(EMAIL_GATEWAY_PATTERN="%s@example.com"):
address = 'mm' + ('x' * 32) + '@example.com'
self.assertTrue(is_missed_message_address(address))
token = get_missed_message_token_from_address(address)
self.assertEqual(token, 'mm' + 'x' * 32)
# This next section was a bug at one point--we'd treat ordinary
# user addresses that happened to begin with "mm" as being
# the special mm+32chars tokens.
address = 'mmathers@example.com'
self.assertFalse(is_missed_message_address(address))
with self.assertRaises(ZulipEmailForwardError):
get_missed_message_token_from_address(address)
        # Now test the case where our address does not match the
# EMAIL_GATEWAY_PATTERN.
# This used to crash in an ugly way; we want to throw a proper
# exception.
address = 'alice@not-the-domain-we-were-expecting.com'
self.assertFalse(is_missed_message_address(address))
with self.assertRaises(ZulipEmailForwardError):
get_missed_message_token_from_address(address)
class TestFilterFooter(ZulipTestCase):
def test_filter_footer(self) -> None:
text = """Test message
--
Footer"""
result = filter_footer(text)
self.assertEqual(result, "Test message")
def test_filter_footer_many_parts(self) -> None:
text = """Test message
--
Part1
--
Part2"""
result = filter_footer(text)
# Multiple possible footers, don't strip
self.assertEqual(result, text)
class TestStreamEmailMessagesSuccess(ZulipTestCase):
def test_receive_stream_email_messages_success(self) -> None:
# build dummy messages for stream
# test valid incoming stream message is processed properly
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestStreamEmailMessages Body")
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
# Test receiving an email with the address on an UnstructuredHeader
# (e.g. Envelope-To) instead of an AddressHeader (e.g. To).
# https://github.com/zulip/zulip/issues/15864
def test_receive_stream_email_messages_other_header_success(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
# Simulate a mailing list
incoming_valid_message['To'] = "foo-mailinglist@example.com"
incoming_valid_message['Envelope-To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestStreamEmailMessages Body")
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
def test_receive_stream_email_messages_blank_subject_success(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = ''
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestStreamEmailMessages Body")
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), "(no topic)")
def test_receive_private_stream_email_messages_success(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.make_stream("private_stream", invite_only=True)
self.subscribe(user_profile, "private_stream")
stream = get_stream("private_stream", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestStreamEmailMessages Body")
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
def test_receive_stream_email_multiple_recipient_success(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
# stream address is angle-addr within multiple addresses
stream_to_addresses = ["A.N. Other <another@example.org>",
f"Denmark <{encode_email_address(stream)}>"]
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = ", ".join(stream_to_addresses)
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestStreamEmailMessages Body")
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
def test_receive_stream_email_show_sender_success(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
parts = stream_to_address.split('@')
parts[0] += "+show-sender"
stream_to_address = '@'.join(parts)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "From: {}\n{}".format(self.example_email('hamlet'),
"TestStreamEmailMessages Body"))
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
def test_receive_stream_email_show_sender_utf8_encoded_sender(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
parts = stream_to_address.split('@')
parts[0] += "+show-sender"
stream_to_address = '@'.join(parts)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = 'Test =?utf-8?b?VXNlcsOzxIXEmQ==?= <=?utf-8?q?hamlet=5F=C4=99?=@zulip.com>'
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "From: {}\n{}".format('Test Useróąę <hamlet_ę@zulip.com>',
"TestStreamEmailMessages Body"))
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
def test_receive_stream_email_include_footer_success(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
parts = stream_to_address.split('@')
parts[0] += "+include-footer"
stream_to_address = '@'.join(parts)
text = """Test message
--
Footer"""
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content(text)
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, text)
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
def test_receive_stream_email_include_quotes_success(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
parts = stream_to_address.split('@')
parts[0] += "+include-quotes"
stream_to_address = '@'.join(parts)
text = """Reply
-----Original Message-----
Quote"""
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content(text)
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, text)
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
class TestEmailMirrorMessagesWithAttachments(ZulipTestCase):
def test_message_with_valid_attachment(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content("Test body")
with open(os.path.join(settings.DEPLOY_ROOT, "static/images/default-avatar.png"), 'rb') as f:
image_bytes = f.read()
incoming_valid_message.add_attachment(
image_bytes,
maintype="image",
subtype="png",
filename="image.png",
)
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
with mock.patch('zerver.lib.email_mirror.upload_message_file',
return_value='https://test_url') as upload_message_file:
process_message(incoming_valid_message)
upload_message_file.assert_called_with('image.png', len(image_bytes),
'image/png', image_bytes,
get_system_bot(settings.EMAIL_GATEWAY_BOT),
target_realm=user_profile.realm)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "Test body\n[image.png](https://test_url)")
def test_message_with_attachment_utf8_filename(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content("Test body")
with open(os.path.join(settings.DEPLOY_ROOT, "static/images/default-avatar.png"), 'rb') as f:
image_bytes = f.read()
utf8_filename = "image_ąęó.png"
incoming_valid_message.add_attachment(
image_bytes,
maintype="image",
subtype="png",
filename=utf8_filename,
)
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
with mock.patch('zerver.lib.email_mirror.upload_message_file',
return_value='https://test_url') as upload_message_file:
process_message(incoming_valid_message)
upload_message_file.assert_called_with(utf8_filename, len(image_bytes),
'image/png', image_bytes,
get_system_bot(settings.EMAIL_GATEWAY_BOT),
target_realm=user_profile.realm)
message = most_recent_message(user_profile)
self.assertEqual(message.content, f"Test body\n[{utf8_filename}](https://test_url)")
def test_message_with_valid_nested_attachment(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content("Test body")
nested_multipart = EmailMessage()
nested_multipart.set_content("Nested text that should get skipped.")
with open(os.path.join(settings.DEPLOY_ROOT, "static/images/default-avatar.png"), 'rb') as f:
image_bytes = f.read()
nested_multipart.add_attachment(
image_bytes,
maintype="image",
subtype="png",
filename="image.png",
)
incoming_valid_message.add_attachment(nested_multipart)
incoming_valid_message['Subject'] = 'Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
with mock.patch('zerver.lib.email_mirror.upload_message_file',
return_value='https://test_url') as upload_message_file:
process_message(incoming_valid_message)
upload_message_file.assert_called_with('image.png', len(image_bytes),
'image/png', image_bytes,
get_system_bot(settings.EMAIL_GATEWAY_BOT),
target_realm=user_profile.realm)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "Test body\n[image.png](https://test_url)")
def test_message_with_invalid_attachment(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content("Test body")
# Create an invalid attachment:
attachment_msg = MIMEPart()
attachment_msg.add_header('Content-Disposition', 'attachment', filename="some_attachment")
incoming_valid_message.add_attachment(attachment_msg)
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
with self.assertLogs(logger_name, level="WARNING") as m:
process_message(incoming_valid_message)
self.assertEqual(
m.output,
["WARNING:{}:Payload is not bytes (invalid attachment {} in message from {}).".format(logger_name, "some_attachment", self.example_email('hamlet'))]
)
def test_receive_plaintext_and_html_prefer_text_html_options(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_address = f"Denmark.{stream.email_token}@testserver"
stream_address_prefer_html = f"Denmark.{stream.email_token}.prefer-html@testserver"
text = "Test message"
html = "<html><body><b>Test html message</b></body></html>"
incoming_valid_message = EmailMessage()
incoming_valid_message.add_alternative(text)
incoming_valid_message.add_alternative(html, subtype="html")
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "Test message")
del incoming_valid_message['To']
incoming_valid_message['To'] = stream_address_prefer_html
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "**Test html message**")
def test_receive_only_plaintext_with_prefer_html_option(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_address_prefer_html = f"Denmark.{stream.email_token}.prefer-html@testserver"
text = "Test message"
# This should be correctly identified as empty html body:
html = "<html><body></body></html>"
incoming_valid_message = EmailMessage()
incoming_valid_message.add_alternative(text)
incoming_valid_message.add_alternative(html, subtype="html")
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_address_prefer_html
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
# HTML body is empty, so the plaintext content should be picked, despite prefer-html option.
self.assertEqual(message.content, "Test message")
class TestStreamEmailMessagesEmptyBody(ZulipTestCase):
def test_receive_stream_email_messages_empty_body(self) -> None:
# build dummy messages for stream
# test message with empty body is not sent
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
# empty body
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
with self.assertLogs(logger_name, level="WARNING") as m:
process_message(incoming_valid_message)
self.assertEqual(m.output, [f"WARNING:{logger_name}:Email has no nonempty body sections; ignoring."])
def test_receive_stream_email_messages_no_textual_body(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
# No textual body
incoming_valid_message = EmailMessage()
with open(os.path.join(settings.DEPLOY_ROOT, "static/images/default-avatar.png"), 'rb') as f:
incoming_valid_message.add_attachment(
f.read(),
maintype="image",
subtype="png",
)
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
with self.assertLogs(logger_name, level="WARNING") as m:
process_message(incoming_valid_message)
self.assertEqual(
m.output,
[f"WARNING:{logger_name}:Content types: ['multipart/mixed', 'image/png']",
f"WARNING:{logger_name}:Unable to find plaintext or HTML message body"]
)
def test_receive_stream_email_messages_empty_body_after_stripping(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
headers = {}
headers['Reply-To'] = self.example_email('othello')
# empty body
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('-- \nFooter')
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "(No email body)")
class TestMissedMessageEmailMessages(ZulipTestCase):
def test_receive_missed_personal_message_email_messages(self) -> None:
# build dummy messages for missed messages email reply
# have Hamlet send Othello a PM. Othello will reply via email
# Hamlet will receive the message.
self.login('hamlet')
othello = self.example_user('othello')
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": orjson.dumps([othello.id]).decode()})
self.assert_json_success(result)
user_profile = self.example_user('othello')
usermessage = most_recent_usermessage(user_profile)
# we don't want to send actual emails but we do need to create and store the
# token for looking up who did reply.
mm_address = create_missed_message_address(user_profile, usermessage.message)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestMissedMessageEmailMessages Body')
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('othello')
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# confirm that Hamlet got the message
user_profile = self.example_user('hamlet')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('othello'))
self.assertEqual(message.recipient.id, user_profile.id)
self.assertEqual(message.recipient.type, Recipient.PERSONAL)
def test_receive_missed_huddle_message_email_messages(self) -> None:
# build dummy messages for missed messages email reply
# have Othello send Iago and Cordelia a PM. Cordelia will reply via email
# Iago and Othello will receive the message.
self.login('othello')
cordelia = self.example_user('cordelia')
iago = self.example_user('iago')
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": orjson.dumps([cordelia.id, iago.id]).decode()})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
usermessage = most_recent_usermessage(user_profile)
# we don't want to send actual emails but we do need to create and store the
# token for looking up who did reply.
mm_address = create_missed_message_address(user_profile, usermessage.message)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestMissedHuddleMessageEmailMessages Body')
incoming_valid_message['Subject'] = 'TestMissedHuddleMessageEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('cordelia')
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = self.example_email('cordelia')
process_message(incoming_valid_message)
# Confirm Iago received the message.
user_profile = self.example_user('iago')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedHuddleMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('cordelia'))
self.assertEqual(message.recipient.type, Recipient.HUDDLE)
# Confirm Othello received the message.
user_profile = self.example_user('othello')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedHuddleMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('cordelia'))
self.assertEqual(message.recipient.type, Recipient.HUDDLE)
def test_receive_missed_stream_message_email_messages(self) -> None:
# build dummy messages for missed messages email reply
# have Hamlet send a message to stream Denmark, that Othello
# will receive a missed message email about.
# Othello will reply via email.
# Hamlet will see the message in the stream.
self.subscribe(self.example_user("hamlet"), "Denmark")
self.subscribe(self.example_user("othello"), "Denmark")
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"topic": "test topic",
"content": "test_receive_missed_stream_message_email_messages",
"client": "test suite",
"to": "Denmark"})
self.assert_json_success(result)
user_profile = self.example_user('othello')
usermessage = most_recent_usermessage(user_profile)
mm_address = create_missed_message_address(user_profile, usermessage.message)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestMissedMessageEmailMessages Body')
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = user_profile.delivery_email
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = user_profile.delivery_email
process_message(incoming_valid_message)
# confirm that Hamlet got the message
user_profile = self.example_user('hamlet')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('othello'))
self.assertEqual(message.recipient.type, Recipient.STREAM)
self.assertEqual(message.recipient.id, usermessage.message.recipient.id)
def test_missed_stream_message_email_response_tracks_topic_change(self) -> None:
self.subscribe(self.example_user("hamlet"), "Denmark")
self.subscribe(self.example_user("othello"), "Denmark")
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"topic": "test topic",
"content": "test_receive_missed_stream_message_email_messages",
"client": "test suite",
"to": "Denmark"})
self.assert_json_success(result)
user_profile = self.example_user('othello')
usermessage = most_recent_usermessage(user_profile)
mm_address = create_missed_message_address(user_profile, usermessage.message)
# The mm address has been generated, now we change the topic of the message and see
# if the response to the mm address will be correctly posted with the updated topic.
usermessage.message.subject = "updated topic"
usermessage.message.save(update_fields=["subject"])
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestMissedMessageEmailMessages Body')
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = user_profile.delivery_email
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = user_profile.delivery_email
process_message(incoming_valid_message)
# confirm that Hamlet got the message
user_profile = self.example_user('hamlet')
message = most_recent_message(user_profile)
self.assertEqual(message.subject, "updated topic")
self.assertEqual(message.content, "TestMissedMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('othello'))
self.assertEqual(message.recipient.type, Recipient.STREAM)
self.assertEqual(message.recipient.id, usermessage.message.recipient.id)
def test_missed_message_email_response_from_deactivated_user(self) -> None:
self.subscribe(self.example_user("hamlet"), "Denmark")
self.subscribe(self.example_user("othello"), "Denmark")
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"topic": "test topic",
"content": "test_receive_missed_stream_message_email_messages",
"client": "test suite",
"to": "Denmark"})
self.assert_json_success(result)
user_profile = self.example_user('othello')
message = most_recent_message(user_profile)
mm_address = create_missed_message_address(user_profile, message)
do_deactivate_user(user_profile)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestMissedMessageEmailMessages Body')
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = user_profile.delivery_email
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = user_profile.delivery_email
initial_last_message = self.get_last_message()
process_message(incoming_valid_message)
# Since othello is deactivated, his message shouldn't be posted:
self.assertEqual(initial_last_message, self.get_last_message())
def test_missed_message_email_response_from_deactivated_realm(self) -> None:
self.subscribe(self.example_user("hamlet"), "Denmark")
self.subscribe(self.example_user("othello"), "Denmark")
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"topic": "test topic",
"content": "test_receive_missed_stream_message_email_messages",
"client": "test suite",
"to": "Denmark"})
self.assert_json_success(result)
user_profile = self.example_user('othello')
message = most_recent_message(user_profile)
mm_address = create_missed_message_address(user_profile, message)
do_deactivate_realm(user_profile.realm)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestMissedMessageEmailMessages Body')
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = user_profile.delivery_email
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = user_profile.delivery_email
initial_last_message = self.get_last_message()
process_message(incoming_valid_message)
# Since othello's realm is deactivated, his message shouldn't be posted:
self.assertEqual(initial_last_message, self.get_last_message())
def test_missed_message_email_multiple_responses(self) -> None:
self.subscribe(self.example_user("hamlet"), "Denmark")
self.subscribe(self.example_user("othello"), "Denmark")
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"topic": "test topic",
"content": "test_receive_missed_stream_message_email_messages",
"client": "test suite",
"to": "Denmark"})
self.assert_json_success(result)
user_profile = self.example_user('othello')
message = most_recent_message(user_profile)
mm_address = create_missed_message_address(user_profile, message)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestMissedMessageEmailMessages Body')
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = user_profile.delivery_email
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = user_profile.delivery_email
for i in range(0, MissedMessageEmailAddress.ALLOWED_USES):
process_missed_message(mm_address, incoming_valid_message)
with self.assertRaises(ZulipEmailForwardError):
process_missed_message(mm_address, incoming_valid_message)
class TestEmptyGatewaySetting(ZulipTestCase):
def test_missed_message(self) -> None:
self.login('othello')
cordelia = self.example_user('cordelia')
iago = self.example_user('iago')
payload = dict(
type="private",
content="test_receive_missed_message_email_messages",
client="test suite",
to=orjson.dumps([cordelia.id, iago.id]).decode(),
)
result = self.client_post("/json/messages", payload)
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
usermessage = most_recent_usermessage(user_profile)
with self.settings(EMAIL_GATEWAY_PATTERN=''):
mm_address = create_missed_message_address(user_profile, usermessage.message)
self.assertEqual(mm_address, FromAddress.NOREPLY)
def test_encode_email_addr(self) -> None:
stream = get_stream("Denmark", get_realm("zulip"))
with self.settings(EMAIL_GATEWAY_PATTERN=''):
test_address = encode_email_address(stream)
self.assertEqual(test_address, '')
class TestReplyExtraction(ZulipTestCase):
def test_is_forwarded(self) -> None:
self.assertTrue(is_forwarded("FWD: hey"))
self.assertTrue(is_forwarded("fwd: hi"))
self.assertTrue(is_forwarded("[fwd] subject"))
self.assertTrue(is_forwarded("FWD: RE:"))
self.assertTrue(is_forwarded("Fwd: RE: fwd: re: subject"))
self.assertFalse(is_forwarded("subject"))
self.assertFalse(is_forwarded("RE: FWD: hi"))
def test_reply_is_extracted_from_plain(self) -> None:
# build dummy messages for stream
# test valid incoming stream message is processed properly
self.login('hamlet')
user_profile = self.example_user('hamlet')
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
text = """Reply
-----Original Message-----
Quote"""
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content(text)
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = user_profile.delivery_email
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = user_profile.delivery_email
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "Reply")
# Don't extract if Subject indicates the email has been forwarded into the mirror:
del incoming_valid_message['Subject']
incoming_valid_message['Subject'] = 'FWD: TestStreamEmailMessages Subject'
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, text)
def test_reply_is_extracted_from_html(self) -> None:
# build dummy messages for stream
# test valid incoming stream message is processed properly
self.login('hamlet')
user_profile = self.example_user('hamlet')
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
html = """
<html>
<body>
<p>Reply</p>
<blockquote>
<div>
On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:
</div>
<div>
Quote
</div>
</blockquote>
</body>
</html>
"""
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content(html, subtype="html")
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = user_profile.delivery_email
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = user_profile.delivery_email
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, 'Reply')
# Don't extract if Subject indicates the email has been forwarded into the mirror:
del incoming_valid_message['Subject']
incoming_valid_message['Subject'] = 'FWD: TestStreamEmailMessages Subject'
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, convert_html_to_markdown(html))
class TestScriptMTA(ZulipTestCase):
def test_success(self) -> None:
script = os.path.join(os.path.dirname(__file__),
'../../scripts/lib/email-mirror-postfix')
sender = self.example_email('hamlet')
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
mail_template = self.fixture_data('simple.txt', type='email')
mail = mail_template.format(stream_to_address=stream_to_address, sender=sender)
subprocess.run(
[script, '-r', stream_to_address, '-s', settings.SHARED_SECRET, '-t'],
input=mail,
check=True,
universal_newlines=True,
)
def test_error_no_recipient(self) -> None:
script = os.path.join(os.path.dirname(__file__),
'../../scripts/lib/email-mirror-postfix')
sender = self.example_email('hamlet')
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
mail_template = self.fixture_data('simple.txt', type='email')
mail = mail_template.format(stream_to_address=stream_to_address, sender=sender)
p = subprocess.run(
[script, '-s', settings.SHARED_SECRET, '-t'],
input=mail,
stdout=subprocess.PIPE,
universal_newlines=True,
)
self.assertEqual(
p.stdout,
'5.1.1 Bad destination mailbox address: No missed message email address.\n',
)
self.assertEqual(p.returncode, 67)
class TestEmailMirrorTornadoView(ZulipTestCase):
def send_private_message(self) -> str:
self.login('othello')
cordelia = self.example_user('cordelia')
iago = self.example_user('iago')
result = self.client_post(
"/json/messages",
{
"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": orjson.dumps([cordelia.id, iago.id]).decode(),
})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
user_message = most_recent_usermessage(user_profile)
return create_missed_message_address(user_profile, user_message.message)
def send_offline_message(self, to_address: str, sender: UserProfile) -> HttpResponse:
mail_template = self.fixture_data('simple.txt', type='email')
mail = mail_template.format(stream_to_address=to_address, sender=sender.delivery_email)
msg_base64 = base64.b64encode(mail.encode()).decode()
def check_queue_json_publish(queue_name: str,
event: Mapping[str, Any],
processor: Optional[Callable[[Any], None]]=None) -> None:
self.assertEqual(queue_name, "email_mirror")
self.assertEqual(event, {"rcpt_to": to_address, "msg_base64": msg_base64})
MirrorWorker().consume(event)
self.assertEqual(self.get_last_message().content,
"This is a plain-text message for testing Zulip.")
post_data = {
"rcpt_to": to_address,
"msg_base64": msg_base64,
"secret": settings.SHARED_SECRET,
}
with mock_queue_publish('zerver.lib.email_mirror.queue_json_publish') as m:
m.side_effect = check_queue_json_publish
return self.client_post('/email_mirror_message', post_data)
def test_success_stream(self) -> None:
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
result = self.send_offline_message(stream_to_address, self.example_user('hamlet'))
self.assert_json_success(result)
def test_error_to_stream_with_wrong_address(self) -> None:
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
# get the email_token:
token = decode_email_address(stream_to_address)[0]
stream_to_address = stream_to_address.replace(token, "Wrong_token")
result = self.send_offline_message(stream_to_address, self.example_user('hamlet'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: "
"Bad stream token from email recipient " + stream_to_address)
def test_success_to_stream_with_good_token_wrong_stream_name(self) -> None:
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
stream_to_address = stream_to_address.replace("denmark", "Wrong_name")
result = self.send_offline_message(stream_to_address, self.example_user('hamlet'))
self.assert_json_success(result)
def test_success_to_private(self) -> None:
mm_address = self.send_private_message()
result = self.send_offline_message(mm_address, self.example_user('cordelia'))
self.assert_json_success(result)
def test_using_mm_address_multiple_times(self) -> None:
mm_address = self.send_private_message()
for i in range(0, MissedMessageEmailAddress.ALLOWED_USES):
result = self.send_offline_message(mm_address, self.example_user('cordelia'))
self.assert_json_success(result)
result = self.send_offline_message(mm_address, self.example_user('cordelia'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: Missed message address out of uses.")
def test_wrong_missed_email_private_message(self) -> None:
self.send_private_message()
mm_address = 'mm' + ('x' * 32) + '@testserver'
result = self.send_offline_message(mm_address, self.example_user('cordelia'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: Missed message address expired or doesn't exist.")
class TestStreamEmailMessagesSubjectStripping(ZulipTestCase):
def test_process_message_strips_subject(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('TestStreamEmailMessages Body')
incoming_valid_message['Subject'] = "Re: Fwd: Re: Test"
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual("Test", message.topic_name())
# If after stripping we get an empty subject, it should get set to (no topic)
del incoming_valid_message['Subject']
incoming_valid_message['Subject'] = "Re: Fwd: Re: "
process_message(incoming_valid_message)
message = most_recent_message(user_profile)
self.assertEqual("(no topic)", message.topic_name())
def test_strip_from_subject(self) -> None:
subject_list = orjson.loads(self.fixture_data('subjects.json', type='email'))
for subject in subject_list:
stripped = strip_from_subject(subject['original_subject'])
self.assertEqual(stripped, subject['stripped_subject'])
# If the Content-Type header didn't specify a charset, the text content
# of the email used to not be properly found. Test that this is fixed:
class TestContentTypeUnspecifiedCharset(ZulipTestCase):
def test_charset_not_specified(self) -> None:
message_as_string = self.fixture_data('1.txt', type='email')
message_as_string = message_as_string.replace("Content-Type: text/plain; charset=\"us-ascii\"",
"Content-Type: text/plain")
incoming_message = message_from_string(message_as_string, policy=email.policy.default)
# https://github.com/python/typeshed/issues/2417
assert isinstance(incoming_message, EmailMessage)
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
del incoming_message['To']
incoming_message['To'] = stream_to_address
process_message(incoming_message)
message = most_recent_message(user_profile)
self.assertEqual(message.content, "Email fixture 1.txt body")
class TestEmailMirrorProcessMessageNoValidRecipient(ZulipTestCase):
def test_process_message_no_valid_recipient(self) -> None:
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('Test Body')
incoming_valid_message['Subject'] = "Test Subject"
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = "address@wrongdomain, address@notzulip"
incoming_valid_message['Reply-to'] = self.example_email('othello')
with mock.patch("zerver.lib.email_mirror.log_and_report") as mock_log_and_report:
process_message(incoming_valid_message)
mock_log_and_report.assert_called_with(incoming_valid_message,
"Missing recipient in mirror email", None)
class TestEmailMirrorLogAndReport(ZulipTestCase):
def test_log_and_report(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "errors")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
address_parts = stream_to_address.split('@')
scrubbed_address = 'X'*len(address_parts[0]) + '@' + address_parts[1]
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('Test Body')
incoming_valid_message['Subject'] = "Test Subject"
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
with self.assertLogs('zerver.lib.email_mirror', 'ERROR') as error_log:
log_and_report(incoming_valid_message, "test error message", stream_to_address)
self.assertEqual(error_log.output, [
'ERROR:zerver.lib.email_mirror:Sender: hamlet@zulip.com\nTo: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX@testserver <Address to stream id: 1>\ntest error message'
])
message = most_recent_message(user_profile)
self.assertEqual("email mirror error", message.topic_name())
msg_content = message.content.strip('~').strip()
expected_content = "Sender: {}\nTo: {} <Address to stream id: {}>\ntest error message"
expected_content = expected_content.format(self.example_email('hamlet'), scrubbed_address,
stream.id)
self.assertEqual(msg_content, expected_content)
with self.assertLogs('zerver.lib.email_mirror', 'ERROR') as error_log:
log_and_report(incoming_valid_message, "test error message", None)
self.assertEqual(error_log.output, [
'ERROR:zerver.lib.email_mirror:Sender: hamlet@zulip.com\nTo: No recipient found\ntest error message'
])
message = most_recent_message(user_profile)
self.assertEqual("email mirror error", message.topic_name())
msg_content = message.content.strip('~').strip()
expected_content = "Sender: {}\nTo: No recipient found\ntest error message"
expected_content = expected_content.format(self.example_email('hamlet'))
self.assertEqual(msg_content, expected_content)
def test_log_and_report_no_errorbot(self) -> None:
with self.settings(ERROR_BOT=None):
incoming_valid_message = EmailMessage()
incoming_valid_message.set_content('Test Body')
incoming_valid_message['Subject'] = "Test Subject"
incoming_valid_message['From'] = self.example_email('hamlet')
with self.assertLogs(logger_name, level="ERROR") as m:
log_and_report(incoming_valid_message, "test error message", None)
expected_content = "Sender: {}\nTo: No recipient found\ntest error message"
expected_content = expected_content.format(self.example_email('hamlet'))
self.assertEqual(
m.output,
[f"ERROR:{logger_name}:{expected_content}"]
)
def test_redact_email_address(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "errors")
stream = get_stream("Denmark", user_profile.realm)
# Test for a stream address:
stream_to_address = encode_email_address(stream)
stream_address_parts = stream_to_address.split('@')
scrubbed_stream_address = 'X'*len(stream_address_parts[0]) + '@' + stream_address_parts[1]
error_message = "test message {}"
error_message = error_message.format(stream_to_address)
expected_message = "test message {} <Address to stream id: {}>"
expected_message = expected_message.format(scrubbed_stream_address, stream.id)
redacted_message = redact_email_address(error_message)
self.assertEqual(redacted_message, expected_message)
# Test for an invalid email address:
invalid_address = "invalid@testserver"
error_message = "test message {}"
error_message = error_message.format(invalid_address)
expected_message = "test message {} <Invalid address>"
expected_message = expected_message.format('XXXXXXX@testserver')
redacted_message = redact_email_address(error_message)
self.assertEqual(redacted_message, expected_message)
# Test for a missed message address:
cordelia = self.example_user('cordelia')
iago = self.example_user('iago')
result = self.client_post(
"/json/messages",
{
"type": "private",
"content": "test_redact_email_message",
"client": "test suite",
"to": orjson.dumps([cordelia.email, iago.email]).decode(),
})
self.assert_json_success(result)
cordelia_profile = self.example_user('cordelia')
user_message = most_recent_usermessage(cordelia_profile)
mm_address = create_missed_message_address(user_profile, user_message.message)
error_message = "test message {}"
error_message = error_message.format(mm_address)
expected_message = "test message {} <Missed message address>"
expected_message = expected_message.format('X'*34 + '@testserver')
redacted_message = redact_email_address(error_message)
self.assertEqual(redacted_message, expected_message)
# Test if redacting correctly scrubs multiple occurrences of the address:
error_message = "test message first occurrence: {} second occurrence: {}"
error_message = error_message.format(stream_to_address, stream_to_address)
expected_message = "test message first occurrence: {} <Address to stream id: {}>"
expected_message += " second occurrence: {} <Address to stream id: {}>"
expected_message = expected_message.format(scrubbed_stream_address, stream.id,
scrubbed_stream_address, stream.id)
redacted_message = redact_email_address(error_message)
self.assertEqual(redacted_message, expected_message)
# Test with EMAIL_GATEWAY_EXTRA_PATTERN_HACK:
with self.settings(EMAIL_GATEWAY_EXTRA_PATTERN_HACK='@zulip.org'):
stream_to_address = stream_to_address.replace('@testserver', '@zulip.org')
scrubbed_stream_address = scrubbed_stream_address.replace('@testserver', '@zulip.org')
error_message = "test message {}"
error_message = error_message.format(stream_to_address)
expected_message = "test message {} <Address to stream id: {}>"
expected_message = expected_message.format(scrubbed_stream_address, stream.id)
redacted_message = redact_email_address(error_message)
self.assertEqual(redacted_message, expected_message)
|
{
"content_hash": "e397f563bdca0cafbc762fb533becb66",
"timestamp": "",
"source": "github",
"line_count": 1458,
"max_line_length": 171,
"avg_line_length": 46.71604938271605,
"alnum_prop": 0.6399753347427766,
"repo_name": "showell/zulip",
"id": "95ed80197e3ba6e02a88aee80378db590cc110e7",
"size": "68130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_email_mirror.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
}
|
"""Select individuals into a new population trying to maintain diversity.
This selection mechanism seeks to try and get both high fitness levels
and high diversity levels in the population.
"""
# standard modules
import random
import math
# biopython
from Bio.Seq import MutableSeq
# local modules
from .Abstract import AbstractSelection
from .Tournament import TournamentSelection
class DiversitySelection(AbstractSelection):
"""Implement diversity selection.
Diversity selection is performed by trying to select individuals
from the population that aren't already in the new_population. A group
of selected individuals is then subjected to selection using
a passed selection routine.
    If a new individual cannot be selected from the old population, a new
    individual will be randomly generated and inserted into the population.
"""
def __init__(self, internal_selector, genome_generator):
"""Initialize a diversity selector.
Arguments:
        o internal_selector - A selection object that will be used to select
        individuals based on fitness and to perform crossover, mutation and
        repair.
o genome_generator - A function that, when called, will return a
genome to be used for a new organism. The genome returned must
be a MutableSeq() object.
"""
self._internal_selector = internal_selector
self._genome_generator = genome_generator
self.sub_pop_percent = .1
self.random_tries = 10
def _get_new_organism(self, new_pop, old_pop):
"""Get a new organism from old_pop that isn't in new_pop.
This attempts to select an organism from old_pop that isn't in
new_pop. If we can't do this in the number of tries specified
by the class attribute random_tries, we generate a new random
organism and return that.
"""
# try to pick an organism that isn't in the population
new_org = None
num_tries = 0
while new_org is None and num_tries < self.random_tries:
chance_org = random.choice(old_pop)
if chance_org not in new_pop:
new_org = chance_org
num_tries += 1
# if we don't get an organism, generate a random one
if new_org is None:
new_org = old_pop[0].copy()
random_genome = self._genome_generator()
new_org.genome = random_genome
new_org.recalculate_fitness()
return new_org
def select(self, population):
"""Perform selection on the current population, encouraging diversity.
"""
new_population = []
while len(new_population) < len(population):
# generate a sub population
sub_pop_size = int(math.ceil(len(population) *
self.sub_pop_percent))
sub_pop = []
for individual in range(sub_pop_size):
new_org = self._get_new_organism(new_population, population)
sub_pop.append(new_org)
# put the new sub population through selection, mutation
# and all of that good stuff
new_sub_pop = self._internal_selector.select(sub_pop)
new_population.extend(new_sub_pop)
# return the new population, which should have the same number
        # of individuals as we started with.
return new_population[:len(population)]
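# --- Hedged usage sketch (illustrative, not part of the original module) ---
# The selector relies only on the two constructor arguments documented above:
# an internal selection object (e.g. a TournamentSelection instance, called
# `tournament` here and assumed to be configured elsewhere) and a zero-argument
# callable `make_genome` returning a MutableSeq. Both names are hypothetical.
#
#     selector = DiversitySelection(tournament, make_genome)
#     next_generation = selector.select(current_population)
#
# The size of each diverse sub-population (10% of the population by default)
# and the number of random picks attempted before falling back to a freshly
# generated organism can be tuned through the public attributes:
#
#     selector.sub_pop_percent = 0.2
#     selector.random_tries = 25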
|
{
"content_hash": "72f31d50b56ee7e6391d7612a927a00b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 35.402061855670105,
"alnum_prop": 0.6429819452533488,
"repo_name": "zjuchenyuan/BioWeb",
"id": "d9b1bceb3229721f513f01394214ab7e2a2d972a",
"size": "3605",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/Bio/GA/Selection/Diversity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "22925"
},
{
"name": "Batchfile",
"bytes": "143"
},
{
"name": "C",
"bytes": "414849"
},
{
"name": "CSS",
"bytes": "84526"
},
{
"name": "HTML",
"bytes": "6119"
},
{
"name": "Perl",
"bytes": "11818"
},
{
"name": "Python",
"bytes": "6614790"
}
],
"symlink_target": ""
}
|
from collections import Counter
if __name__ == '__main__':
    k = int(input())  # size of each tourist group; every room except the captain's is listed k times
    # Counter.most_common() sorts entries by frequency, so the captain's room
    # (listed only once) comes last; the [:-2:-1] slice grabs that final entry.
    room_captain = Counter(map(int, input().split())).most_common()[:-2:-1]
    print(room_captain[0][0])
|
{
"content_hash": "19a650fd0605123448259a82c9b448b5",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 76,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.5935828877005348,
"repo_name": "neiesc/Problem-solving",
"id": "f9ce968f21d6e6f401b8601663fad589082a13f8",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "HackerRank/Python/Sets/py-the-captains-room.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "42048"
},
{
"name": "Elixir",
"bytes": "7313"
},
{
"name": "JavaScript",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "58944"
},
{
"name": "Scala",
"bytes": "333"
},
{
"name": "Shell",
"bytes": "317"
}
],
"symlink_target": ""
}
|
import logging
from typing import Tuple
from praw.models import Comment, Submission # type: ignore
from tor import __BOT_NAMES__
from tor.core.config import Config
from tor.core.helpers import clean_id, flair, send_to_modchat
from tor.core.users import User
log = logging.getLogger(__name__)
def flair_post(post: Submission, text: str) -> None:
"""
Sets the requested flair on a given post. Must provide a string
which matches an already-available flair template.
:param post: A Submission object on ToR.
:param text: String. The name of the flair template to apply.
:return: None.
"""
# Flair looks like this:
# {
# 'flair_css_class': 'unclaimed-flair',
# 'flair_template_id': 'fe9d6950-142a-11e7-901e-0ecc947f9ff4',
# 'flair_text_editable': False,
# 'flair_position': 'left',
# 'flair_text': 'Unclaimed'
# }
for choice in post.flair.choices():
if choice['flair_text'] == text:
post.flair.select(
flair_template_id=choice['flair_template_id']
)
return
# if the flairing is successful, we won't hit this line.
log.error(f'Cannot find requested flair {text}. Not flairing.')
def _get_flair_css(transcription_count: int) -> str:
if transcription_count >= 10000:
return 'grafeas-jade'
elif transcription_count >= 5000:
return 'grafeas-topaz'
elif transcription_count >= 2500:
return 'grafeas-ruby'
elif transcription_count >= 1000:
return 'grafeas-diamond'
elif transcription_count >= 500:
return 'grafeas-golden'
elif transcription_count >= 250:
return 'grafeas-purple'
elif transcription_count >= 100:
return 'grafeas-teal'
elif transcription_count >= 50:
return 'grafeas-green'
else:
return 'grafeas'
def _parse_existing_flair(user_flair: str) -> Tuple[int, str]:
"""
Take the flair string and identify the proper incremented score along with
its matching CSS class.
:param user_flair: String; the existing flair string for the user.
    :return: Tuple of the incremented transcription count and its matching CSS class.
"""
# extract their current flair and add one to it
new_flair_count = int(user_flair[:user_flair.index('Γ') - 1]) + 1
css = _get_flair_css(new_flair_count)
return new_flair_count, css
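# Hedged worked example (illustrative only): given an existing flair string of
# "49 Γ - Beta Tester", the slice above yields 49, the incremented count is 50,
# and _get_flair_css(50) maps that to 'grafeas-green', so the function returns
# (50, 'grafeas-green').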
def update_user_flair(post: Comment, cfg: Config) -> None:
"""
On a successful transcription, this takes the user's current flair,
increments the counter by one, and stores it back to the subreddit.
If the user is past 50 transcriptions, select the appropriate flair
class and write that back too.
:param post: The post which holds the author information.
:param cfg: The global config instance.
:return: None.
"""
flair_text = '{} Γ - Beta Tester'
post_author = User(str(post.author), redis_conn=cfg.redis)
current_transcription_count = post_author.get('transcriptions', 0)
try:
# The post object is technically an inbox mention, even though it's
# a Comment object. In order to get the flair, we have to take the
# ID of our post object and re-request it from Reddit in order to
# get the *actual* object, even though they have the same ID. It's
# weird.
user_flair = cfg.r.comment(id=clean_id(post.fullname)).author_flair_text
except AttributeError:
user_flair = flair_text.format('0')
if not user_flair:
# HOLD ON. Do we have one saved? Maybe Reddit's screwing up.
if current_transcription_count != 0:
# we have a user object for them and shouldn't have landed here.
user_flair = flair_text.format(current_transcription_count)
else:
user_flair = flair_text.format('0')
if 'Γ' in user_flair:
new_count, flair_css = _parse_existing_flair(user_flair)
# if there's anything special in their flair string, let's save it
additional_flair_text = user_flair[user_flair.index('Γ') + 1:]
user_flair = f'{new_count} Γ'
# add in that special flair bit back in to keep their flair intact
user_flair += additional_flair_text
cfg.tor.flair.set(post.author, text=user_flair, css_class=flair_css)
log.info(f'Setting flair for {post.author}')
post_author.update('transcriptions', current_transcription_count + 1)
post_author.save()
def set_meta_flair_on_other_posts(cfg: Config) -> None:
"""
Loops through the 10 newest posts on ToR and sets the flair to
'Meta' for any post that is not authored by the bot or any of
the moderators.
:param cfg: the active config object.
:return: None.
"""
for post in cfg.tor.new(limit=10):
if str(post.author) in __BOT_NAMES__:
continue
if str(post.author) in cfg.tor_mods:
continue
if post.link_flair_text == flair.meta:
continue
log.info(f'Flairing post {post.fullname} by author {post.author} with Meta.')
flair_post(post, flair.meta)
send_to_modchat(
f'New meta post: <{post.shortlink}|{post.title}>',
cfg
)
|
{
"content_hash": "2280e38a4d589dd270f2ef02b935ddd2",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 85,
"avg_line_length": 33.91503267973856,
"alnum_prop": 0.63923684717672,
"repo_name": "TranscribersOfReddit/TranscribersOfReddit",
"id": "cadfd85c18206dc91d133e948455dc381bda3d62",
"size": "5194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tor/helpers/flair.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "257"
},
{
"name": "Python",
"bytes": "43722"
}
],
"symlink_target": ""
}
|
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', '/tmp/tfmodel/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
    'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'rmsprop',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
    'The number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
    'The decay to use for the moving average. '
    'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
    'Comma-separated list of scopes to filter the set of variables to train. '
    'By default, None trains all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
    'When restoring a checkpoint, ignore missing variables.')
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
    ValueError: if the learning rate decay type is not recognized.
"""
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
    raise ValueError('learning_rate_decay_type [%s] was not recognized' %
                     FLAGS.learning_rate_decay_type)
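# Hedged worked example (the 100,000 samples per epoch is illustrative; the
# other numbers are the flag defaults above): with batch_size=32 and
# num_epochs_per_decay=2.0, decay_steps = int(100000 / 32 * 2.0) = 6250.
# Under the default 'exponential' staircase schedule the rate at global step g
# is learning_rate * learning_rate_decay_factor ** (g // 6250), e.g.
# 0.01 * 0.94 ** 3 ~= 0.0083 after 18,750 steps.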
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
    raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer
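# Hedged invocation sketch: every branch above is driven purely by flags, so
# switching optimizers is a command-line change only (the script name and the
# dataset path below are assumptions, the flags themselves are defined above):
#
#     python <this_script>.py --dataset_dir=/data/tfrecords \
#         --optimizer=momentum --momentum=0.9 --learning_rate=0.01
#
# An unrecognized --optimizer value raises the ValueError at the end of the
# function.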
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
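# Hedged fine-tuning sketch: a typical warm-start run points --checkpoint_path
# at a pretrained model and excludes the classification scopes so they are
# re-initialized for the new label set. The scope names below are assumptions
# for an Inception V3 checkpoint, not guaranteed by this script:
#
#     --checkpoint_path=/tmp/inception_v3.ckpt \
#     --checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \
#     --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits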
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Create global_step
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
######################
# Select the network #
######################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=True)
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=True,
fast_mode=False)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
train_image_size = FLAGS.train_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, train_image_size, train_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(
labels, dataset.num_classes - FLAGS.labels_offset)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images, labels], capacity=2 * deploy_config.num_clones)
####################
# Define the model #
####################
def clone_fn(batch_queue):
"""Allows data parallelism by creating multiple clones of network_fn."""
images, labels = batch_queue.dequeue()
logits, end_points = network_fn(images)
#############################
# Specify the loss function #
#############################
if 'AuxLogits' in end_points:
tf.losses.softmax_cross_entropy(
logits=end_points['AuxLogits'], onehot_labels=labels,
label_smoothing=FLAGS.label_smoothing, weights=0.4, scope='aux_loss')
tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels,
label_smoothing=FLAGS.label_smoothing, weights=1.0)
return end_points
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by network_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Add summaries for end_points.
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.sync_replicas:
# If sync_replicas is enabled, the averaging will be done in the chief
# queue runner.
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=FLAGS.replicas_to_aggregate,
variable_averages=variable_averages,
variables_to_average=moving_average_variables,
replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
total_num_replicas=FLAGS.worker_replicas)
elif FLAGS.moving_average_decay:
# Update ops executed locally by trainer.
update_ops.append(variable_averages.apply(moving_average_variables))
# Variables to train.
variables_to_train = _get_variables_to_train()
# and returns a train_tensor and summary_op
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=optimizer if FLAGS.sync_replicas else None)
if __name__ == '__main__':
tf.app.run()
|
{
"content_hash": "6287f61f4be7f0d7630f232f32fd1781",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 81,
"avg_line_length": 34.67262969588551,
"alnum_prop": 0.634041894541327,
"repo_name": "r888888888/models",
"id": "82394464fae53c39b672288d56ed292294acac63",
"size": "20067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slim/train_image_classifier.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1219693"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "61102"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "24249"
},
{
"name": "Python",
"bytes": "3384988"
},
{
"name": "Shell",
"bytes": "61126"
}
],
"symlink_target": ""
}
|
from asynq import asynq, result
from .helpers import Profiler
counter = 0
def test():
global counter
@asynq(pure=True)
def incr():
global counter
counter += 1
print("Counter: %i" % counter)
return counter
yield
def sync_incr():
global counter
counter += 1
print("Counter: %i" % counter)
return counter
@asynq(pure=True)
def test_async():
global counter
try:
print("In try block.")
yield incr()
result((yield incr()))
except BaseException as e:
print("In except block, e = " + repr(e))
assert sync_incr() == 3
if isinstance(e, GeneratorExit):
raise
assert False, "Must not reach this point!"
finally:
print("In finally block.")
assert sync_incr() == 4
with Profiler("test_stop()"):
r = test_async()()
assert r == 2
assert counter == 4
print()
|
{
"content_hash": "1ada12b5952cc36e5042277ca5926f21",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 54,
"avg_line_length": 22.955555555555556,
"alnum_prop": 0.5082284607938045,
"repo_name": "quora/asynq",
"id": "e159d2774069b75b7b0dc6c7c3e5c73943640180",
"size": "1607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asynq/tests/test_yield_result.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "14533"
},
{
"name": "Python",
"bytes": "215286"
}
],
"symlink_target": ""
}
|
import sys
import numpy as np
from scipy.optimize import minimize
import cv2
import image_utils as utils
from my_gradient_map import gradient_map
img_list = []
N = 30
gmap = None
weight = 1
# TODO: This function is the same as the one in diffuse light
def sum_lambda_int(l, r, c):
# channels = img_list[0].shape[2]
sum = 0
# sum = np.zeros(channels)
i = 0
for img in img_list:
px = img[r, c]
        sum += l[i] * px  # accumulate the weighted contribution from each image
i += 1
return sum
def sum_images(l, images):
# channels = img_list[0].shape[2]
return_image = np.zeros_like(images[0])
# sum = np.zeros(channels)
for i, img in enumerate(images):
return_image += np.multiply(l[i], img)
return return_image
def edge_light(l):
global gmap
global weight
rows = img_list[0].shape[0]
cols = img_list[0].shape[1]
res = 0
for r in range(rows):
for c in range(cols):
# print "sum_lambda %s" % sum_lambda_int(l, r, c)
# print "gradient map %s" % (gradient_map**2)
# diff = np.gradient(sum_lambda_int(l, r, c))
# print diff
# gradient = np.arctan(diff[1]/diff[0]) * 180 / np.pi
gradient = sum_lambda_int(l, r, c)
res += weight.item(r, c) * np.linalg.norm(gradient -
(gmap.item(r, c)**2))**2
print "lambda: %s" % l
print "sum: %s" % res
return res
def main():
global img_list
global gmap
global weight
img_list = utils.read_images("../../test_data/cafe", N, downsample=3)
full_img_list = utils.read_images("../../test_data/cafe", N)
gray_imgs = utils.read_images("../../test_data/cafe", N, gray=True)
x0 = np.full(N, 1.0/N)
(gmap, weight) = gradient_map(gray_imgs)
bnds = []
for i in range(len(img_list)):
bnds.append((0, 1))
lambdas = minimize(edge_light, x0, method='TNC', jac=False,
bounds=bnds)
ret_image = sum_images(lambdas.x, full_img_list)
print lambdas.message
print "Choice of lambdas = %s" % (lambdas.x)
cv2.imwrite('output_edge.png', utils.denormalize_img(ret_image))
cv2.imshow('image', ret_image)
cv2.waitKey(0)
main()
|
{
"content_hash": "30b610d4414bfd5cca38acc6588eb7d9",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 24.67032967032967,
"alnum_prop": 0.5630289532293986,
"repo_name": "tjaartvdwalt/light-compositing",
"id": "9d4b4aa7308deca7be7c14caac179744b9c3f6d3",
"size": "2268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "light_compositing/experiments/edge_light.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55124"
}
],
"symlink_target": ""
}
|
from schedule import Schedule
class SSGS(object):
def __init__(self, problem):
self.problem = problem
def get_schedule(self, activity_list):
S = Schedule(self.problem)
self.al_iter = activity_list.__iter__()
for i in xrange(self.problem.num_activities):
activity = self._select_activity()
precedence_feasible_start = S.earliest_precedence_start(activity)
real_start = self._compute_real_start(S, activity, precedence_feasible_start)
S.add(activity, real_start, force=True)
return S
def _select_activity(self):
return next(self.al_iter)
def _compute_real_start(self, schedule, activity, precedence_feasible_start):
real_start = 0
for t in sorted(schedule.finish_times.keys()):
if precedence_feasible_start <= t and schedule.can_place(activity, t, skip_check_precedence=True):
real_start = t
break
return real_start
|
{
"content_hash": "4c470b61a227c1404df7a5faae76277a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 110,
"avg_line_length": 37,
"alnum_prop": 0.6236236236236237,
"repo_name": "Artimi/ukko",
"id": "b3c5b7280b5a1dbb59eb411a723e50aa23dc188d",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ukko/sgs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43305"
}
],
"symlink_target": ""
}
|
import os
import re
from typing import Optional
# Possible values for env variables
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
def _is_true(value: Optional[str]) -> bool:
if value is None:
return False
return value.upper() in ENV_VARS_TRUE_VALUES
def _is_true_or_auto(value: Optional[str]) -> bool:
if value is None:
return False
return value.upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
# Constants for file downloads
PYTORCH_WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
CONFIG_NAME = "config.json"
REPOCARD_NAME = "README.md"
# Git-related constants
DEFAULT_REVISION = "main"
REGEX_COMMIT_OID = re.compile(r"[A-Fa-f0-9]{5,40}")
HUGGINGFACE_CO_URL_HOME = "https://huggingface.co/"
_staging_mode = _is_true(os.environ.get("HUGGINGFACE_CO_STAGING"))
ENDPOINT = os.getenv("HF_ENDPOINT") or (
"https://hub-ci.huggingface.co" if _staging_mode else "https://huggingface.co"
)
HUGGINGFACE_CO_URL_TEMPLATE = ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
HUGGINGFACE_HEADER_X_REPO_COMMIT = "X-Repo-Commit"
HUGGINGFACE_HEADER_X_LINKED_ETAG = "X-Linked-Etag"
HUGGINGFACE_HEADER_X_LINKED_SIZE = "X-Linked-Size"
REPO_ID_SEPARATOR = "--"
# ^ this substring is not allowed in repo_ids on hf.co
# and is the canonical one we use for serialization of repo ids elsewhere.
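# For illustration only (an assumption, not defined in this file): with this
# separator, a repo id such as "username/my-model" would typically be
# serialized to a folder name like "models--username--my-model".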
REPO_TYPE_DATASET = "dataset"
REPO_TYPE_SPACE = "space"
REPO_TYPE_MODEL = "model"
REPO_TYPES = [None, REPO_TYPE_MODEL, REPO_TYPE_DATASET, REPO_TYPE_SPACE]
SPACES_SDK_TYPES = ["gradio", "streamlit", "static"]
REPO_TYPES_URL_PREFIXES = {
REPO_TYPE_DATASET: "datasets/",
REPO_TYPE_SPACE: "spaces/",
}
REPO_TYPES_MAPPING = {
"datasets": REPO_TYPE_DATASET,
"spaces": REPO_TYPE_SPACE,
"models": REPO_TYPE_MODEL,
}
# default cache
default_home = os.path.join(os.path.expanduser("~"), ".cache")
hf_cache_home = os.path.expanduser(
os.getenv(
"HF_HOME",
os.path.join(os.getenv("XDG_CACHE_HOME", default_home), "huggingface"),
)
)
default_cache_path = os.path.join(hf_cache_home, "hub")
default_assets_cache_path = os.path.join(hf_cache_home, "assets")
HUGGINGFACE_HUB_CACHE = os.getenv("HUGGINGFACE_HUB_CACHE", default_cache_path)
HUGGINGFACE_ASSETS_CACHE = os.getenv(
"HUGGINGFACE_ASSETS_CACHE", default_assets_cache_path
)
HF_HUB_OFFLINE = _is_true(os.environ.get("HF_HUB_OFFLINE"))
# Here, `True` will disable progress bars globally without possibility of enabling it
# programmatically. `False` will enable them without possibility of disabling them.
# If environment variable is not set (None), then the user is free to enable/disable
# them programmatically.
# TL;DR: env variable has priority over code
__HF_HUB_DISABLE_PROGRESS_BARS = os.environ.get("HF_HUB_DISABLE_PROGRESS_BARS")
HF_HUB_DISABLE_PROGRESS_BARS: Optional[bool] = (
_is_true(__HF_HUB_DISABLE_PROGRESS_BARS)
if __HF_HUB_DISABLE_PROGRESS_BARS is not None
else None
)
# Disable warning on machines that do not support symlinks (e.g. Windows non-developer)
HF_HUB_DISABLE_SYMLINKS_WARNING: bool = _is_true(
os.environ.get("HF_HUB_DISABLE_SYMLINKS_WARNING")
)
# Disable sending the cached token by default in all HTTP requests to the Hub
HF_HUB_DISABLE_IMPLICIT_TOKEN: bool = _is_true(
os.environ.get("HF_HUB_DISABLE_IMPLICIT_TOKEN")
)
|
{
"content_hash": "b23ad97265a44928aa964e3599fa79dc",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 87,
"avg_line_length": 30.601769911504423,
"alnum_prop": 0.7082128397917872,
"repo_name": "huggingface/huggingface_hub",
"id": "c08cf1bbdcf01c45898d1984490b1e9ea32051d5",
"size": "3458",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/huggingface_hub/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "338"
},
{
"name": "Python",
"bytes": "1086946"
}
],
"symlink_target": ""
}
|
"""The pi_hole component."""
import asyncio
import logging
from hole import Hole
from hole.exceptions import HoleError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_NAME,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
CONF_LOCATION,
CONF_STATISTICS_ONLY,
DATA_KEY_API,
DATA_KEY_COORDINATOR,
DEFAULT_LOCATION,
DEFAULT_NAME,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MIN_TIME_BETWEEN_UPDATES,
)
_LOGGER = logging.getLogger(__name__)
PI_HOLE_SCHEMA = vol.Schema(
vol.All(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_LOCATION, default=DEFAULT_LOCATION): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
},
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [PI_HOLE_SCHEMA]))},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Pi-hole integration."""
hass.data[DOMAIN] = {}
# import
if DOMAIN in config:
for conf in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up Pi-hole entry."""
name = entry.data[CONF_NAME]
host = entry.data[CONF_HOST]
use_tls = entry.data[CONF_SSL]
verify_tls = entry.data[CONF_VERIFY_SSL]
location = entry.data[CONF_LOCATION]
api_key = entry.data.get(CONF_API_KEY)
# For backward compatibility
if CONF_STATISTICS_ONLY not in entry.data:
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_STATISTICS_ONLY: not api_key}
)
_LOGGER.debug("Setting up %s integration with host %s", DOMAIN, host)
try:
session = async_get_clientsession(hass, verify_tls)
api = Hole(
host,
hass.loop,
session,
location=location,
tls=use_tls,
api_token=api_key,
)
await api.get_data()
except HoleError as ex:
_LOGGER.warning("Failed to connect: %s", ex)
raise ConfigEntryNotReady from ex
async def async_update_data():
"""Fetch data from API endpoint."""
try:
await api.get_data()
except HoleError as err:
raise UpdateFailed(f"Failed to communicate with API: {err}") from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=name,
update_method=async_update_data,
update_interval=MIN_TIME_BETWEEN_UPDATES,
)
hass.data[DOMAIN][entry.entry_id] = {
DATA_KEY_API: api,
DATA_KEY_COORDINATOR: coordinator,
}
for platform in _async_platforms(entry):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass, entry):
"""Unload Pi-hole entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in _async_platforms(entry)
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
@callback
def _async_platforms(entry):
"""Return platforms to be loaded / unloaded."""
platforms = ["sensor"]
if not entry.data[CONF_STATISTICS_ONLY]:
platforms.append("switch")
else:
platforms.append("binary_sensor")
return platforms
class PiHoleEntity(CoordinatorEntity):
"""Representation of a Pi-hole entity."""
def __init__(self, api, coordinator, name, server_unique_id):
"""Initialize a Pi-hole entity."""
super().__init__(coordinator)
self.api = api
self._name = name
self._server_unique_id = server_unique_id
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return "mdi:pi-hole"
@property
def device_info(self):
"""Return the device information of the entity."""
return {
"identifiers": {(DOMAIN, self._server_unique_id)},
"name": self._name,
"manufacturer": "Pi-hole",
}
|
{
"content_hash": "f1384aa26f5885f799e1cf41474b5f95",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 82,
"avg_line_length": 27.254054054054055,
"alnum_prop": 0.6150337167790559,
"repo_name": "adrienbrault/home-assistant",
"id": "bc486a0c9014ece546298def8343cbe70c6133df",
"size": "5042",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/pi_hole/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import hashlib
import importlib
import json
from datetime import datetime
import django
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.http import (HttpResponseRedirect,
HttpResponse,
HttpResponseForbidden)
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from .formats import base_formats
from .forms import (
ImportForm,
ConfirmImportForm,
ExportForm,
export_action_form_factory,
PreImportForm
)
from .resources import (
modelresource_factory,
)
from .results import RowResult
from .tmp_storages import TempFolderStorage
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
SKIP_ADMIN_LOG = getattr(settings, 'IMPORT_EXPORT_SKIP_ADMIN_LOG', False)
TMP_STORAGE_CLASS = getattr(settings, 'IMPORT_EXPORT_TMP_STORAGE_CLASS',
TempFolderStorage)
if isinstance(TMP_STORAGE_CLASS, six.string_types):
try:
# Nod to tastypie's use of importlib.
parts = TMP_STORAGE_CLASS.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
TMP_STORAGE_CLASS = getattr(module, class_name)
except ImportError as e:
msg = "Could not import '%s' for import_export setting 'IMPORT_EXPORT_TMP_STORAGE_CLASS'" % TMP_STORAGE_CLASS
raise ImportError(msg)
#: import / export formats
DEFAULT_FORMATS = (
base_formats.CSV,
base_formats.XLS,
base_formats.XLSX,
base_formats.TSV,
base_formats.ODS,
base_formats.JSON,
base_formats.YAML,
base_formats.HTML,
)
class ImportExportMixinBase(object):
def get_model_info(self):
# module_name is renamed to model_name in Django 1.8
app_label = self.model._meta.app_label
try:
return (app_label, self.model._meta.model_name,)
except AttributeError:
return (app_label, self.model._meta.module_name,)
class ImportMixin(ImportExportMixinBase):
"""
Import mixin.
"""
#: template for change_list view
change_list_template = 'admin/import_export/change_list_import.html'
#: template for import view
import_template_name = 'admin/import_export/import.html'
#: resource class
resource_class = None
#: import resource_class
import_resource_class = None
#: available import formats
formats = DEFAULT_FORMATS
#: import data encoding
from_encoding = "utf-8"
skip_admin_log = None
# storage class for saving temporary files
tmp_storage_class = None
def get_skip_admin_log(self):
if self.skip_admin_log is None:
return SKIP_ADMIN_LOG
else:
return self.skip_admin_log
def get_tmp_storage_class(self):
if self.tmp_storage_class is None:
return TMP_STORAGE_CLASS
else:
return self.tmp_storage_class
pattern_prefix = ''
def get_urls(self):
urls = super(ImportMixin, self).get_urls()
info = self.get_model_info()
my_urls = [
url(r'^process_import/$',
self.admin_site.admin_view(self.process_import),
name='%s_%s_process_import' % info),
url(r'^{}import/$'.format(self.pattern_prefix),
self.admin_site.admin_view(self.import_action),
name='%s_%s_import' % info),
]
return my_urls + urls
def get_resource_class(self):
if not self.resource_class:
return modelresource_factory(self.model)
else:
return self.resource_class
def get_import_resource_class(self):
"""
        Returns ResourceClass to use for import.
"""
if not self.import_resource_class:
return self.get_resource_class()
return self.import_resource_class
def get_import_formats(self):
"""
Returns available import formats.
"""
return [f for f in self.formats if f().can_import()]
def process_import(self, request, *args, **kwargs):
'''
Perform the actual import action (after the user has confirmed he
wishes to import)
'''
opts = self.model._meta
resource = self.get_import_resource_class()()
confirm_form = ConfirmImportForm(request.POST)
if confirm_form.is_valid():
import_formats = self.get_import_formats()
input_format = import_formats[
int(confirm_form.cleaned_data['input_format'])
]()
tmp_storage = self.get_tmp_storage_class()(name=confirm_form.cleaned_data['import_file_name'])
data = tmp_storage.read(input_format.get_read_mode())
if not input_format.is_binary() and self.from_encoding:
data = force_text(data, self.from_encoding)
dataset = input_format.create_dataset(data)
result = resource.import_data(dataset, dry_run=False,
raise_errors=True,
file_name=confirm_form.cleaned_data['original_file_name'],
user=request.user)
if not self.get_skip_admin_log():
# Add imported objects to LogEntry
logentry_map = {
RowResult.IMPORT_TYPE_NEW: ADDITION,
RowResult.IMPORT_TYPE_UPDATE: CHANGE,
RowResult.IMPORT_TYPE_DELETE: DELETION,
}
content_type_id = ContentType.objects.get_for_model(self.model).pk
for row in result:
if row.import_type != row.IMPORT_TYPE_SKIP:
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=content_type_id,
object_id=row.object_id,
object_repr=row.object_repr,
action_flag=logentry_map[row.import_type],
change_message="%s through import_export" % row.import_type,
)
success_message = _('Import finished')
messages.success(request, success_message)
tmp_storage.remove()
url = reverse('admin:%s_%s_changelist' % self.get_model_info(),
current_app=self.admin_site.name)
return HttpResponseRedirect(url)
return HttpResponseForbidden()
def import_action(self, request, *args, **kwargs):
'''
Perform a dry_run of the import to make sure the import will not
        result in errors. If there were no errors, save the user-uploaded
        file to a local temp file that will be used by
'process_import' for the actual import.
'''
resource = self.get_import_resource_class()()
context = {}
import_formats = self.get_import_formats()
form = ImportForm(import_formats,
request.POST or None,
request.FILES or None)
if request.POST and form.is_valid():
input_format = import_formats[
int(form.cleaned_data['input_format'])]()
import_file = form.cleaned_data['import_file']
# first always write the uploaded file to disk as it may be a
# memory file or else based on settings upload handlers
tmp_storage = self.get_tmp_storage_class()()
data = bytes()
for chunk in import_file.chunks():
data += chunk
tmp_storage.save(data, input_format.get_read_mode())
# then read the file, using the proper format-specific mode
# warning, big files may exceed memory
try:
data = tmp_storage.read(input_format.get_read_mode())
if not input_format.is_binary() and self.from_encoding:
data = force_text(data, self.from_encoding)
dataset = input_format.create_dataset(data)
except UnicodeDecodeError as e:
return HttpResponse(_(u"<h1>Imported file is not in unicode: %s</h1>" % e))
except Exception as e:
                return HttpResponse(_(u"<h1>%s encountered while trying to read file: %s</h1>" % (type(e).__name__, e)))
result = resource.import_data(dataset, dry_run=True,
raise_errors=False,
file_name=import_file.name,
user=request.user)
context['result'] = result
if not result.has_errors():
context['confirm_form'] = ConfirmImportForm(initial={
'import_file_name': tmp_storage.name,
'original_file_name': import_file.name,
'input_format': form.cleaned_data['input_format'],
})
if django.VERSION >= (1, 8, 0):
context.update(self.admin_site.each_context(request))
elif django.VERSION >= (1, 7, 0):
context.update(self.admin_site.each_context())
context['form'] = form
context['opts'] = self.model._meta
context['fields'] = [f.column_name for f in resource.get_fields()]
context['media'] = self.media + form.media
return TemplateResponse(request, [self.import_template_name],
context, current_app=self.admin_site.name)
class GenericImportMixin(ImportMixin):
'''
Add import mapping file step
'''
change_list_template = 'admin/import_export/generic/change_list.html'
pre_import_template_name = 'admin/import_export/generic/pre_import.html'
import_template_name = 'admin/import_export/generic/import.html'
    #: predefined field rules for generic format, for example: [('Primary Key', 'id'), ('Book name', 'name'), ('author email', 'author_email')]
predefined_field_rules = None
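    # A minimal subclass sketch (hypothetical model/resource names, not part of
    # this module) showing how predefined_field_rules could be declared:
    #
    #     class BookAdmin(GenericImportExportMixin, admin.ModelAdmin):
    #         resource_class = BookResource
    #         predefined_field_rules = [
    #             ('Primary Key', 'id'),
    #             ('Book name', 'name'),
    #             ('author email', 'author_email'),
    #         ]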
@staticmethod
def header_hash(headers):
return hashlib.sha1('|'.join(map(smart_str, headers))).hexdigest()
def get_urls(self):
info = self.get_model_info()
urls = ImportMixin.get_urls(self)
my_urls = [
url(r'^{}pre_import/$'.format(self.pattern_prefix),
self.admin_site.admin_view(self.pre_import_action),
name='%s_%s_pre_import' % info),
]
return my_urls + urls
def get_predefined_field_rules_json_map(self):
'''
        return {'sha1hash of headers': 'json_string'}
'''
predefined_rules = {}
if self.predefined_field_rules is None:
return predefined_rules
for rule in self.predefined_field_rules:
rule_hash = self.header_hash(
headers=[header for header, field in rule])
predefined_rules[rule_hash] = json.dumps(dict(rule))
return predefined_rules
def pre_convert_dataset(self, dataset, rule, **kwargs):
"""
:param dataset: Dataset
:param rule: {
'Column name': 'resource_field',
}
:return:
"""
def convert_dataset_by_rule(self, dataset, rule, **kwargs):
"""
:param dataset: Dataset
:param rule: {
'Column name': 'resource_field',
}
:return:
"""
rule = {smart_str(k): smart_str(v) for k, v in rule.items()}
resource = self.get_import_resource_class()()
resource_fields = resource.fields.keys()
dataset.headers = map(smart_str, dataset.headers)
delete_headers = [h for h in dataset.headers if h not in rule and h not in resource_fields]
for header in delete_headers:
del dataset[header]
new_headers = []
for h in dataset.headers:
if h in rule:
new_headers.append(rule[h])
elif h in resource_fields:
new_headers.append(h)
dataset.headers = new_headers
return dataset
def post_convert_dataset(self, dataset, rule, **kwargs):
"""
:param dataset: Dataset
:param rule: {
'Column name': 'resource_field',
}
:return:
"""
resource = self.get_import_resource_class()()
empty_fields = set(resource.fields.keys()) ^ set(dataset.headers)
for f in empty_fields:
dataset.insert_col(0, (lambda row: ''), header=f)
def import_action(self, request, *args, **kwargs):
'''
Perform a dry_run of the import to make sure the import will not
        result in errors. If there were no errors, save the user-uploaded
        file to a local temp file that will be used by
'process_import' for the actual import.
'''
resource = self.get_import_resource_class()()
context = {}
import_formats = self.get_import_formats()
form = PreImportForm(request.POST or None,
request.FILES or None)
if request.POST and form.is_valid():
input_format = import_formats[
int(form.cleaned_data['input_format'])]()
import_rule = form.cleaned_data['import_rule']
# first always write the uploaded file to disk as it may be a
# memory file or else based on settings upload handlers
tmp_storage = self.get_tmp_storage_class()(name=form.cleaned_data['import_file_name'])
# then read the file, using the proper format-specific mode
# warning, big files may exceed memory
try:
data = tmp_storage.read(input_format.get_read_mode())
if not input_format.is_binary() and self.from_encoding:
data = force_text(data, self.from_encoding)
dataset = input_format.create_dataset(data)
except UnicodeDecodeError as e:
return HttpResponse(_(u"<h1>Imported file is not in unicode: %s</h1>" % e))
except Exception as e:
                return HttpResponse(_(u"<h1>%s encountered while trying to read file: %s</h1>" % (type(e).__name__, e)))
self.pre_convert_dataset(dataset, import_rule, **kwargs)
dataset = self.convert_dataset_by_rule(dataset, import_rule,
**kwargs)
self.post_convert_dataset(dataset, import_rule, **kwargs)
result = resource.import_data(dataset, dry_run=True,
raise_errors=False,
file_name=form.cleaned_data['original_file_name'],
user=request.user)
context['result'] = result
if not result.has_errors():
tmp_storage = self.get_tmp_storage_class()()
tmp_storage.save(input_format.export_data(dataset), input_format.get_read_mode())
context['confirm_form'] = ConfirmImportForm(initial={
'import_file_name': tmp_storage.name,
'original_file_name': form.cleaned_data['original_file_name'],
'input_format': form.cleaned_data['input_format'],
})
if django.VERSION >= (1, 8, 0):
context.update(self.admin_site.each_context(request))
elif django.VERSION >= (1, 7, 0):
context.update(self.admin_site.each_context())
context['form'] = form
context['opts'] = self.model._meta
context['fields'] = [f.column_name for f in resource.get_fields()]
context['media'] = self.media + form.media
return TemplateResponse(request, [self.import_template_name],
context, current_app=self.admin_site.name)
def pre_import_action(self, request, *args, **kwargs):
        '''
        First step of the generic import: read the uploaded file and show its
        column headers so the user can map them to resource fields before the
        dry-run import.
        '''
resource = self.get_import_resource_class()()
context = {}
import_formats = self.get_import_formats()
form = ImportForm(import_formats,
request.POST or None,
request.FILES or None)
if request.POST and form.is_valid():
input_format = import_formats[
int(form.cleaned_data['input_format'])]()
import_file = form.cleaned_data['import_file']
# first always write the uploaded file to disk as it may be a
# memory file or else based on settings upload handlers
tmp_storage = self.get_tmp_storage_class()()
data = bytes()
for chunk in import_file.chunks():
data += chunk
tmp_storage.save(data, input_format.get_read_mode())
# then read the file, using the proper format-specific mode
# warning, big files may exceed memory
try:
data = tmp_storage.read(input_format.get_read_mode())
if not input_format.is_binary() and self.from_encoding:
data = force_text(data, self.from_encoding)
dataset = input_format.create_dataset(data)
except UnicodeDecodeError as e:
return HttpResponse(_(u"<h1>Imported file is not in unicode: %s</h1>" % e))
except Exception as e:
                return HttpResponse(_(u"<h1>%s encountered while trying to read file: %s</h1>" % (type(e).__name__, e)))
context['dataset'] = dataset
context['header_hash'] = self.header_hash(dataset.headers)
context['confirm_form'] = PreImportForm(initial={
'import_file_name': tmp_storage.name,
'original_file_name': import_file.name,
'input_format': form.cleaned_data['input_format'],
})
if django.VERSION >= (1, 8, 0):
context.update(self.admin_site.each_context(request))
elif django.VERSION >= (1, 7, 0):
context.update(self.admin_site.each_context())
context["choice_fields"] = resource.get_fields_display()
context['predefined_field_rules'] = self.get_predefined_field_rules_json_map()
context['form'] = form
context['opts'] = self.model._meta
context['fields'] = [f.column_name for f in resource.get_fields()]
context['media'] = self.media + form.media
return TemplateResponse(request, [self.pre_import_template_name],
context, current_app=self.admin_site.name)
class ExportMixin(ImportExportMixinBase):
"""
Export mixin.
"""
#: export resource class
export_resource_class = None
#: resource class
resource_class = None
#: template for change_list view
change_list_template = 'admin/import_export/change_list_export.html'
#: template for export view
export_template_name = 'admin/import_export/export.html'
#: available import formats
formats = DEFAULT_FORMATS
#: export data encoding
to_encoding = "utf-8"
def get_urls(self):
urls = super(ExportMixin, self).get_urls()
my_urls = [
url(r'^export/$',
self.admin_site.admin_view(self.export_action),
name='%s_%s_export' % self.get_model_info()),
]
return my_urls + urls
def get_resource_class(self):
if not self.resource_class:
return modelresource_factory(self.model)
else:
return self.resource_class
def get_export_resource_class(self):
"""
Returns ResourceClass to use for export.
"""
return self.export_resource_class or self.get_resource_class()
def get_export_formats(self):
"""
        Returns available export formats.
"""
return [f for f in self.formats if f().can_export()]
def get_export_filename(self, file_format):
date_str = datetime.now().strftime('%Y-%m-%d')
filename = "%s-%s.%s" % (self.model.__name__,
date_str,
file_format.get_extension())
return filename
def get_export_queryset(self, request):
"""
Returns export queryset.
Default implementation respects applied search and filters.
"""
# copied from django/contrib/admin/options.py
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
ChangeList = self.get_changelist(request)
cl = ChangeList(request, self.model, list_display,
list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields,
self.list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable,
self)
# query_set has been renamed to queryset in Django 1.8
try:
return cl.queryset
except AttributeError:
return cl.query_set
def get_export_data(self, file_format, queryset):
"""
Returns file_format representation for given queryset.
"""
resource_class = self.get_export_resource_class()
data = resource_class().export(queryset)
export_data = file_format.export_data(data)
return export_data
def export_action(self, request, *args, **kwargs):
formats = self.get_export_formats()
form = ExportForm(formats, request.POST or None)
if form.is_valid():
file_format = formats[
int(form.cleaned_data['file_format'])
]()
queryset = self.get_export_queryset(request)
export_data = self.get_export_data(file_format, queryset)
content_type = file_format.get_content_type()
# Django 1.7 uses the content_type kwarg instead of mimetype
try:
response = HttpResponse(export_data, content_type=content_type)
except TypeError:
response = HttpResponse(export_data, mimetype=content_type)
response['Content-Disposition'] = 'attachment; filename=%s' % (
self.get_export_filename(file_format),
)
return response
context = {}
if django.VERSION >= (1, 8, 0):
context.update(self.admin_site.each_context(request))
elif django.VERSION >= (1, 7, 0):
context.update(self.admin_site.each_context())
context['form'] = form
context['opts'] = self.model._meta
context['media'] = self.media + form.media
return TemplateResponse(request, [self.export_template_name],
context, current_app=self.admin_site.name)
class ImportExportMixin(ImportMixin, ExportMixin):
"""
Import and export mixin.
"""
#: template for change_list view
change_list_template = 'admin/import_export/change_list_import_export.html'
class GenericImportExportMixin(GenericImportMixin, ExportMixin):
"""
Generic import and export mixin.
"""
#: template for change_list view
change_list_template = 'admin/import_export/generic/change_list_import_export.html'
class ImportExportModelAdmin(ImportExportMixin, admin.ModelAdmin):
"""
Subclass of ModelAdmin with import/export functionality.
"""
class ExportActionModelAdmin(ExportMixin, admin.ModelAdmin):
"""
Subclass of ModelAdmin with export functionality implemented as an
admin action.
"""
# Don't use custom change list template.
change_list_template = None
def __init__(self, *args, **kwargs):
"""
Adds a custom action form initialized with the available export
formats.
"""
choices = []
formats = self.get_export_formats()
if formats:
choices.append(('', '---'))
for i, f in enumerate(formats):
choices.append((str(i), f().get_title()))
self.action_form = export_action_form_factory(choices)
super(ExportActionModelAdmin, self).__init__(*args, **kwargs)
def export_admin_action(self, request, queryset):
"""
Exports the selected rows using file_format.
"""
export_format = request.POST.get('file_format')
if not export_format:
messages.warning(request, _('You must select an export format.'))
else:
formats = self.get_export_formats()
file_format = formats[int(export_format)]()
export_data = self.get_export_data(file_format, queryset)
content_type = file_format.get_content_type()
# Django 1.7 uses the content_type kwarg instead of mimetype
try:
response = HttpResponse(export_data, content_type=content_type)
except TypeError:
response = HttpResponse(export_data, mimetype=content_type)
response['Content-Disposition'] = 'attachment; filename=%s' % (
self.get_export_filename(file_format),
)
return response
export_admin_action.short_description = _(
'Export selected %(verbose_name_plural)s')
actions = [export_admin_action]
class ImportExportActionModelAdmin(ImportMixin, ExportActionModelAdmin):
"""
Subclass of ExportActionModelAdmin with import/export functionality.
Export functionality is implemented as an admin action.
"""
|
{
"content_hash": "9de0cbe7ad09f61eae8be6ac854ef58d",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 141,
"avg_line_length": 36.88857142857143,
"alnum_prop": 0.5887615211834869,
"repo_name": "Apkawa/django-import-export",
"id": "bb0115fe82097719ffbc1a50a0031d81a16feea0",
"size": "25822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "import_export/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "15423"
},
{
"name": "Python",
"bytes": "123162"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
}
|
import json
from geofire_utils import validate_key, validate_location, encode_geofire_object, encode_geohash
class GeoFire:
def __init__(self, firebaseRef):
self._firebaseRef = firebaseRef
def set(self, key, location):
validate_key(key)
if location:
# Setting location to null is valid since it will remove the key
validate_location(location)
if location is None:
self._firebaseRef.child(key).remove();
else:
geohash = encode_geohash(location)
self._firebaseRef.child(key).set_with_priority(encode_geofire_object(location, geohash), geohash)
def setMany(self, keys_and_locations):
value = {}
for key, location in keys_and_locations.iteritems():
validate_key(key)
validate_location(location)
geohash = encode_geohash(location)
encoded = encode_geofire_object(location, geohash)
encoded['.priority'] = geohash
value[key] = encoded
self._firebaseRef.set(value)
|
{
"content_hash": "c41730654c0816465f3b21d09ee6deb9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 104,
"avg_line_length": 31.29032258064516,
"alnum_prop": 0.6907216494845361,
"repo_name": "alpire/pitlivebus",
"id": "411b647c5324fe79190a8a415bb2f9922f98ce93",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/firebase/geofire.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32476"
},
{
"name": "JavaScript",
"bytes": "13557"
},
{
"name": "Python",
"bytes": "7326"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import url, patterns, include
from mint.django_rest.rbuilder.discovery import views as discoveryviews
from mint.django_rest.rbuilder.inventory.views.v1 import views as inventoryviews
class URLRegistry(object):
_registry = {}
VERSION = '1'
@classmethod
def URL(cls, regex, *args, **kwargs):
viewName = kwargs.get('name', None)
if not regex.startswith("^"):
regex = "^%s" % regex
if viewName:
# disabling temporarily for now as it seems to be hiding some tracebacks
#oldUrl = cls._registry.get(viewName)
#if oldUrl:
# raise Exception("Duplicate view name: %s (urls: %s, %s)" %
# (viewName, oldUrl, regex))
cls._registry[viewName] = regex
# try to get model name, is optional
modelName = kwargs.pop('model', None)
u = url(regex, *args, **kwargs)
u.model = modelName
return u
URL = URLRegistry.URL
urlpatterns = patterns('',
# not versioned
# support outdated rpath-register (needed for older platforms)
URL(r'^api/systems/?$',
inventoryviews.InventorySystemsService(),
name='SystemsHack2'),
URL(r'^api/?$', discoveryviews.VersionsService(), name='API'),
# API v1
URL(r'^api/v1/?$', discoveryviews.ApiVersionService(), name='APIVersion'),
(r'^api/v1', include('mint.django_rest.v1')),
# API v2
)
|
{
"content_hash": "984544c5114b72e06c5001c8d1e4522c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 33.76744186046512,
"alnum_prop": 0.6150137741046832,
"repo_name": "sassoftware/mint",
"id": "bf90f2c53b5f8b108401187164371e861c22c032",
"size": "2039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mint/django_rest/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50165"
},
{
"name": "Genshi",
"bytes": "58741"
},
{
"name": "HTML",
"bytes": "2814"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Makefile",
"bytes": "92418"
},
{
"name": "NASL",
"bytes": "582"
},
{
"name": "PLpgSQL",
"bytes": "5358"
},
{
"name": "Puppet",
"bytes": "17914"
},
{
"name": "Python",
"bytes": "3239135"
},
{
"name": "Ruby",
"bytes": "9268"
},
{
"name": "Shell",
"bytes": "24834"
}
],
"symlink_target": ""
}
|
__author__ = 'tilmannbruckhaus'
class LargestPalindrome:
# Largest palindrome product
# Problem 4
# A palindromic number reads the same both ways.
# The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 x 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
def __init__(self):
pass
@staticmethod
def is_palindrome(i):
return i == int(str(i)[::-1])
@staticmethod
def find(digits):
largest = 0
factors = [0, 0]
start = 10 ** (digits - 1)
end = 10 ** digits
for i in range(start, end):
for j in range(i, end):
candidate = i * j
if LargestPalindrome.is_palindrome(candidate):
if candidate > largest:
factors = [i, j]
largest = candidate
if __name__ == '__main__':
print "The largest palindrome made from the product of two", digits, "digit numbers is", \
factors[0], "*", factors[1], "=", largest
return largest
if __name__ == '__main__':
for size in range(1, 4):
LargestPalindrome().find(size)
|
{
"content_hash": "58b0f51c899047be82f29e7a31bec712",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 102,
"avg_line_length": 31.205128205128204,
"alnum_prop": 0.5414954806902219,
"repo_name": "bruckhaus/challenges",
"id": "a1ce906a3cbee1f7c80b20a40281e92c48f73930",
"size": "1217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_challenges/project_euler/p004_largest_palindrome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3364"
},
{
"name": "HTML",
"bytes": "12040"
},
{
"name": "Java",
"bytes": "255589"
},
{
"name": "Python",
"bytes": "121414"
},
{
"name": "Ruby",
"bytes": "16650"
},
{
"name": "Scala",
"bytes": "25224"
}
],
"symlink_target": ""
}
|
import shutil
from subprocess import call
from os.path import join, isfile
import argparse
WINDOWS = 'win64'
WINDOWS_DEBUG = 'win64-debug'
LINUX = 'linux'
parser = argparse.ArgumentParser()
parser.add_argument("os", help="the operating system being built for", choices=[WINDOWS, WINDOWS_DEBUG, LINUX])
parser.add_argument("--version", help="the version of the game")
args = parser.parse_args()
# Build game
if args.os == WINDOWS:
call(["go", "build", "-ldflags", "-H=windowsgui"], cwd="../")
elif args.os == LINUX or args.os == WINDOWS_DEBUG:
call(["go", "build"], cwd="../")
# Set the directory/zipfile name
directory = "gokoban-" + args.os + "-bin"
if args.version:
directory += ("-" + args.version)
# Ignore any .blend or .xcf files
ignore_func = lambda d, files: [f for f in files if isfile(join(d, f)) and (f.endswith('.xcf') or f.endswith('.blend'))]
# Copy necessary files
shutil.copytree('../levels', directory + "/levels")
shutil.copytree('../audio', directory + "/audio")
shutil.copytree('../img', directory + "/img", ignore=ignore_func)
shutil.copytree('../gui', directory + "/gui", ignore=ignore_func)
shutil.copytree('../gopher', directory + "/gopher", ignore=ignore_func)
shutil.copy('../LICENSE', directory)
shutil.copy('../README.md', directory)
# Move executable into directory to be archived
shutil.move('../gokoban.exe', directory)
# If windows, need to copy the sound library DLLs
if args.os == WINDOWS or args.os == WINDOWS_DEBUG:
shutil.copy('win/libogg.dll', directory)
shutil.copy('win/libvorbis.dll', directory)
shutil.copy('win/libvorbisfile.dll', directory)
shutil.copy('win/OpenAL32.dll', directory)
shutil.copy('win/vcruntime140.dll', directory)
# Create zip archive and delete temporary directory
shutil.make_archive(directory, 'zip', directory)
shutil.rmtree(directory)
|
{
"content_hash": "3c502101a070ce0eece355eb5be831bf",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 121,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.7075316107751511,
"repo_name": "danaugrs/gokoban",
"id": "e11530957eddb560c07bb3b80b3843ada538e3d4",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dist/generate_zip.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Go",
"bytes": "70687"
}
],
"symlink_target": ""
}
|
import statsmodels
import survivalstan
import random
random.seed(9001)
def load_test_dataset(n=50):
''' Load test dataset from R survival package
'''
dataset = statsmodels.datasets.get_rdataset(package='survival', dataname='flchain' )
d = dataset.data.query('futime > 7').sample(n=n)
d.reset_index(level=0, inplace=True)
d.rename(columns={'futime': 't', 'death': 'event'}, inplace=True)
return(d)
def sim_test_dataset(n=50):
dataset = survivalstan.sim.sim_data_exp_correlated(N=n, censor_time=10)
return(dataset)
def load_test_dataset_long(n=20):
''' Load test dataset from R survival package
'''
d = load_test_dataset(n=n)
dlong = survivalstan.prep_data_long_surv(d, time_col='t', event_col='event')
return dlong
def sim_test_dataset_long(n=20):
d = sim_test_dataset(n=n)
dlong = survivalstan.prep_data_long_surv(d, time_col='t', event_col='event')
return dlong
|
{
"content_hash": "77f64a363ec590822c135a6864c1fa5c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 30.225806451612904,
"alnum_prop": 0.6755602988260405,
"repo_name": "jburos/survivalstan",
"id": "3da952ffd1889c4b57327d3030f5b72ddc2d93e2",
"size": "937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_datasets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2179465"
},
{
"name": "Python",
"bytes": "166591"
},
{
"name": "Shell",
"bytes": "773"
},
{
"name": "Stan",
"bytes": "45974"
}
],
"symlink_target": ""
}
|
from django import forms
from django_core.forms.widgets import CommaSeparatedListWidget
from test_objects.models import TestIntegerListFieldModel
from test_objects.models import TestListFieldModel
class TestListFieldForm(forms.ModelForm):
class Meta:
model = TestListFieldModel
class TestIntegerListFieldForm(forms.ModelForm):
class Meta:
model = TestIntegerListFieldModel
class TestCommaSeparatedListWidgetForm(forms.ModelForm):
class Meta:
model = TestIntegerListFieldModel
fields = ('int_list_field_choices',)
widgets = {
'int_list_field_choices': CommaSeparatedListWidget
}
|
{
"content_hash": "ce6b67983a2b2882bf1c980656de68be",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 24.444444444444443,
"alnum_prop": 0.740909090909091,
"repo_name": "InfoAgeTech/django-core",
"id": "9bbdb32fd92c4d521a77d75f9d1099ebba7c3b7d",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_objects/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "452"
},
{
"name": "Python",
"bytes": "180676"
}
],
"symlink_target": ""
}
|
import os
try:
from setuptools import setup
except:
from distutils.core import setup
from muppet import __version__
readMeFile = open(os.path.join(os.path.dirname(__file__), "README.rst"))
long_description = readMeFile.read()
readMeFile.close()
setup(
name="muppet",
version=__version__,
description="Durable messaging for distributed processing",
long_description=long_description,
url="https://github.com/pandastrike/muppet",
author="Daniel Yoder, Mahesh Yellai",
maintainer="Daniel Yoder, Mahesh Yellai",
author_email="daniel.yoder@gmail.com, mahesh.yellai@gmail.com",
maintainer_email="daniel.yoder@gmail.com, mahesh.yellai@gmail.com",
keywords=["Durable messaging", "Distributed processing", "Redis"],
license="MIT",
packages=["muppet"],
install_requires=["redis>=2.9.1"],
tests_require=["pytest>=2.5.2"]
)
|
{
"content_hash": "2e491b447a8aba7821a92fdfd756a50f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 72,
"avg_line_length": 29.20689655172414,
"alnum_prop": 0.7260920897284534,
"repo_name": "pandastrike/muppet",
"id": "93cc0b5e65835cdb37524a834eabd62d2782a3cf",
"size": "869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10066"
}
],
"symlink_target": ""
}
|
from django import setup
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
},
)
setup()
|
{
"content_hash": "9ee512f1dc4f68eb72cc9c1036fa499d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 20.857142857142858,
"alnum_prop": 0.5102739726027398,
"repo_name": "Relrin/aiorest-ws",
"id": "eb9e0b9aa4820d54a505ac1dbac534213ad15cc2",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/db/orm/django/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "233"
},
{
"name": "Python",
"bytes": "908265"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import pandora.utils
from pandora.tagger import Tagger
def main(config, train='data/wilhelmus/all_train', dev='data/wilhelmus/all_dev', **kwargs):
print('::: started :::')
params = pandora.utils.get_param_dict(config)
params['config_path'] = config
params.update({k: v for k,v in kwargs.items() if v is not None})
print("::: Loaded Config :::")
for k, v in params.items():
print("\t{} : {}".format(k, v))
train_data = pandora.utils.load_annotated_dir(
train,
format='tab',
extension='.tab',
include_pos=params['include_pos'],
include_lemma=params['include_lemma'],
include_morph=params['include_morph'],
nb_instances=None
)
dev_data = pandora.utils.load_annotated_dir(
dev,
format='tab',
extension='.tab',
include_pos=params['include_pos'],
include_lemma=params['include_lemma'],
include_morph=params['include_morph'],
nb_instances=None
)
tagger = Tagger(**params)
tagger.setup_to_train(
train_data=train_data,
dev_data=dev_data
)
for i in range(int(params['nb_epochs'])):
tagger.epoch()
tagger.save()
tagger.save()
print('::: ended :::')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Training interface of Pandora")
parser.add_argument("config", help="Path to retrieve configuration file")
parser.add_argument("--dev", help="Path to directory containing dev files")
parser.add_argument("--train", help="Path to directory containing train files")
parser.add_argument("--nb_epochs", help="Number of epoch", type=int)
main(**vars(parser.parse_args()))
|
{
"content_hash": "0cbb566937847728264803407e1dc4ba",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 91,
"avg_line_length": 31.385964912280702,
"alnum_prop": 0.6171045276690889,
"repo_name": "PonteIneptique/pandora",
"id": "9e1e49574f531f881b6e038db92257f70d028e29",
"size": "1835",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70322"
},
{
"name": "Shell",
"bytes": "152"
}
],
"symlink_target": ""
}
|
import time
from logging import getLogger
from Doctopus.Doctopus_main import Check, Handler
log = getLogger('Doctopus.plugins')
class MyCheck(Check):
def __init__(self, configuration):
super(MyCheck, self).__init__(configuration=configuration)
def user_check(self):
"""
        :return: the data you requested.
"""
        data = 'check data'
log.debug('%s', data)
time.sleep(2)
yield data
class MyHandler(Handler):
def __init__(self, configuration):
super(MyHandler, self).__init__(configuration=configuration)
def user_handle(self, raw_data):
"""
        The user must return a dict; the keys below may be provided or omitted:
        timestamp, the timestamp parsed from the data (an integer)
        tags, tags derived from the data
        data_value, a list or dict assembled from the data; if it is a list,
        the upper framework combines the list with field_name_list
        automatically; if it is a dict, it is left untouched and the data is
        assumed to already specify its table name
        measurement, the influxdb table name derived from the data type
        e.g.:
        list:
        {'data_value': [list],  required
        'tags': [dict],  optional
        'table_name': [str],  optional
        'timestamp': int}  optional
        dict:
        {'data_value': {'fieldname': value},  required
        'tags': [dict],  optional
        'table_name': [str],  optional
        'timestamp': int}  optional
:param raw_data:
:return:
"""
        # example:
        # the processed data is assembled into value_list
log.debug('%s', raw_data)
data_value_list = [raw_data]
tags = {'user_defined_tag': 'data_ralated_tag'}
        # the user can define tags in the handler according to the data format
user_postprocessed = {'data_value': data_value_list,
'tags': tags, }
yield user_postprocessed
|
{
"content_hash": "f2455f4428e2ac4921a769a99d44f3c6",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 68,
"avg_line_length": 26.753846153846155,
"alnum_prop": 0.5658424381828637,
"repo_name": "maboss-YCMan/Doctopus",
"id": "09e4b1263486c30349c186bfd316d18187158cac",
"size": "2021",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "Doctopus/plugins/plugin_prototype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "525"
},
{
"name": "Lua",
"bytes": "3465"
},
{
"name": "Python",
"bytes": "59510"
}
],
"symlink_target": ""
}
|
"""
This is a simple light weight plotting module that can be used with Boa or
easily integrated into your own wxPython application. The emphasis is on small
size and fast plotting for large data sets. It has a reasonable number of
features to do line and scatter graphs easily. It is not as sophisticated or as
powerful as SciPy Plt or Chaco. Both of these are great packages but consume
huge amounts of computer resources for simple plots. They can be found at
http://scipy.com
This file contains two parts; first the re-usable library stuff, then, after
a "if __name__=='__main__'" test, a simple frame and a few default plots
for examples and testing.
Based on wxPlotCanvas
Written by K.Hinsen, R. Srinivasan;
Ported to wxPython Harm van der Heijden, feb 1999
Major Additions Gordon Williams Feb. 2003 (g_will@cyberus.ca)
-More style options
-Zooming using mouse "rubber band"
-Scroll left, right
-Grid(graticule)
-Printing, preview, and page set up (margins)
-Axis and title labels
-Cursor xy axis values
-Doc strings and lots of comments
-Optimizations for large number of points
-Legends
Did a lot of work here to speed markers up. Only a factor of 4 improvement
though. Lines are much faster than markers, especially filled markers. Stay
away from circles and triangles unless you only have a few thousand points.
Times for 25,000 points
Line - 0.078 sec
Markers
Square - 0.22 sec
dot - 0.10
circle - 0.87
cross,plus - 0.28
triangle, triangle_down - 0.90
Thanks to Chris Barker for getting this version working on Linux.
Zooming controls with mouse (when enabled):
Left mouse drag - Zoom box.
Left mouse double click - reset zoom.
Right mouse click - zoom out centred on click location.
"""
import wx
import time, string
# Needs Numeric
try:
import Numeric
except:
try:
import numarray as Numeric #if numarray is used it is renamed Numeric
except:
msg= """
This module requires the Numeric or numarray module,
which could not be imported. It probably is not installed
(it's not part of the standard Python distribution). See the
Python site (http://www.python.org) for information on
downloading source or binaries."""
raise ImportError, "Numeric or numarray not found. \n" + msg
try:
True
except NameError:
True = 1==1
False = 1==0
#
# Plotting classes...
#
class PolyPoints:
"""Base Class for lines and markers
- All methods are private.
"""
def __init__(self, points, attr):
self.points = Numeric.array(points)
self.currentScale= (1,1)
self.currentShift= (0,0)
self.scaled = self.points
self.attributes = {}
self.attributes.update(self._attributes)
for name, value in attr.items():
if name not in self._attributes.keys():
raise KeyError, "Style attribute incorrect. Should be one of %s" %self._attributes.keys()
self.attributes[name] = value
def boundingBox(self):
if len(self.points) == 0:
#no curves to draw
#defaults to (-1,-1) and (1,1) but axis can be set in Draw
minXY= Numeric.array([-1,-1])
maxXY= Numeric.array([ 1, 1])
else:
minXY= Numeric.minimum.reduce(self.points)
maxXY= Numeric.maximum.reduce(self.points)
return minXY, maxXY
def scaleAndShift(self, scale=(1,1), shift=(0,0)):
if len(self.points) == 0:
#no curves to draw
return
if (scale is not self.currentScale) or (shift is not self.currentShift):
#update point scaling
self.scaled = scale*self.points+shift
self.currentScale= scale
self.currentShift= shift
#else unchanged use the current scaling
def getLegend(self):
return self.attributes['legend']
class PolyLine(PolyPoints):
"""Class to define line type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'style': wx.SOLID,
'legend': ''}
def __init__(self, points, **attr):
"""Creates PolyLine object
points - sequence (array, tuple or list) of (x,y) points making up line
**attr - key word attributes
Defaults:
'colour'= 'black', - wxPen Colour any wxNamedColour
'width'= 1, - Pen width
'style'= wxSOLID, - wxPen style
'legend'= '' - Line Legend to display
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale
style= self.attributes['style']
dc.SetPen(wx.Pen(wx.NamedColour(colour), int(width), style))
if coord == None:
dc.DrawLines(self.scaled)
else:
dc.DrawLines(coord) #draw legend line
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
h= self.attributes['width'] * printerScale
w= 5 * h
return (w,h)
class PolyMarker(PolyPoints):
"""Class to define marker type and style
- All methods except __init__ are private.
"""
_attributes = {'colour': 'black',
'width': 1,
'size': 2,
'fillcolour': None,
'fillstyle': wx.SOLID,
'marker': 'circle',
'legend': ''}
def __init__(self, points, **attr):
"""Creates PolyMarker object
points - sequence (array, tuple or list) of (x,y) points
**attr - key word attributes
Defaults:
'colour'= 'black', - wxPen Colour any wxNamedColour
'width'= 1, - Pen width
'size'= 2, - Marker size
'fillcolour'= same as colour, - wxBrush Colour any wxNamedColour
'fillstyle'= wx.SOLID, - wxBrush fill style (use wxTRANSPARENT for no fill)
'marker'= 'circle' - Marker shape
'legend'= '' - Marker Legend to display
Marker Shapes:
- 'circle'
- 'dot'
- 'square'
- 'triangle'
- 'triangle_down'
- 'cross'
- 'plus'
"""
PolyPoints.__init__(self, points, attr)
def draw(self, dc, printerScale, coord= None):
colour = self.attributes['colour']
width = self.attributes['width'] * printerScale
size = self.attributes['size'] * printerScale
fillcolour = self.attributes['fillcolour']
fillstyle = self.attributes['fillstyle']
marker = self.attributes['marker']
dc.SetPen(wx.Pen(wx.NamedColour(colour),int(width)))
if fillcolour:
dc.SetBrush(wx.Brush(wx.NamedColour(fillcolour),fillstyle))
else:
dc.SetBrush(wx.Brush(wx.NamedColour(colour), fillstyle))
if coord == None:
self._drawmarkers(dc, self.scaled, marker, size)
else:
self._drawmarkers(dc, coord, marker, size) #draw legend marker
def getSymExtent(self, printerScale):
"""Width and Height of Marker"""
s= 5*self.attributes['size'] * printerScale
return (s,s)
def _drawmarkers(self, dc, coords, marker,size=1):
f = getattr(self, '_' + marker)
f(dc, coords, size)
def _circle(self, dc, coords, size=1):
fact= 2.5*size
wh= 5.0*size
rect= Numeric.zeros((len(coords),4),Numeric.Float)+[0.0,0.0,wh,wh]
rect[:,0:2]= coords-[fact,fact]
dc.DrawEllipseList(rect.astype(Numeric.Int32))
def _dot(self, dc, coords, size=1):
dc.DrawPointList(coords)
def _square(self, dc, coords, size=1):
fact= 2.5*size
wh= 5.0*size
rect= Numeric.zeros((len(coords),4),Numeric.Float)+[0.0,0.0,wh,wh]
rect[:,0:2]= coords-[fact,fact]
dc.DrawRectangleList(rect.astype(Numeric.Int32))
def _triangle(self, dc, coords, size=1):
shape= [(-2.5*size,1.44*size), (2.5*size,1.44*size), (0.0,-2.88*size)]
poly= Numeric.repeat(coords,3)
poly.shape= (len(coords),3,2)
poly += shape
dc.DrawPolygonList(poly.astype(Numeric.Int32))
def _triangle_down(self, dc, coords, size=1):
shape= [(-2.5*size,-1.44*size), (2.5*size,-1.44*size), (0.0,2.88*size)]
poly= Numeric.repeat(coords,3)
poly.shape= (len(coords),3,2)
poly += shape
dc.DrawPolygonList(poly.astype(Numeric.Int32))
def _cross(self, dc, coords, size=1):
fact= 2.5*size
for f in [[-fact,-fact,fact,fact],[-fact,fact,fact,-fact]]:
lines= Numeric.concatenate((coords,coords),axis=1)+f
dc.DrawLineList(lines.astype(Numeric.Int32))
def _plus(self, dc, coords, size=1):
fact= 2.5*size
for f in [[-fact,0,fact,0],[0,-fact,0,fact]]:
lines= Numeric.concatenate((coords,coords),axis=1)+f
dc.DrawLineList(lines.astype(Numeric.Int32))
class PlotGraphics:
"""Container to hold PolyXXX objects and graph labels
- All methods except __init__ are private.
"""
def __init__(self, objects, title='', xLabel='', yLabel= ''):
"""Creates PlotGraphics object
objects - list of PolyXXX objects to make graph
title - title shown at top of graph
xLabel - label shown on x-axis
yLabel - label shown on y-axis
"""
if type(objects) not in [list,tuple]:
raise TypeError, "objects argument should be list or tuple"
self.objects = objects
self.title= title
self.xLabel= xLabel
self.yLabel= yLabel
def boundingBox(self):
p1, p2 = self.objects[0].boundingBox()
for o in self.objects[1:]:
p1o, p2o = o.boundingBox()
p1 = Numeric.minimum(p1, p1o)
p2 = Numeric.maximum(p2, p2o)
return p1, p2
def scaleAndShift(self, scale=(1,1), shift=(0,0)):
for o in self.objects:
o.scaleAndShift(scale, shift)
def setPrinterScale(self, scale):
"""Thickens up lines and markers only for printing"""
self.printerScale= scale
def setXLabel(self, xLabel= ''):
"""Set the X axis label on the graph"""
self.xLabel= xLabel
def setYLabel(self, yLabel= ''):
"""Set the Y axis label on the graph"""
self.yLabel= yLabel
def setTitle(self, title= ''):
"""Set the title at the top of graph"""
self.title= title
def getXLabel(self):
"""Get x axis label string"""
return self.xLabel
def getYLabel(self):
"""Get y axis label string"""
return self.yLabel
def getTitle(self, title= ''):
"""Get the title at the top of graph"""
return self.title
def draw(self, dc):
for o in self.objects:
#t=time.clock() #profile info
o.draw(dc, self.printerScale)
#dt= time.clock()-t
#print o, "time=", dt
def getSymExtent(self, printerScale):
"""Get max width and height of lines and markers symbols for legend"""
symExt = self.objects[0].getSymExtent(printerScale)
for o in self.objects[1:]:
oSymExt = o.getSymExtent(printerScale)
symExt = Numeric.maximum(symExt, oSymExt)
return symExt
def getLegendNames(self):
"""Returns list of legend names"""
lst = [None]*len(self)
for i in range(len(self)):
lst[i]= self.objects[i].getLegend()
return lst
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
#-------------------------------------------------------------------------------
#Main window that you will want to import into your application.
class PlotCanvas(wx.Window):
"""Subclass of a wxWindow to allow simple general plotting
of data with zoom, labels, and automatic axis scaling."""
def __init__(self, parent, id = -1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style= wx.DEFAULT_FRAME_STYLE, name= ""):
"""Constucts a window, which can be a child of a frame, dialog or
any other non-control window"""
wx.Window.__init__(self, parent, id, pos, size, style, name)
self.border = (1,1)
self.SetBackgroundColour(wx.NamedColour("white"))
wx.EVT_PAINT(self, self.OnPaint)
wx.EVT_SIZE(self,self.OnSize)
#Create some mouse events for zooming
wx.EVT_LEFT_DOWN(self, self.OnMouseLeftDown)
wx.EVT_LEFT_UP(self, self.OnMouseLeftUp)
wx.EVT_MOTION(self, self.OnMotion)
wx.EVT_LEFT_DCLICK(self, self.OnMouseDoubleClick)
wx.EVT_RIGHT_DOWN(self, self.OnMouseRightDown)
# set cursor as cross-hairs
self.SetCursor(wx.CROSS_CURSOR)
#Things for printing
self.print_data = wx.PrintData()
self.print_data.SetPaperId(wx.PAPER_LETTER)
self.print_data.SetOrientation(wx.LANDSCAPE)
self.pageSetupData= wx.PageSetupDialogData()
self.pageSetupData.SetMarginBottomRight((25,25))
self.pageSetupData.SetMarginTopLeft((25,25))
self.pageSetupData.SetPrintData(self.print_data)
self.printerScale = 1
self.parent= parent
#Zooming variables
self._zoomInFactor = 0.5
self._zoomOutFactor = 2
self._zoomCorner1= Numeric.array([0.0, 0.0]) #left mouse down corner
self._zoomCorner2= Numeric.array([0.0, 0.0]) #left mouse up corner
self._zoomEnabled= False
self._hasDragged= False
#Drawing Variables
self.last_draw = None
self._pointScale= 1
self._pointShift= 0
self._xSpec= 'auto'
self._ySpec= 'auto'
self._gridEnabled= False
self._legendEnabled= False
#Fonts
self._fontCache = {}
self._fontSizeAxis= 10
self._fontSizeTitle= 15
self._fontSizeLegend= 7
# OnSize called to make sure the buffer is initialized.
# This might result in OnSize getting called twice on some
# platforms at initialization, but little harm done.
self.OnSize(None) #sets the initial size based on client size
#SaveFile
wx.InitAllImageHandlers()
def SaveFile(self, fileName= ''):
"""Saves the file to the type specified in the extension. If no file
name is specified a dialog box is provided. Returns True if successful,
otherwise False.
.bmp Save a Windows bitmap file.
.xbm Save an X bitmap file.
.xpm Save an XPM bitmap file.
.png Save a Portable Network Graphics file.
.jpg Save a Joint Photographic Experts Group file.
"""
if string.lower(fileName[-3:]) not in ['bmp','xbm','xpm','png','jpg']:
dlg1 = wx.FileDialog(self, "Choose a file with extension bmp, gif, xbm, xpm, png, or jpg", ".", "",
"BMP files (*.bmp)|*.bmp|XBM files (*.xbm)|*.xbm|XPM file (*.xpm)|*.xpm|PNG files (*.png)|*.png|JPG files (*.jpg)|*.jpg",
wx.SAVE|wx.OVERWRITE_PROMPT)
try:
while 1:
if dlg1.ShowModal() == wx.ID_OK:
fileName = dlg1.GetPath()
#Check for proper extension
if string.lower(fileName[-3:]) not in ['bmp','xbm','xpm','png','jpg']:
dlg2 = wx.MessageDialog(self, 'File name extension\n'
'must be one of\n'
'bmp, xbm, xpm, png, or jpg',
'File Name Error', wx.OK | wx.ICON_ERROR)
try:
dlg2.ShowModal()
finally:
dlg2.Destroy()
else:
break #now save file
else: #exit without saving
return False
finally:
dlg1.Destroy()
#File name has required extension
fType = string.lower(fileName[-3:])
if fType == "bmp":
tp= wx.BITMAP_TYPE_BMP #Save a Windows bitmap file.
elif fType == "xbm":
tp= wx.BITMAP_TYPE_XBM #Save an X bitmap file.
elif fType == "xpm":
tp= wx.BITMAP_TYPE_XPM #Save an XPM bitmap file.
elif fType == "jpg":
tp= wx.BITMAP_TYPE_JPEG #Save a JPG file.
else:
tp= wx.BITMAP_TYPE_PNG #Save a PNG file.
#Save Bitmap
res= self._Buffer.SaveFile(fileName, tp)
return res
def PageSetup(self):
"""Brings up the page setup dialog"""
data = self.pageSetupData
data.SetPrintData(self.print_data)
dlg = wx.PageSetupDialog(self.parent, data)
try:
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData() #returns wxPageSetupDialogData
#updates page parameters from dialog
self.pageSetupData.SetMarginBottomRight(data.GetMarginBottomRight())
self.pageSetupData.SetMarginTopLeft(data.GetMarginTopLeft())
self.pageSetupData.SetPrintData(data.GetPrintData())
self.print_data=data.GetPrintData() #updates print_data
finally:
dlg.Destroy()
def Printout(self, paper=None):
"""Print current plot."""
if paper != None:
self.print_data.SetPaperId(paper)
pdd = wx.PrintDialogData()
pdd.SetPrintData(self.print_data)
printer = wx.Printer(pdd)
out = plot_printout(self)
print_ok = printer.Print(self.parent, out)
if print_ok:
self.print_data = printer.GetPrintDialogData().GetPrintData()
out.Destroy()
def PrintPreview(self):
"""Print-preview current plot."""
printout = plot_printout(self)
printout2 = plot_printout(self)
self.preview = wx.PrintPreview(printout, printout2, self.print_data)
if not self.preview.Ok():
wx.MessageDialog(self, "Print Preview failed.\n" \
"Check that default printer is configured\n", \
"Print error", wx.OK|wx.CENTRE).ShowModal()
self.preview.SetZoom(30)
#search up tree to find frame instance
frameInst= self
while not isinstance(frameInst, wx.Frame):
frameInst= frameInst.GetParent()
frame = wx.PreviewFrame(self.preview, frameInst, "Preview")
frame.Initialize()
frame.SetPosition(self.GetPosition())
frame.SetSize((500,400))
frame.Centre(wx.BOTH)
frame.Show(True)
def SetFontSizeAxis(self, point= 10):
"""Set the tick and axis label font size (default is 10 point)"""
self._fontSizeAxis= point
def GetFontSizeAxis(self):
"""Get current tick and axis label font size in points"""
return self._fontSizeAxis
def SetFontSizeTitle(self, point= 15):
"""Set Title font size (default is 15 point)"""
self._fontSizeTitle= point
def GetFontSizeTitle(self):
"""Get current Title font size in points"""
return self._fontSizeTitle
def SetFontSizeLegend(self, point= 7):
"""Set Legend font size (default is 7 point)"""
self._fontSizeLegend= point
def GetFontSizeLegend(self):
"""Get current Legend font size in points"""
return self._fontSizeLegend
def SetEnableZoom(self, value):
"""Set True to enable zooming."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._zoomEnabled= value
def GetEnableZoom(self):
"""True if zooming enabled."""
return self._zoomEnabled
def SetEnableGrid(self, value):
"""Set True to enable grid."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._gridEnabled= value
self.Redraw()
def GetEnableGrid(self):
"""True if grid enabled."""
return self._gridEnabled
def SetEnableLegend(self, value):
"""Set True to enable legend."""
if value not in [True,False]:
raise TypeError, "Value should be True or False"
self._legendEnabled= value
self.Redraw()
def GetEnableLegend(self):
"""True if Legend enabled."""
return self._legendEnabled
def Reset(self):
"""Unzoom the plot."""
if self.last_draw is not None:
self.Draw(self.last_draw[0])
def ScrollRight(self, units):
"""Move view right number of axis units."""
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
xAxis= (xAxis[0]+units, xAxis[1]+units)
self.Draw(graphics,xAxis,yAxis)
def ScrollUp(self, units):
"""Move view up number of axis units."""
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
yAxis= (yAxis[0]+units, yAxis[1]+units)
self.Draw(graphics,xAxis,yAxis)
def GetXY(self,event):
"""Takes a mouse event and returns the XY user axis values."""
screenPos= Numeric.array( event.GetPosition())
x,y= (screenPos-self._pointShift)/self._pointScale
return x,y
def SetXSpec(self, type= 'auto'):
"""xSpec- defines x axis type. Can be 'none', 'min' or 'auto'
where:
'none' - shows no axis or tick mark values
'min' - shows min bounding box values
'auto' - rounds axis range to sensible values
"""
self._xSpec= type
def SetYSpec(self, type= 'auto'):
"""ySpec- defines x axis type. Can be 'none', 'min' or 'auto'
where:
'none' - shows no axis or tick mark values
'min' - shows min bounding box values
'auto' - rounds axis range to sensible values
"""
self._ySpec= type
def GetXSpec(self):
"""Returns current XSpec for axis"""
return self._xSpec
def GetYSpec(self):
"""Returns current YSpec for axis"""
return self._ySpec
def GetXMaxRange(self):
"""Returns (minX, maxX) x-axis range for displayed graph"""
graphics= self.last_draw[0]
p1, p2 = graphics.boundingBox() #min, max points of graphics
xAxis = self._axisInterval(self._xSpec, p1[0], p2[0]) #in user units
return xAxis
def GetYMaxRange(self):
"""Returns (minY, maxY) y-axis range for displayed graph"""
graphics= self.last_draw[0]
p1, p2 = graphics.boundingBox() #min, max points of graphics
yAxis = self._axisInterval(self._ySpec, p1[1], p2[1])
return yAxis
def GetXCurrentRange(self):
"""Returns (minX, maxX) x-axis for currently displayed portion of graph"""
return self.last_draw[1]
def GetYCurrentRange(self):
"""Returns (minY, maxY) y-axis for currently displayed portion of graph"""
return self.last_draw[2]
def Draw(self, graphics, xAxis = None, yAxis = None, dc = None):
"""Draw objects in graphics with specified x and y axis.
graphics- instance of PlotGraphics with list of PolyXXX objects
xAxis - tuple with (min, max) axis range to view
yAxis - same as xAxis
dc - drawing context - doesn't have to be specified.
If it's not, the offscreen buffer is used
"""
#check Axis is either tuple or none
if type(xAxis) not in [type(None),tuple]:
raise TypeError, "xAxis should be None or (minX,maxX)"
if type(yAxis) not in [type(None),tuple]:
raise TypeError, "yAxis should be None or (minY,maxY)"
#check case for axis = (a,b) where a==b caused by improper zooms
if xAxis != None:
if xAxis[0] == xAxis[1]:
return
if yAxis != None:
if yAxis[0] == yAxis[1]:
return
if dc == None:
# allows using floats for certain functions
dc = FloatDCWrapper(wx.BufferedDC(wx.ClientDC(self), self._Buffer))
dc.Clear()
dc.BeginDrawing()
#dc.Clear()
#set font size for every thing but title and legend
dc.SetFont(self._getFont(self._fontSizeAxis))
#sizes axis to axis type, create lower left and upper right corners of plot
if xAxis == None or yAxis == None:
#One or both axis not specified in Draw
p1, p2 = graphics.boundingBox() #min, max points of graphics
if xAxis == None:
xAxis = self._axisInterval(self._xSpec, p1[0], p2[0]) #in user units
if yAxis == None:
yAxis = self._axisInterval(self._ySpec, p1[1], p2[1])
#Adjust bounding box for axis spec
p1[0],p1[1] = xAxis[0], yAxis[0] #lower left corner user scale (xmin,ymin)
p2[0],p2[1] = xAxis[1], yAxis[1] #upper right corner user scale (xmax,ymax)
else:
#Both axis specified in Draw
p1= Numeric.array([xAxis[0], yAxis[0]]) #lower left corner user scale (xmin,ymin)
p2= Numeric.array([xAxis[1], yAxis[1]]) #upper right corner user scale (xmax,ymax)
self.last_draw = (graphics, xAxis, yAxis) #saves most recent values
#Get ticks and textExtents for axis if required
if self._xSpec != 'none':
xticks = self._ticks(xAxis[0], xAxis[1])
xTextExtent = dc.GetTextExtent(xticks[-1][1])#w h of x axis text last number on axis
else:
xticks = None
xTextExtent= (0,0) #No text for ticks
if self._ySpec != 'none':
yticks = self._ticks(yAxis[0], yAxis[1])
yTextExtentBottom= dc.GetTextExtent(yticks[0][1])
yTextExtentTop = dc.GetTextExtent(yticks[-1][1])
yTextExtent= (max(yTextExtentBottom[0],yTextExtentTop[0]),
max(yTextExtentBottom[1],yTextExtentTop[1]))
else:
yticks = None
yTextExtent= (0,0) #No text for ticks
#TextExtents for Title and Axis Labels
titleWH, xLabelWH, yLabelWH= self._titleLablesWH(dc, graphics)
#TextExtents for Legend
legendBoxWH, legendSymExt, legendTextExt = self._legendWH(dc, graphics)
#room around graph area
rhsW= max(xTextExtent[0], legendBoxWH[0]) #use larger of number width or legend width
lhsW= yTextExtent[0]+ yLabelWH[1]
bottomH= max(xTextExtent[1], yTextExtent[1]/2.)+ xLabelWH[1]
topH= yTextExtent[1]/2. + titleWH[1]
textSize_scale= Numeric.array([rhsW+lhsW,bottomH+topH]) #make plot area smaller by text size
textSize_shift= Numeric.array([lhsW, bottomH]) #shift plot area by this amount
#drawing title and labels text
dc.SetFont(self._getFont(self._fontSizeTitle))
titlePos= (self.plotbox_origin[0]+ lhsW + (self.plotbox_size[0]-lhsW-rhsW)/2.- titleWH[0]/2.,
self.plotbox_origin[1]- self.plotbox_size[1])
dc.DrawText(graphics.getTitle(),titlePos[0],titlePos[1])
dc.SetFont(self._getFont(self._fontSizeAxis))
xLabelPos= (self.plotbox_origin[0]+ lhsW + (self.plotbox_size[0]-lhsW-rhsW)/2.- xLabelWH[0]/2.,
self.plotbox_origin[1]- xLabelWH[1])
dc.DrawText(graphics.getXLabel(),xLabelPos[0],xLabelPos[1])
yLabelPos= (self.plotbox_origin[0],
self.plotbox_origin[1]- bottomH- (self.plotbox_size[1]-bottomH-topH)/2.+ yLabelWH[0]/2.)
if graphics.getYLabel(): #bug fix for Linux
dc.DrawRotatedText(graphics.getYLabel(),yLabelPos[0],yLabelPos[1],90)
#drawing legend makers and text
if self._legendEnabled:
self._drawLegend(dc,graphics,rhsW,topH,legendBoxWH, legendSymExt, legendTextExt)
#allow for scaling and shifting plotted points
scale = (self.plotbox_size-textSize_scale) / (p2-p1)* Numeric.array((1,-1))
shift = -p1*scale + self.plotbox_origin + textSize_shift * Numeric.array((1,-1))
self._pointScale= scale #make available for mouse events
self._pointShift= shift
self._drawAxes(dc, p1, p2, scale, shift, xticks, yticks)
graphics.scaleAndShift(scale, shift)
graphics.setPrinterScale(self.printerScale) #thicken up lines and markers if printing
#set clipping area so drawing does not occur outside axis box
ptx,pty,rectWidth,rectHeight= self._point2ClientCoord(p1, p2)
dc.SetClippingRegion(ptx,pty,rectWidth,rectHeight)
#Draw the lines and markers
#start = time.clock()
graphics.draw(dc)
#print "entire graphics drawing took: %f second"%(time.clock() - start)
#remove the clipping region
dc.DestroyClippingRegion()
dc.EndDrawing()
def Redraw(self, dc= None):
"""Redraw the existing plot."""
if self.last_draw is not None:
graphics, xAxis, yAxis= self.last_draw
self.Draw(graphics,xAxis,yAxis,dc)
def Clear(self):
"""Erase the window."""
dc = wx.BufferedDC(wx.ClientDC(self), self._Buffer)
dc.Clear()
self.last_draw = None
def Zoom(self, Center, Ratio):
""" Zoom on the plot
Centers on the X,Y coords given in Center
Zooms by the Ratio = (Xratio, Yratio) given
"""
x,y = Center
if self.last_draw != None:
(graphics, xAxis, yAxis) = self.last_draw
w = (xAxis[1] - xAxis[0]) * Ratio[0]
h = (yAxis[1] - yAxis[0]) * Ratio[1]
xAxis = ( x - w/2, x + w/2 )
yAxis = ( y - h/2, y + h/2 )
self.Draw(graphics, xAxis, yAxis)
# event handlers **********************************
def OnMotion(self, event):
if self._zoomEnabled and event.LeftIsDown():
if self._hasDragged:
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) #remove old
else:
self._hasDragged= True
self._zoomCorner2[0], self._zoomCorner2[1] = self.GetXY(event)
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) #add new
def OnMouseLeftDown(self,event):
self._zoomCorner1[0], self._zoomCorner1[1]= self.GetXY(event)
def OnMouseLeftUp(self, event):
if self._zoomEnabled:
if self._hasDragged == True:
self._drawRubberBand(self._zoomCorner1, self._zoomCorner2) #remove old
self._zoomCorner2[0], self._zoomCorner2[1]= self.GetXY(event)
self._hasDragged = False #reset flag
minX, minY= Numeric.minimum( self._zoomCorner1, self._zoomCorner2)
maxX, maxY= Numeric.maximum( self._zoomCorner1, self._zoomCorner2)
if self.last_draw != None:
self.Draw(self.last_draw[0], xAxis = (minX,maxX), yAxis = (minY,maxY), dc = None)
#else: # A box has not been drawn, zoom in on a point
## this interfered with the double click, so I've disabled it.
# X,Y = self.GetXY(event)
# self.Zoom( (X,Y), (self._zoomInFactor,self._zoomInFactor) )
def OnMouseDoubleClick(self,event):
if self._zoomEnabled:
self.Reset()
def OnMouseRightDown(self,event):
if self._zoomEnabled:
X,Y = self.GetXY(event)
self.Zoom( (X,Y), (self._zoomOutFactor, self._zoomOutFactor) )
def OnPaint(self, event):
# All that is needed here is to draw the buffer to screen
dc = wx.BufferedPaintDC(self, self._Buffer)
def OnSize(self,event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
Size = self.GetClientSizeTuple()
# Make new offscreen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
self._Buffer = wx.EmptyBitmap(Size[0],Size[1])
self._setSize()
if self.last_draw is None:
self.Clear()
else:
graphics, xSpec, ySpec = self.last_draw
self.Draw(graphics,xSpec,ySpec)
#Private Methods **************************************************
def _setSize(self, width=None, height=None):
"""DC width and height."""
if width == None:
(self.width,self.height) = self.GetClientSizeTuple()
else:
self.width, self.height= width,height
self.plotbox_size = 0.97*Numeric.array([self.width, self.height])
xo = 0.5*(self.width-self.plotbox_size[0])
yo = self.height-0.5*(self.height-self.plotbox_size[1])
self.plotbox_origin = Numeric.array([xo, yo])
def _setPrinterScale(self, scale):
"""Used to thicken lines and increase marker size for print out."""
#line thickness on printer is very thin at 600 dot/in. Markers small
self.printerScale= scale
def _printDraw(self, printDC):
"""Used for printing."""
if self.last_draw != None:
graphics, xSpec, ySpec= self.last_draw
self.Draw(graphics,xSpec,ySpec,printDC)
def _drawLegend(self,dc,graphics,rhsW,topH,legendBoxWH, legendSymExt, legendTextExt):
"""Draws legend symbols and text"""
#top right hand corner of graph box is ref corner
trhc= self.plotbox_origin+ (self.plotbox_size-[rhsW,topH])*[1,-1]
legendLHS= .091* legendBoxWH[0] #border space between legend sym and graph box
lineHeight= max(legendSymExt[1], legendTextExt[1]) * 1.1 #1.1 used as space between lines
dc.SetFont(self._getFont(self._fontSizeLegend))
for i in range(len(graphics)):
o = graphics[i]
s= i*lineHeight
if isinstance(o,PolyMarker):
#draw marker with legend
pnt= (trhc[0]+legendLHS+legendSymExt[0]/2., trhc[1]+s+lineHeight/2.)
o.draw(dc, self.printerScale, coord= Numeric.array([pnt]))
elif isinstance(o,PolyLine):
#draw line with legend
pnt1= (trhc[0]+legendLHS, trhc[1]+s+lineHeight/2.)
pnt2= (trhc[0]+legendLHS+legendSymExt[0], trhc[1]+s+lineHeight/2.)
o.draw(dc, self.printerScale, coord= Numeric.array([pnt1,pnt2]))
else:
raise TypeError, "object is neither PolyMarker or PolyLine instance"
#draw legend txt
pnt= (trhc[0]+legendLHS+legendSymExt[0], trhc[1]+s+lineHeight/2.-legendTextExt[1]/2)
dc.DrawText(o.getLegend(),pnt[0],pnt[1])
dc.SetFont(self._getFont(self._fontSizeAxis)) #reset
def _titleLablesWH(self, dc, graphics):
"""Draws Title and labels and returns width and height for each"""
#TextExtents for Title and Axis Labels
dc.SetFont(self._getFont(self._fontSizeTitle))
title= graphics.getTitle()
titleWH= dc.GetTextExtent(title)
dc.SetFont(self._getFont(self._fontSizeAxis))
xLabel, yLabel= graphics.getXLabel(),graphics.getYLabel()
xLabelWH= dc.GetTextExtent(xLabel)
yLabelWH= dc.GetTextExtent(yLabel)
return titleWH, xLabelWH, yLabelWH
def _legendWH(self, dc, graphics):
"""Returns the size in screen units for legend box"""
if self._legendEnabled != True:
legendBoxWH= symExt= txtExt= (0,0)
else:
#find max symbol size
symExt= graphics.getSymExtent(self.printerScale)
#find max legend text extent
dc.SetFont(self._getFont(self._fontSizeLegend))
txtList= graphics.getLegendNames()
txtExt= dc.GetTextExtent(txtList[0])
for txt in graphics.getLegendNames()[1:]:
txtExt= Numeric.maximum(txtExt,dc.GetTextExtent(txt))
maxW= symExt[0]+txtExt[0]
maxH= max(symExt[1],txtExt[1])
#padding .1 for lhs of legend box and space between lines
maxW= maxW* 1.1
maxH= maxH* 1.1 * len(txtList)
dc.SetFont(self._getFont(self._fontSizeAxis))
legendBoxWH= (maxW,maxH)
return (legendBoxWH, symExt, txtExt)
def _drawRubberBand(self, corner1, corner2):
"""Draws/erases rect box from corner1 to corner2"""
ptx,pty,rectWidth,rectHeight= self._point2ClientCoord(corner1, corner2)
#draw rectangle
dc = wx.ClientDC( self )
dc.BeginDrawing()
dc.SetPen(wx.Pen(wx.BLACK))
dc.SetBrush(wx.Brush( wx.WHITE, wx.TRANSPARENT ) )
dc.SetLogicalFunction(wx.INVERT)
dc.DrawRectangle( ptx,pty,rectWidth,rectHeight)
dc.SetLogicalFunction(wx.COPY)
dc.EndDrawing()
def _getFont(self,size):
"""Take font size, adjusts if printing and returns wxFont"""
s = size*self.printerScale
of = self.GetFont()
#Linux speed up to get font from cache rather than X font server
key = (int(s), of.GetFamily (), of.GetStyle (), of.GetWeight ())
font = self._fontCache.get (key, None)
if font:
return font # yeah! cache hit
else:
font = wx.Font(int(s), of.GetFamily(), of.GetStyle(), of.GetWeight())
self._fontCache[key] = font
return font
def _point2ClientCoord(self, corner1, corner2):
"""Converts user point coords to client screen int coords x,y,width,height"""
c1= Numeric.array(corner1)
c2= Numeric.array(corner2)
#convert to screen coords
pt1= c1*self._pointScale+self._pointShift
pt2= c2*self._pointScale+self._pointShift
#make height and width positive
pul= Numeric.minimum(pt1,pt2) #Upper left corner
plr= Numeric.maximum(pt1,pt2) #Lower right corner
rectWidth, rectHeight= plr-pul
ptx,pty= pul
return int(ptx),int(pty),int(rectWidth),int(rectHeight) #return ints
def _axisInterval(self, spec, lower, upper):
"""Returns sensible axis range for given spec"""
if spec == 'none' or spec == 'min':
if lower == upper:
return lower-0.5, upper+0.5
else:
return lower, upper
elif spec == 'auto':
range = upper-lower
if range == 0.:
return lower-0.5, upper+0.5
log = Numeric.log10(range)
power = Numeric.floor(log)
fraction = log-power
if fraction <= 0.05:
power = power-1
grid = 10.**power
lower = lower - lower % grid
mod = upper % grid
if mod != 0:
upper = upper - mod + grid
return lower, upper
elif type(spec) == type(()):
lower, upper = spec
if lower <= upper:
return lower, upper
else:
return upper, lower
else:
raise ValueError, str(spec) + ': illegal axis specification'
def _drawAxes(self, dc, p1, p2, scale, shift, xticks, yticks):
penWidth= self.printerScale #increases thickness for printing only
dc.SetPen(wx.Pen(wx.NamedColour('BLACK'),int(penWidth)))
#set length of tick marks--long ones make grid
if self._gridEnabled:
x,y,width,height= self._point2ClientCoord(p1,p2)
yTickLength= width/2.0 +1
xTickLength= height/2.0 +1
else:
yTickLength= 3 * self.printerScale #lengthens lines for printing
xTickLength= 3 * self.printerScale
if self._xSpec != 'none':
lower, upper = p1[0],p2[0]
text = 1
for y, d in [(p1[1], -xTickLength), (p2[1], xTickLength)]: #miny, maxy and tick lengths
a1 = scale*Numeric.array([lower, y])+shift
a2 = scale*Numeric.array([upper, y])+shift
dc.DrawLine(a1[0],a1[1],a2[0],a2[1]) #draws upper and lower axis line
for x, label in xticks:
pt = scale*Numeric.array([x, y])+shift
dc.DrawLine(pt[0],pt[1],pt[0],pt[1] + d) #draws tick mark d units
if text:
dc.DrawText(label,pt[0],pt[1])
text = 0 #axis values not drawn on top side
if self._ySpec != 'none':
lower, upper = p1[1],p2[1]
text = 1
h = dc.GetCharHeight()
for x, d in [(p1[0], -yTickLength), (p2[0], yTickLength)]:
a1 = scale*Numeric.array([x, lower])+shift
a2 = scale*Numeric.array([x, upper])+shift
dc.DrawLine(a1[0],a1[1],a2[0],a2[1])
for y, label in yticks:
pt = scale*Numeric.array([x, y])+shift
dc.DrawLine(pt[0],pt[1],pt[0]-d,pt[1])
if text:
dc.DrawText(label,pt[0]-dc.GetTextExtent(label)[0],
pt[1]-0.5*h)
text = 0 #axis values not drawn on right side
def _ticks(self, lower, upper):
ideal = (upper-lower)/7.
log = Numeric.log10(ideal)
power = Numeric.floor(log)
fraction = log-power
factor = 1.
error = fraction
for f, lf in self._multiples:
e = Numeric.fabs(fraction-lf)
if e < error:
error = e
factor = f
grid = factor * 10.**power
if power > 4 or power < -4:
format = '%+7.1e'
elif power >= 0:
digits = max(1, int(power))
format = '%' + `digits`+'.0f'
else:
digits = -int(power)
format = '%'+`digits+2`+'.'+`digits`+'f'
ticks = []
t = -grid*Numeric.floor(-lower/grid)
while t <= upper:
ticks.append( (t, format % (t,)) )
t = t + grid
return ticks
_multiples = [(2., Numeric.log10(2.)), (5., Numeric.log10(5.))]
#-------------------------------------------------------------------------------
#Used to layout the printer page
class plot_printout(wx.Printout):
"""Controls how the plot is made in printing and previewing"""
# Do not change method names in this class,
# we have to override wxPrintout methods here!
def __init__(self, graph):
"""graph is instance of plotCanvas to be printed or previewed"""
wx.Printout.__init__(self)
self.graph = graph
def HasPage(self, page):
if page == 1:
return True
else:
return False
def GetPageInfo(self):
return (0, 1, 1, 1) #disable page numbers
def OnPrintPage(self, page):
dc = FloatDCWrapper(self.GetDC()) # allows using floats for certain functions
## print "PPI Printer",self.GetPPIPrinter()
## print "PPI Screen", self.GetPPIScreen()
## print "DC GetSize", dc.GetSize()
## print "GetPageSizePixels", self.GetPageSizePixels()
#Note PPIScreen does not give the correct number
#Calculate everything for printer and then scale for preview
PPIPrinter= self.GetPPIPrinter() #printer dots/inch (w,h)
#PPIScreen= self.GetPPIScreen() #screen dots/inch (w,h)
dcSize= dc.GetSizeTuple() #DC size
pageSize= self.GetPageSizePixels() #page size in terms of pixels
clientDcSize= self.graph.GetClientSizeTuple()
#find what the margins are (mm)
margLeftSize,margTopSize= self.graph.pageSetupData.GetMarginTopLeft()
margRightSize, margBottomSize= self.graph.pageSetupData.GetMarginBottomRight()
#calculate offset and scale for dc
pixLeft= margLeftSize*PPIPrinter[0]/25.4 #mm*(dots/in)/(mm/in)
pixRight= margRightSize*PPIPrinter[0]/25.4
pixTop= margTopSize*PPIPrinter[1]/25.4
pixBottom= margBottomSize*PPIPrinter[1]/25.4
plotAreaW= pageSize[0]-(pixLeft+pixRight)
plotAreaH= pageSize[1]-(pixTop+pixBottom)
#ratio offset and scale to screen size if preview
if self.IsPreview():
ratioW= float(dcSize[0])/pageSize[0]
ratioH= float(dcSize[1])/pageSize[1]
pixLeft *= ratioW
pixTop *= ratioH
plotAreaW *= ratioW
plotAreaH *= ratioH
#rescale plot to page or preview plot area
self.graph._setSize(plotAreaW,plotAreaH)
#Set offset and scale
dc.SetDeviceOrigin(pixLeft,pixTop)
#Thicken up pens and increase marker size for printing
ratioW= float(plotAreaW)/clientDcSize[0]
ratioH= float(plotAreaH)/clientDcSize[1]
aveScale= (ratioW+ratioH)/2
self.graph._setPrinterScale(aveScale) #thickens up pens for printing
self.graph._printDraw(dc)
#rescale back to original
self.graph._setSize()
self.graph._setPrinterScale(1)
return True
# Hack to allow plotting real numbers for the methods listed.
# All others passed directly to DC.
# For Drawing it is used as
# dc = FloatDCWrapper(wx.BufferedDC(wx.ClientDC(self), self._Buffer))
# For printing it is used as
# dc = FloatDCWrapper(self.GetDC())
class FloatDCWrapper:
def __init__(self, aDC):
self.theDC = aDC
def DrawLine(self, x1,y1,x2,y2):
self.theDC.DrawLine(int(x1),int(y1),int(x2),int(y2))
def DrawText(self, txt, x, y):
self.theDC.DrawText(txt, int(x), int(y))
def DrawRotatedText(self, txt, x, y, angle):
self.theDC.DrawRotatedText(txt, int(x), int(y), angle)
def SetClippingRegion(self, x, y, width, height):
self.theDC.SetClippingRegion(int(x), int(y), int(width), int(height))
def SetDeviceOrigin(self, x, y):
self.theDC.SetDeviceOrigin(int(x), int(y))
def __getattr__(self, name):
return getattr(self.theDC, name)
#---------------------------------------------------------------------------
# if running standalone...
#
# ...a sample implementation using the above
#
def __test():
from wxPython.lib.dialogs import wxScrolledMessageDialog
def _draw1Objects():
# 100 points sin function, plotted as green circles
data1 = 2.*Numeric.pi*Numeric.arange(200)/200.
data1.shape = (100, 2)
data1[:,1] = Numeric.sin(data1[:,0])
markers1 = PolyMarker(data1, legend='Green Markers', colour='green', marker='circle',size=1)
# 50 points cos function, plotted as red line
data1 = 2.*Numeric.pi*Numeric.arange(100)/100.
data1.shape = (50,2)
data1[:,1] = Numeric.cos(data1[:,0])
lines = PolyLine(data1, legend= 'Red Line', colour='red')
# A few more points...
pi = Numeric.pi
markers2 = PolyMarker([(0., 0.), (pi/4., 1.), (pi/2, 0.),
(3.*pi/4., -1)], legend='Cross Legend', colour='blue',
marker='cross')
return PlotGraphics([markers1, lines, markers2],"Graph Title", "X Axis", "Y Axis")
def _draw2Objects():
# 100 points sin function, plotted as green dots
data1 = 2.*Numeric.pi*Numeric.arange(200)/200.
data1.shape = (100, 2)
data1[:,1] = Numeric.sin(data1[:,0])
line1 = PolyLine(data1, legend='Green Line', colour='green', width=6, style=wx.DOT)
# 50 points cos function, plotted as red dot-dash
data1 = 2.*Numeric.pi*Numeric.arange(100)/100.
data1.shape = (50,2)
data1[:,1] = Numeric.cos(data1[:,0])
line2 = PolyLine(data1, legend='Red Line', colour='red', width=3, style= wx.DOT_DASH)
# A few more points...
pi = Numeric.pi
markers1 = PolyMarker([(0., 0.), (pi/4., 1.), (pi/2, 0.),
(3.*pi/4., -1)], legend='Cross Hatch Square', colour='blue', width= 3, size= 6,
fillcolour= 'red', fillstyle= wx.CROSSDIAG_HATCH,
marker='square')
return PlotGraphics([markers1, line1, line2], "Big Markers with Different Line Styles")
def _draw3Objects():
markerList= ['circle', 'dot', 'square', 'triangle', 'triangle_down',
'cross', 'plus', 'circle']
m=[]
for i in range(len(markerList)):
m.append(PolyMarker([(2*i+.5,i+.5)], legend=markerList[i], colour='blue',
marker=markerList[i]))
return PlotGraphics(m, "Selection of Markers", "Minimal Axis", "No Axis")
def _draw4Objects():
# 25,000 point line
data1 = Numeric.arange(5e5,1e6,10)
data1.shape = (25000, 2)
line1 = PolyLine(data1, legend='Wide Line', colour='green', width=5)
# A few more points...
markers2 = PolyMarker(data1, legend='Square', colour='blue',
marker='square')
return PlotGraphics([line1, markers2], "25,000 Points", "Value X", "")
def _draw5Objects():
# Empty graph with axis defined but no points/lines
points=[]
line1 = PolyLine(points, legend='Wide Line', colour='green', width=5)
return PlotGraphics([line1], "Empty Plot With Just Axes", "Value X", "Value Y")
class AppFrame(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title,
wx.DefaultPosition, wx.Size(600, 400))
# Now Create the menu bar and items
self.mainmenu = wx.MenuBar()
menu = wx.Menu()
menu.Append(200, 'Page Setup...', 'Setup the printer page')
wx.EVT_MENU(self, 200, self.OnFilePageSetup)
menu.Append(201, 'Print Preview...', 'Show the current plot on page')
wx.EVT_MENU(self, 201, self.OnFilePrintPreview)
menu.Append(202, 'Print...', 'Print the current plot')
wx.EVT_MENU(self, 202, self.OnFilePrint)
menu.Append(203, 'Save Plot...', 'Save current plot')
wx.EVT_MENU(self, 203, self.OnSaveFile)
menu.Append(205, 'E&xit', 'Enough of this already!')
wx.EVT_MENU(self, 205, self.OnFileExit)
self.mainmenu.Append(menu, '&File')
menu = wx.Menu()
menu.Append(206, 'Draw1', 'Draw plots1')
wx.EVT_MENU(self,206,self.OnPlotDraw1)
menu.Append(207, 'Draw2', 'Draw plots2')
wx.EVT_MENU(self,207,self.OnPlotDraw2)
menu.Append(208, 'Draw3', 'Draw plots3')
wx.EVT_MENU(self,208,self.OnPlotDraw3)
menu.Append(209, 'Draw4', 'Draw plots4')
wx.EVT_MENU(self,209,self.OnPlotDraw4)
menu.Append(210, 'Draw5', 'Draw plots5')
wx.EVT_MENU(self,210,self.OnPlotDraw5)
menu.Append(211, '&Redraw', 'Redraw plots')
wx.EVT_MENU(self,211,self.OnPlotRedraw)
menu.Append(212, '&Clear', 'Clear canvas')
wx.EVT_MENU(self,212,self.OnPlotClear)
menu.Append(213, '&Scale', 'Scale canvas')
wx.EVT_MENU(self,213,self.OnPlotScale)
menu.Append(214, 'Enable &Zoom', 'Enable Mouse Zoom', kind=wx.ITEM_CHECK)
wx.EVT_MENU(self,214,self.OnEnableZoom)
menu.Append(215, 'Enable &Grid', 'Turn on Grid', kind=wx.ITEM_CHECK)
wx.EVT_MENU(self,215,self.OnEnableGrid)
menu.Append(220, 'Enable &Legend', 'Turn on Legend', kind=wx.ITEM_CHECK)
wx.EVT_MENU(self,220,self.OnEnableLegend)
menu.Append(225, 'Scroll Up 1', 'Move View Up 1 Unit')
wx.EVT_MENU(self,225,self.OnScrUp)
menu.Append(230, 'Scroll Rt 2', 'Move View Right 2 Units')
wx.EVT_MENU(self,230,self.OnScrRt)
menu.Append(235, '&Plot Reset', 'Reset to original plot')
wx.EVT_MENU(self,235,self.OnReset)
self.mainmenu.Append(menu, '&Plot')
menu = wx.Menu()
menu.Append(300, '&About', 'About this thing...')
wx.EVT_MENU(self, 300, self.OnHelpAbout)
self.mainmenu.Append(menu, '&Help')
self.SetMenuBar(self.mainmenu)
# A status bar to tell people what's happening
self.CreateStatusBar(1)
self.client = PlotCanvas(self)
#Create mouse event for showing cursor coords in status bar
wx.EVT_LEFT_DOWN(self.client, self.OnMouseLeftDown)
def OnMouseLeftDown(self,event):
s= "Left Mouse Down at Point: (%.4f, %.4f)" % self.client.GetXY(event)
self.SetStatusText(s)
event.Skip()
def OnFilePageSetup(self, event):
self.client.PageSetup()
def OnFilePrintPreview(self, event):
self.client.PrintPreview()
def OnFilePrint(self, event):
self.client.Printout()
def OnSaveFile(self, event):
self.client.SaveFile()
def OnFileExit(self, event):
self.Close()
def OnPlotDraw1(self, event):
self.resetDefaults()
self.client.Draw(_draw1Objects())
def OnPlotDraw2(self, event):
self.resetDefaults()
self.client.Draw(_draw2Objects())
def OnPlotDraw3(self, event):
self.resetDefaults()
self.client.SetFont(wx.Font(10,wx.SCRIPT,wx.NORMAL,wx.NORMAL))
self.client.SetFontSizeAxis(20)
self.client.SetFontSizeLegend(12)
self.client.SetXSpec('min')
self.client.SetYSpec('none')
self.client.Draw(_draw3Objects())
def OnPlotDraw4(self, event):
self.resetDefaults()
drawObj= _draw4Objects()
self.client.Draw(drawObj)
## #profile
## start = time.clock()
## for x in range(10):
## self.client.Draw(drawObj)
## print "10 plots of Draw4 took: %f sec."%(time.clock() - start)
## #profile end
def OnPlotDraw5(self, event):
#Empty plot with just axes
self.resetDefaults()
drawObj= _draw5Objects()
#make the axis X= (0,5), Y=(0,10)
#(default with None is X= (-1,1), Y= (-1,1))
self.client.Draw(drawObj, xAxis= (0,5), yAxis= (0,10))
def OnPlotRedraw(self,event):
self.client.Redraw()
def OnPlotClear(self,event):
self.client.Clear()
def OnPlotScale(self, event):
if self.client.last_draw != None:
graphics, xAxis, yAxis= self.client.last_draw
self.client.Draw(graphics,(1,3.05),(0,1))
def OnEnableZoom(self, event):
self.client.SetEnableZoom(event.IsChecked())
def OnEnableGrid(self, event):
self.client.SetEnableGrid(event.IsChecked())
def OnEnableLegend(self, event):
self.client.SetEnableLegend(event.IsChecked())
def OnScrUp(self, event):
self.client.ScrollUp(1)
def OnScrRt(self,event):
self.client.ScrollRight(2)
def OnReset(self,event):
self.client.Reset()
def OnHelpAbout(self, event):
about = wxScrolledMessageDialog(self, __doc__, "About...")
about.ShowModal()
def resetDefaults(self):
"""Just to reset the fonts back to the PlotCanvas defaults"""
self.client.SetFont(wx.Font(10,wx.SWISS,wx.NORMAL,wx.NORMAL))
self.client.SetFontSizeAxis(10)
self.client.SetFontSizeLegend(7)
self.client.SetXSpec('auto')
self.client.SetYSpec('auto')
class MyApp(wx.App):
def OnInit(self):
frame = AppFrame(None, -1, "wxPlotCanvas")
frame.Show(True)
self.SetTopWindow(frame)
return True
app = MyApp(0)
app.MainLoop()
if __name__ == '__main__':
__test()
|
{
"content_hash": "8599263dc283a5d4d13c69efcfbde497",
"timestamp": "",
"source": "github",
"line_count": 1461,
"max_line_length": 134,
"avg_line_length": 39.27926078028747,
"alnum_prop": 0.5665568857058219,
"repo_name": "ivoflipse/devide",
"id": "e8cca12ebf8d7979036aa8c1e4bdb99c92a772ab",
"size": "57752",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "external/wxPyPlot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3102319"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
}
|
"""Private utility functions."""
from collections import ChainMap
from datetime import datetime, date, time, timedelta, tzinfo
from functools import partial
from pickle import dumps, loads, HIGHEST_PROTOCOL
from msgpack import ExtType, packb, unpackb
_default = {
127: (date, partial(dumps, protocol=HIGHEST_PROTOCOL), loads),
126: (datetime, partial(dumps, protocol=HIGHEST_PROTOCOL), loads),
125: (time, partial(dumps, protocol=HIGHEST_PROTOCOL), loads),
124: (timedelta, partial(dumps, protocol=HIGHEST_PROTOCOL), loads),
123: (tzinfo, partial(dumps, protocol=HIGHEST_PROTOCOL), loads),
}
class _Packer:
def __init__(self, *, translation_table=None):
if translation_table is None:
translation_table = _default
else:
translation_table = ChainMap(translation_table, _default)
self.translation_table = translation_table
self._pack_cache = {}
self._unpack_cache = {}
for code in sorted(self.translation_table):
cls, packer, unpacker = self.translation_table[code]
self._pack_cache[cls] = (code, packer)
self._unpack_cache[code] = unpacker
def packb(self, data):
return packb(data, encoding='utf-8', use_bin_type=True,
default=self.ext_type_pack_hook)
def unpackb(self, packed):
return unpackb(packed, use_list=False, encoding='utf-8',
ext_hook=self.ext_type_unpack_hook)
def ext_type_pack_hook(self, obj, _sentinel=object()):
obj_class = obj.__class__
hit = self._pack_cache.get(obj_class, _sentinel)
if hit is None:
# packer was not found by a previous long lookup
raise TypeError("Unknown type: {!r}".format(obj))
elif hit is _sentinel:
# do long-lookup
for code in sorted(self.translation_table):
cls, packer, unpacker = self.translation_table[code]
if isinstance(obj, cls):
self._pack_cache[obj_class] = (code, packer)
self._unpack_cache[code] = unpacker
return ExtType(code, packer(obj))
else:
self._pack_cache[obj_class] = None
raise TypeError("Unknown type: {!r}".format(obj))
else:
# do shortcut
code, packer = hit
return ExtType(code, packer(obj))
def ext_type_unpack_hook(self, code, data):
try:
unpacker = self._unpack_cache[code]
return unpacker(data)
except KeyError:
return ExtType(code, data)
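# A small usage sketch of the packer (not part of the module; the Point type
# and ext code 120 are invented for illustration):
#
#     from collections import namedtuple
#     from datetime import datetime
#     Point = namedtuple('Point', 'x y')
#     table = {120: (Point,
#                    lambda p: packb(tuple(p)),
#                    lambda b: Point(*unpackb(b)))}
#     packer = _Packer(translation_table=table)
#     data = {'when': datetime(2020, 1, 1), 'at': Point(1, 2)}
#     assert packer.unpackb(packer.packb(data)) == data
#
# Built-in date/time types round-trip through pickle via the _default table;
# a user-supplied entry with the same code would shadow the default because
# the ChainMap consults translation_table first.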
|
{
"content_hash": "b97375b0fd4587455ac108df9071defe",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 71,
"avg_line_length": 38.26086956521739,
"alnum_prop": 0.5984848484848485,
"repo_name": "claws/aiozmq",
"id": "ce4decaea673f82872421af66dfd9995b21a3445",
"size": "2640",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "aiozmq/rpc/packer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "882"
},
{
"name": "Python",
"bytes": "317723"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import base64
import logging
from collections import Counter
from typing import cast
from pants.engine.internals.scheduler import Workunit
from pants.engine.rules import collect_rules, rule
from pants.engine.streaming_workunit_handler import (
StreamingWorkunitContext,
WorkunitsCallback,
WorkunitsCallbackFactory,
WorkunitsCallbackFactoryRequest,
)
from pants.engine.unions import UnionRule
from pants.option.subsystem import Subsystem
logger = logging.getLogger(__name__)
class StatsAggregatorSubsystem(Subsystem):
options_scope = "stats"
help = "An aggregator for Pants stats, such as cache metrics."
@classmethod
def register_options(cls, register):
register(
"--log",
advanced=True,
type=bool,
default=False,
help=(
"At the end of the Pants run, log all counter metrics and summaries of "
"observation histograms, e.g. the number of cache hits and the time saved by "
"caching.\n\nFor histogram summaries to work, you must add `hdrhistogram` to "
"`[GLOBAL].plugins`."
),
)
@property
def log(self) -> bool:
return cast(bool, self.options.log)
class StatsAggregatorCallback(WorkunitsCallback):
def __init__(self, *, enabled: bool, has_histogram_module: bool) -> None:
super().__init__()
self.enabled = enabled
self.has_histogram_module = has_histogram_module
self.counters: Counter = Counter()
@property
def can_finish_async(self) -> bool:
# If the callback is activated, we need to finish synchronously for access to the console.
return not self.enabled
def __call__(
self,
*,
started_workunits: tuple[Workunit, ...],
completed_workunits: tuple[Workunit, ...],
finished: bool,
context: StreamingWorkunitContext,
) -> None:
if not self.enabled:
return
# Aggregate counters on completed workunits.
for workunit in completed_workunits:
if "counters" in workunit:
for name, value in workunit["counters"].items():
self.counters[name] += value
if not finished:
return
# Add any counters with a count of 0.
for counter in context.run_tracker.counter_names:
if counter not in self.counters:
self.counters[counter] = 0
# Log aggregated counters.
counter_lines = "\n".join(
f" {name}: {count}" for name, count in sorted(self.counters.items())
)
logger.info(f"Counters:\n{counter_lines}")
if not self.has_histogram_module:
return
from hdrh.histogram import HdrHistogram
histograms = context.get_observation_histograms()["histograms"]
if not histograms:
logger.info("No observation histogram were recorded.")
return
logger.info("Observation histogram summaries:")
for name, encoded_histogram in histograms.items():
# Note: The Python library for HDR Histogram will only decode compressed histograms
# that are further encoded with base64. See
# https://github.com/HdrHistogram/HdrHistogram_py/issues/29.
histogram = HdrHistogram.decode(base64.b64encode(encoded_histogram))
percentile_to_vals = "\n".join(
f" p{percentile}: {value}"
for percentile, value in histogram.get_percentile_to_value_dict(
[25, 50, 75, 90, 95, 99]
).items()
)
logger.info(
f"Summary of `{name}` observation histogram:\n"
f" min: {histogram.get_min_value()}\n"
f" max: {histogram.get_max_value()}\n"
f" mean: {histogram.get_mean_value():.3f}\n"
f" std dev: {histogram.get_stddev():.3f}\n"
f" total observations: {histogram.total_count}\n"
f"{percentile_to_vals}"
)
class StatsAggregatorCallbackFactoryRequest:
"""A unique request type that is installed to trigger construction of the WorkunitsCallback."""
@rule
def construct_callback(
_: StatsAggregatorCallbackFactoryRequest, subsystem: StatsAggregatorSubsystem
) -> WorkunitsCallbackFactory:
enabled = subsystem.log
has_histogram_module = False
if enabled:
try:
import hdrh.histogram # noqa: F401
except ImportError:
logger.warning(
"Please run with `--plugins=hdrhistogram` if you would like histogram summaries to "
"be shown at the end of the run, or permanently add "
"`[GLOBAL].plugins = ['hdrhistogram']`. This will cause Pants to install "
"the `hdrhistogram` dependency from PyPI."
)
else:
has_histogram_module = True
return WorkunitsCallbackFactory(
lambda: StatsAggregatorCallback(enabled=enabled, has_histogram_module=has_histogram_module)
)
def rules():
return [
UnionRule(WorkunitsCallbackFactoryRequest, StatsAggregatorCallbackFactoryRequest),
*collect_rules(),
]
|
{
"content_hash": "e0d6238d9fd08f3ea87b070b508cb853",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 100,
"avg_line_length": 34.61038961038961,
"alnum_prop": 0.6125703564727955,
"repo_name": "patricklaw/pants",
"id": "68285c2d4a6d867d302d9f9babc7eb646f2ab74f",
"size": "5462",
"binary": false,
"copies": "2",
"ref": "refs/heads/scala",
"path": "src/python/pants/goal/stats_aggregator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^loader/', include('loader.urls', namespace='loader')),
]
|
{
"content_hash": "20919d3c9926cc9274a4af05d40d00ec",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 28.285714285714285,
"alnum_prop": 0.6919191919191919,
"repo_name": "lnlfp/lnlfp",
"id": "d65c2d7eb8a962905ba878e9a201f410909f0df6",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lionel/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "241"
},
{
"name": "HTML",
"bytes": "16654"
},
{
"name": "Python",
"bytes": "42344"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ThicknessmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="thicknessmode",
parent_name="layout.coloraxis.colorbar",
**kwargs,
):
super(ThicknessmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["fraction", "pixels"]),
**kwargs,
)
|
{
"content_hash": "f22dda1e27b069440a9b81a2bd198502",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 32.11764705882353,
"alnum_prop": 0.5952380952380952,
"repo_name": "plotly/plotly.py",
"id": "c6a034cee8aa2aa28a79932a70f1c03e397cf714",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/_thicknessmode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import calendar
import os
import sys
from nikola.plugin_categories import Task
from nikola.utils import config_changed
class Archive(Task):
"""Render the post archives."""
name = "render_archive"
def gen_tasks(self):
kw = {
"messages": self.site.MESSAGES,
"translations": self.site.config['TRANSLATIONS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"create_monthly_archive": self.site.config['CREATE_MONTHLY_ARCHIVE'],
}
self.site.scan_posts()
# TODO add next/prev links for years
for lang in kw["translations"]:
for year, posts in self.site.posts_per_year.items():
output_name = os.path.join(
kw['output_folder'], self.site.path("archive", year, lang))
context = {}
context["lang"] = lang
context["title"] = kw["messages"][lang]["Posts for year %s"] % year
context["permalink"] = self.site.link("archive", year, lang)
if not kw["create_monthly_archive"]:
template_name = "list_post.tmpl"
post_list = [self.site.global_data[post] for post in posts]
post_list.sort(key=lambda a: a.date)
post_list.reverse()
context["posts"] = post_list
else: # Monthly archives, just list the months
months = set([m.split('/')[1] for m in self.site.posts_per_month.keys() if m.startswith(str(year))])
months = sorted(list(months))
template_name = "list.tmpl"
context["items"] = [[get_month_name(int(month), lang), month] for month in months]
post_list = []
task = self.site.generic_post_list_renderer(
lang,
[],
output_name,
template_name,
kw['filters'],
context,
)
task_cfg = {1: task['uptodate'][0].config, 2: kw}
task['uptodate'] = [config_changed(task_cfg)]
task['basename'] = self.name
yield task
if not kw["create_monthly_archive"]:
continue # Just to avoid nesting the other loop in this if
template_name = "list_post.tmpl"
for yearmonth, posts in self.site.posts_per_month.items():
output_name = os.path.join(
kw['output_folder'], self.site.path("archive", yearmonth,
lang))
year, month = yearmonth.split('/')
post_list = [self.site.global_data[post] for post in posts]
post_list.sort(key=lambda a: a.date)
post_list.reverse()
context = {}
context["lang"] = lang
context["posts"] = post_list
context["permalink"] = self.site.link("archive", year, lang)
context["title"] = kw["messages"][lang]["Posts for {month} {year}"].format(
year=year, month=get_month_name(int(month), lang))
task = self.site.generic_post_list_renderer(
lang,
post_list,
output_name,
template_name,
kw['filters'],
context,
)
task_cfg = {1: task['uptodate'][0].config, 2: kw}
task['uptodate'] = [config_changed(task_cfg)]
task['basename'] = self.name
yield task
# And global "all your years" page
years = list(self.site.posts_per_year.keys())
years.sort(reverse=True)
template_name = "list.tmpl"
kw['years'] = years
for lang in kw["translations"]:
context = {}
output_name = os.path.join(
kw['output_folder'], self.site.path("archive", None,
lang))
context["title"] = kw["messages"][lang]["Archive"]
context["items"] = [(year, self.site.link("archive", year, lang))
for year in years]
context["permalink"] = self.site.link("archive", None, lang)
task = self.site.generic_post_list_renderer(
lang,
[],
output_name,
template_name,
kw['filters'],
context,
)
task_cfg = {1: task['uptodate'][0].config, 2: kw}
task['uptodate'] = [config_changed(task_cfg)]
task['basename'] = self.name
yield task
def get_month_name(month_no, locale):
if sys.version_info[0] == 3: # Python 3
with calendar.different_locale((locale, "UTF-8")):
s = calendar.month_name[month_no]
else: # Python 2
with calendar.TimeEncoding((locale, "UTF-8")):
s = calendar.month_name[month_no]
return s
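# A minimal usage sketch (hypothetical, not part of the original module):
# get_month_name() temporarily switches the process LC_TIME locale, so the
# result depends on which locales are installed on the system.
#
#     get_month_name(3, "en_US")   # -> "March", assuming en_US is available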
|
{
"content_hash": "3ee8065b2bca9a7c29e83c356fd1e22c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 120,
"avg_line_length": 42.34146341463415,
"alnum_prop": 0.48444700460829493,
"repo_name": "servalproject/nikola",
"id": "a67826f3433758e9320ae442eb3fd8fd28377bf4",
"size": "6318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/task_archive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "156379"
},
{
"name": "Python",
"bytes": "429762"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
}
|
from typing import Dict
import pytest
from bs4 import BeautifulSoup
from torrt.trackers.ytsmx import YtsmxTracker
@pytest.fixture()
def tracker():
return YtsmxTracker()
@pytest.fixture(scope='module')
def stub_links():
return {
'720P.WEB': '720p_torrent.link',
'1080P.WEB': '1080p_torrent.link',
}
@pytest.mark.parametrize('given,expected', [
({'type': 'web', 'quality': '720p'}, '720P.WEB'),
({'type': 'web', 'quality': '1080p'}, '1080P.WEB'),
])
def test_quality_from_torrent(given: Dict[str, str], expected: str):
assert YtsmxTracker._get_quality_from_torrent(given) == expected
@pytest.mark.parametrize('given,expected', [
(' 720P.web', '720P.WEB'),
('1080p.wEb ', '1080P.WEB'),
])
def test_sanitize_config_quality(given: str, expected: str):
assert YtsmxTracker._sanitize_quality(given) == expected
def test_extract_movie_id(tracker: YtsmxTracker):
stub_html = '''
<!DOCTYPE html><html>
<head></head>
<body><div id="movie-info" data-movie-id="123"></body></html>
'''
soup = BeautifulSoup(stub_html, features="lxml")
movie_id = tracker._extract_movie_id(soup)
assert movie_id == '123'
def test_quality_links(tracker: YtsmxTracker):
stub_details = {
'data': {
'movie': {
'torrents': [
{
'quality': '720p',
'type': 'web',
'url': 'http://example.com/torrent.file'
}
]
}
}
}
links = tracker._get_quality_links(stub_details)
items = list(links.items())
assert len(items) == 1
key, link = items[0]
assert key == '720P.WEB'
assert link == 'http://example.com/torrent.file'
@pytest.mark.parametrize('preffered, expected', [
(['1080P.WEB', '720P.WEB'], ('1080P.WEB', '1080p_torrent.link')),
(['720P.WEB', '1080P.WEB'], ('720P.WEB', '720p_torrent.link')),
(['8K.HDTV', '4K.WEB', '2K,HDTV'], None),
([], None),
])
def test_preffered_quality(preffered, expected, stub_links):
tracker = YtsmxTracker(quality_prefs=preffered)
assert tracker._get_preffered_link(stub_links) == expected
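# A minimal usage sketch (hypothetical, not part of the original test module),
# mirroring the parametrized case above: a tracker built with an ordered
# quality preference list picks the first preferred quality present in the
# available links.
#
#     tracker = YtsmxTracker(quality_prefs=['1080P.WEB', '720P.WEB'])
#     tracker._get_preffered_link({'720P.WEB': 'a', '1080P.WEB': 'b'})
#     # -> ('1080P.WEB', 'b')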
|
{
"content_hash": "bd727196833d44b81b0badc9a816219d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 69,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.5807467386414755,
"repo_name": "idlesign/torrt",
"id": "31909d03781c04e571c36da1bcb70838e9ae0e8f",
"size": "2223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/trackers/test_ytsmx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "68334"
},
{
"name": "Python",
"bytes": "150566"
}
],
"symlink_target": ""
}
|